hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7f4613700e79755fd22a4351d1131f23a104c76 | 253 | py | Python | classwork/05_01_2021/plt.py | Katsute/Baruch-CIS-3120-Assignments | 2cb470a7e3b7bf2d49da520fdff079f832624c06 | [
"CC0-1.0"
] | null | null | null | classwork/05_01_2021/plt.py | Katsute/Baruch-CIS-3120-Assignments | 2cb470a7e3b7bf2d49da520fdff079f832624c06 | [
"CC0-1.0"
] | null | null | null | classwork/05_01_2021/plt.py | Katsute/Baruch-CIS-3120-Assignments | 2cb470a7e3b7bf2d49da520fdff079f832624c06 | [
"CC0-1.0"
] | 1 | 2022-01-12T18:19:11.000Z | 2022-01-12T18:19:11.000Z | import matplotlib.pyplot as plt
import pandas as pd
# Sample data: five (x, y) points reused by every chart below.
x = [5, 2, 9, 4, 7]
y = [10, 5, 8, 4, 2]
# Line chart connecting the points in list order.
plt.plot(x, y)
plt.show()
# Bar chart: one bar per x position with height y.
plt.bar(x, y)
plt.show()
# Histogram of the x values alone (default bin count).
plt.hist(x)
plt.show()
# Scatter plot drawn via the pandas plotting API instead of pyplot directly.
df = pd.DataFrame({'x': x, 'y': y})
df.plot('x', 'y', kind="scatter")
plt.show()
| 13.315789 | 35 | 0.573123 | import matplotlib.pyplot as plt
import pandas as pd
x = [5, 2, 9, 4, 7]
y = [10, 5, 8, 4, 2]
plt.plot(x, y)
plt.show()
plt.bar(x, y)
plt.show()
plt.hist(x)
plt.show()
df = pd.DataFrame({'x': x, 'y': y})
df.plot('x', 'y', kind="scatter")
plt.show()
| true | true |
f7f462936a168049d71b80aef6e95a105a250120 | 2,066 | py | Python | fairscale/optim/grad_scaler.py | jessijzhao/fairscale | d6a8fc6dadc5d5ab4e3ee3f42f8cd570d70d30ec | [
"Apache-2.0",
"BSD-3-Clause"
] | 6 | 2020-11-09T11:24:26.000Z | 2021-01-15T13:35:06.000Z | fairscale/optim/grad_scaler.py | jessijzhao/fairscale | d6a8fc6dadc5d5ab4e3ee3f42f8cd570d70d30ec | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2021-02-16T13:42:50.000Z | 2021-02-16T13:42:50.000Z | fairscale/optim/grad_scaler.py | jessijzhao/fairscale | d6a8fc6dadc5d5ab4e3ee3f42f8cd570d70d30ec | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Dict
import torch
from torch.cuda.amp import GradScaler as TorchGradScaler
import torch.distributed as dist
from torch.optim import Optimizer
from .oss import OSS
class GradScaler(TorchGradScaler):
    """A :class:`torch.cuda.amp.GradScaler` that forces ``allow_fp16=True``
    when unscaling gradients.

    NOTE(review): this overrides a private upstream method
    (``_unscale_grads_``) — presumably to permit unscaling fp16 gradients,
    which upstream otherwise rejects; confirm against the installed torch
    version's ``torch.cuda.amp`` internals.
    """
    def _unscale_grads_(
        self, optimizer: Optimizer, inv_scale: torch.Tensor, found_inf: torch.Tensor, allow_fp16: bool
    ) -> Dict[torch.device, torch.Tensor]:
        # Delegate to the parent, ignoring the caller-supplied ``allow_fp16``
        # and always passing True.
        return super()._unscale_grads_(optimizer, inv_scale, found_inf, True)
class ShardedGradScaler(TorchGradScaler):
    """
    A shard-aware :class:`GradScaler<torch.cuda.amp.GradScaler>`, to be used in conjunction with
    :class:`OSS` and :class:`ShardedOptimizer`.

    Interface and usecases are not changed, more explanations can be found in the corresponding pytorch
    documentation https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.GradScaler

    Because each rank only holds a shard of the gradients, the inf/nan flag
    found locally must be all-reduced across ranks (see ``unscale_``).
    """
    def __init__(self) -> None:
        super().__init__()
        # Warn at most once if the optimizer does not look sharded.
        self.display_warning = True
    def unscale_(self, optimizer: Optimizer) -> None:
        """Unscale this rank's gradient shard, then synchronize the
        found-inf flags across all ranks so every rank agrees on whether
        to skip the step."""
        # Could be a mistake, this scaler is supposed to work with ZeroRedundancyOptimizer only
        if self.display_warning and not isinstance(optimizer, OSS):
            logging.warning(
                "ShardedGradScaler is to be used in combination with a sharded optimizer, this could not be checked"
            )
            self.display_warning = False  # Only warn once
        # Call the upstream unscale_ method which will only act on this rank's gradients
        super().unscale_(optimizer)
        # Synchronize the detected inf across the ranks
        optimizer_state = self._per_optimizer_states[id(optimizer)]
        handles = [dist.all_reduce(v, async_op=True) for v in optimizer_state["found_inf_per_device"].values()]
        # Make sure that the calls are done before moving out
        _ = list(map(lambda x: x.wait(), handles))
| 37.563636 | 116 | 0.712488 |
import logging
from typing import Dict
import torch
from torch.cuda.amp import GradScaler as TorchGradScaler
import torch.distributed as dist
from torch.optim import Optimizer
from .oss import OSS
class GradScaler(TorchGradScaler):
def _unscale_grads_(
self, optimizer: Optimizer, inv_scale: torch.Tensor, found_inf: torch.Tensor, allow_fp16: bool
) -> Dict[torch.device, torch.Tensor]:
return super()._unscale_grads_(optimizer, inv_scale, found_inf, True)
class ShardedGradScaler(TorchGradScaler):
def __init__(self) -> None:
super().__init__()
self.display_warning = True
def unscale_(self, optimizer: Optimizer) -> None:
if self.display_warning and not isinstance(optimizer, OSS):
logging.warning(
"ShardedGradScaler is to be used in combination with a sharded optimizer, this could not be checked"
)
self.display_warning = False
super().unscale_(optimizer)
# Synchronize the detected inf across the ranks
optimizer_state = self._per_optimizer_states[id(optimizer)]
handles = [dist.all_reduce(v, async_op=True) for v in optimizer_state["found_inf_per_device"].values()]
# Make sure that the calls are done before moving out
_ = list(map(lambda x: x.wait(), handles))
| true | true |
f7f462e3193e9d12342db25d876628e4fc92b9ca | 4,282 | py | Python | backend/CodeSimilar/plot.py | Ridhii/SyncdSim | 4cd120e9f7d4db348d405db4608ef9c6f9499d01 | [
"BSD-3-Clause"
] | 50 | 2015-10-21T23:16:35.000Z | 2021-09-27T12:52:04.000Z | backend/CodeSimilar/plot.py | Ridhii/SyncdSim | 4cd120e9f7d4db348d405db4608ef9c6f9499d01 | [
"BSD-3-Clause"
] | 187 | 2015-01-08T22:24:54.000Z | 2020-04-17T17:23:50.000Z | backend/CodeSimilar/plot.py | Ridhii/SyncdSim | 4cd120e9f7d4db348d405db4608ef9c6f9499d01 | [
"BSD-3-Clause"
] | 25 | 2015-11-02T17:54:49.000Z | 2020-06-16T07:28:11.000Z | #!/usr/bin/env python
import os
import sys
sys.path.append(os.path.join(os.environ["CONTECH_HOME"], "scripts"))
import util
import subprocess
import shutil
import time
import datetime
import glob
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib as matplotlib
import json
import math
import csv
#This script parses the Harmony backend output to plot (thread count, exec count) tuples
def main(arg):
    """Plot per-basic-block execution histograms from Harmony CSV output.

    Each input file becomes one subplot (at most 28) showing, per number of
    unique contexts, the percentage of basic blocks executed by that many
    contexts; a red vertical line marks the average.  Saves temp.png and
    temp.pdf.  NOTE: this is a Python 2 script (print statements, integer
    division semantics).
    """
    if (len(arg)) == 1:
        print "Usage: {0} input\n".format(arg[0])
        exit()
    p = 1
    fig = plt.figure(figsize=(7, 7))
    for harmony_in in arg[1:]:
        execTuple = []
        threadTuple = []
        zeroTuple = []
        max_threads = 0
        avg_val = 0
        # First pass: read the CSV; rows with only two fields are skipped.
        with open(harmony_in, "r") as csvfile:
            #sorry to use blah, but I blanked on a name for this latest temp
            blah = csv.reader(csvfile)
            i = 0
            for row in blah:
                r = map(int, row)
                if (len(r) == 2):
                    continue
                sval = r[1] # includes 0:*
                zval = r[2] # removes 0:*
                execTuple.append(i)
                val = zval
                # Keep whichever count is larger; track which list it came from.
                if (sval > zval):
                    zeroTuple.append(sval)
                    val = sval
                else:
                    threadTuple.append(zval)
                    val = zval
                if val > max_threads:
                    max_threads = val
                avg_val += val
                i += 1
        if i == 0:
            continue
        # NOTE(review): under Python 2 this is truncating integer division.
        avg_val /= i
        if (p <= 28):
            # Single input -> one full-size plot; otherwise a 7x4 grid.
            if (len(arg) == 2):
                ax = fig.add_subplot(1,1, p)
            else:
                ax = fig.add_subplot(7,4, p)
            #plt.xlim(0, 17)
            #plt.plot(threadTuple, execTuple, 'k.', linestyle='None')
            # Build histograms over context counts, then normalise to percent.
            tHist = [0.0] * (max_threads + 1)
            zHist = [0.0] * (max_threads + 1)
            print len(tHist)
            for t in threadTuple:
                tHist[t] += 1.0
            for z in zeroTuple:
                zHist[z] += 1.0
            nsum = sum(tHist) + sum(zHist)
            tFin = []
            for t in tHist:
                tFin.append(int(100.0 * (t / nsum)))
            zFin = []
            for z in zHist:
                zFin.append(int(100.0 * (z / nsum)))
            xval = range(0, max_threads+1)
            # Shift bars left so they are centred on their x value.
            leftv = []
            for x in xval:
                leftv.append(x - 0.4)
            plt.bar(leftv, tFin, width=0.85,color='b')
            plt.bar(leftv, zFin, width=0.85,color='g', bottom=tFin)
            #plt.hist([tFin,zFin], bins=max_threads, align='left',stacked=True)
            plt.ylim(0,100)
            plt.vlines(avg_val, 0, 100, colors='r')
            if (len(arg) == 2):
                plt.xticks(fontsize=12)
                plt.yticks(fontsize=12)
            else:
                # Grid layout: only the outer columns keep y tick labels.
                if (p % 4 == 1):
                    p = p
                elif (p % 4 == 0):
                    ax.yaxis.tick_right()
                    ax.yaxis.set_ticks_position('both')
                else:
                    ax.set_yticklabels([])
                plt.xticks(fontsize=5)
                plt.yticks(fontsize=5)
            plt.tick_params(axis='y', which='minor', left='off', right='off')
            # Derive the subplot title from the file name (basename, no extension).
            harmony_l = harmony_in.split('/')
            harmony_in = harmony_l[-1]
            harmony_l = harmony_in.split('.')
            harmony_in = harmony_l[-2]
            t = plt.title(harmony_in, fontsize=5, verticalalignment='bottom')
            (x,y) = t.get_position()
            t.set_position((x, (y- 0.07)))
            p += 1
    plt.subplots_adjust(left=0.05, top = 0.98, bottom = 0.05, wspace = 0.1, hspace = 0.27)
    fig.text(0.5, 0.02, 'Unique Contexts', ha='center', va='center', fontsize=7)
    fig.text(0.01, 0.5, 'Fraction of Contexts Executing a Basic Block', ha='center', va='center', rotation='vertical', fontsize=7)
    plt.savefig( "temp.png")
    plt.savefig( "temp.pdf")
# Script entry point: forward all command-line arguments to main().
if __name__ == "__main__":
    main(sys.argv)
| 31.955224 | 130 | 0.459832 |
import os
import sys
sys.path.append(os.path.join(os.environ["CONTECH_HOME"], "scripts"))
import util
import subprocess
import shutil
import time
import datetime
import glob
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib as matplotlib
import json
import math
import csv
def main(arg):
if (len(arg)) == 1:
print "Usage: {0} input\n".format(arg[0])
exit()
p = 1
fig = plt.figure(figsize=(7, 7))
for harmony_in in arg[1:]:
execTuple = []
threadTuple = []
zeroTuple = []
max_threads = 0
avg_val = 0
with open(harmony_in, "r") as csvfile:
blah = csv.reader(csvfile)
i = 0
for row in blah:
r = map(int, row)
if (len(r) == 2):
continue
sval = r[1]
zval = r[2]
execTuple.append(i)
val = zval
if (sval > zval):
zeroTuple.append(sval)
val = sval
else:
threadTuple.append(zval)
val = zval
if val > max_threads:
max_threads = val
avg_val += val
i += 1
if i == 0:
continue
avg_val /= i
if (p <= 28):
if (len(arg) == 2):
ax = fig.add_subplot(1,1, p)
else:
ax = fig.add_subplot(7,4, p)
tHist = [0.0] * (max_threads + 1)
zHist = [0.0] * (max_threads + 1)
print len(tHist)
for t in threadTuple:
tHist[t] += 1.0
for z in zeroTuple:
zHist[z] += 1.0
nsum = sum(tHist) + sum(zHist)
tFin = []
for t in tHist:
tFin.append(int(100.0 * (t / nsum)))
zFin = []
for z in zHist:
zFin.append(int(100.0 * (z / nsum)))
xval = range(0, max_threads+1)
leftv = []
for x in xval:
leftv.append(x - 0.4)
plt.bar(leftv, tFin, width=0.85,color='b')
plt.bar(leftv, zFin, width=0.85,color='g', bottom=tFin)
plt.ylim(0,100)
plt.vlines(avg_val, 0, 100, colors='r')
if (len(arg) == 2):
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
else:
if (p % 4 == 1):
p = p
elif (p % 4 == 0):
ax.yaxis.tick_right()
ax.yaxis.set_ticks_position('both')
else:
ax.set_yticklabels([])
plt.xticks(fontsize=5)
plt.yticks(fontsize=5)
plt.tick_params(axis='y', which='minor', left='off', right='off')
harmony_l = harmony_in.split('/')
harmony_in = harmony_l[-1]
harmony_l = harmony_in.split('.')
harmony_in = harmony_l[-2]
t = plt.title(harmony_in, fontsize=5, verticalalignment='bottom')
(x,y) = t.get_position()
t.set_position((x, (y- 0.07)))
p += 1
plt.subplots_adjust(left=0.05, top = 0.98, bottom = 0.05, wspace = 0.1, hspace = 0.27)
fig.text(0.5, 0.02, 'Unique Contexts', ha='center', va='center', fontsize=7)
fig.text(0.01, 0.5, 'Fraction of Contexts Executing a Basic Block', ha='center', va='center', rotation='vertical', fontsize=7)
plt.savefig( "temp.png")
plt.savefig( "temp.pdf")
if __name__ == "__main__":
main(sys.argv)
| false | true |
f7f4641d5cc2b222475caef2c4a28b050470889b | 6,535 | py | Python | src/sage/categories/cw_complexes.py | fredstro/sage | c936d2cda81ec7ec3552a3bdb29c994b40d1bb24 | [
"BSL-1.0"
] | 2 | 2018-06-30T01:37:35.000Z | 2018-06-30T01:37:39.000Z | src/sage/categories/cw_complexes.py | boothby/sage | 1b1e6f608d1ef8ee664bb19e991efbbc68cbd51f | [
"BSL-1.0"
] | null | null | null | src/sage/categories/cw_complexes.py | boothby/sage | 1b1e6f608d1ef8ee664bb19e991efbbc68cbd51f | [
"BSL-1.0"
] | null | null | null | r"""
CW Complexes
"""
#*****************************************************************************
# Copyright (C) 2015 Travis Scrimshaw <tscrim at ucdavis.edu>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#******************************************************************************
from sage.misc.abstract_method import abstract_method
from sage.misc.cachefunc import cached_method
from sage.categories.category_singleton import Category_singleton
from sage.categories.category_with_axiom import CategoryWithAxiom
from sage.categories.sets_cat import Sets
class CWComplexes(Category_singleton):
    r"""
    The category of CW complexes.
    A CW complex is a Closure-finite cell complex in the Weak topology.
    REFERENCES:
    - :wikipedia:`CW_complex`
    .. NOTE::
        The notion of "finite" is that the number of cells is finite.
    EXAMPLES::
        sage: from sage.categories.cw_complexes import CWComplexes
        sage: C = CWComplexes(); C
        Category of CW complexes
    TESTS::
        sage: TestSuite(C).run()
    """
    @cached_method
    def super_categories(self):
        """
        Return the (immediate) super categories: every CW complex is a
        topological space.
        EXAMPLES::
            sage: from sage.categories.cw_complexes import CWComplexes
            sage: CWComplexes().super_categories()
            [Category of topological spaces]
        """
        return [Sets().Topological()]
    def _repr_object_names(self):
        """
        Return the plural name used when printing this category.
        EXAMPLES::
            sage: from sage.categories.cw_complexes import CWComplexes
            sage: CWComplexes() # indirect doctest
            Category of CW complexes
        """
        return "CW complexes"
    class SubcategoryMethods:
        @cached_method
        def Connected(self):
            """
            Return the full subcategory of the connected objects of ``self``.
            EXAMPLES::
                sage: from sage.categories.cw_complexes import CWComplexes
                sage: CWComplexes().Connected()
                Category of connected CW complexes
            TESTS::
                sage: TestSuite(CWComplexes().Connected()).run()
                sage: CWComplexes().Connected.__module__
                'sage.categories.cw_complexes'
            """
            return self._with_axiom('Connected')
        @cached_method
        def FiniteDimensional(self):
            """
            Return the full subcategory of the finite dimensional
            objects of ``self``.
            EXAMPLES::
                sage: from sage.categories.cw_complexes import CWComplexes
                sage: C = CWComplexes().FiniteDimensional(); C
                Category of finite dimensional CW complexes
            TESTS::
                sage: from sage.categories.cw_complexes import CWComplexes
                sage: C = CWComplexes().FiniteDimensional()
                sage: TestSuite(C).run()
                sage: CWComplexes().Connected().FiniteDimensional.__module__
                'sage.categories.cw_complexes'
            """
            return self._with_axiom('FiniteDimensional')
    class Connected(CategoryWithAxiom):
        """
        The category of connected CW complexes.
        """
    class FiniteDimensional(CategoryWithAxiom):
        """
        Category of finite dimensional CW complexes.
        """
    class Finite(CategoryWithAxiom):
        """
        Category of finite CW complexes.
        A finite CW complex is a CW complex with a finite number of cells.
        """
        def extra_super_categories(self):
            """
            Return the extra super categories of ``self``.
            A finite CW complex is a compact finite-dimensional CW complex.
            EXAMPLES::
                sage: from sage.categories.cw_complexes import CWComplexes
                sage: C = CWComplexes().Finite()
                sage: C.extra_super_categories()
                [Category of finite dimensional CW complexes,
                 Category of compact topological spaces]
            """
            return [CWComplexes().FiniteDimensional(), Sets().Topological().Compact()]
        class ParentMethods:
            @cached_method
            def dimension(self):
                """
                Return the dimension of ``self``.
                For a finite complex this is the maximum dimension over
                all cells, computed from :meth:`cells`.
                EXAMPLES::
                    sage: from sage.categories.cw_complexes import CWComplexes
                    sage: X = CWComplexes().example()
                    sage: X.dimension()
                    2
                """
                C = self.cells()
                return max(c.dimension() for d in C.keys() for c in C[d])
    # Hook picked up by the axiom framework: declares that the Compact
    # axiom implies Finite for CW complexes.
    def Compact_extra_super_categories(self):
        """
        Return extraneous super categories for ``CWComplexes().Compact()``.
        A compact CW complex is finite, see Proposition A.1 in [Hat]_.
        .. TODO::
            Fix the name of finite CW complexes.
        EXAMPLES::
            sage: from sage.categories.cw_complexes import CWComplexes
            sage: CWComplexes().Compact() # indirect doctest
            Category of finite finite dimensional CW complexes
            sage: CWComplexes().Compact() is CWComplexes().Finite()
            True
        """
        return (Sets().Finite(),)
    class ElementMethods:
        @abstract_method
        def dimension(self):
            """
            Return the dimension of ``self``.
            EXAMPLES::
                sage: from sage.categories.cw_complexes import CWComplexes
                sage: X = CWComplexes().example()
                sage: X.an_element().dimension()
                2
            """
    class ParentMethods:
        @abstract_method
        def dimension(self):
            """
            Return the dimension of ``self``.
            EXAMPLES::
                sage: from sage.categories.cw_complexes import CWComplexes
                sage: X = CWComplexes().example()
                sage: X.dimension()
                2
            """
        @abstract_method(optional=True)
        def cells(self):
            """
            Return the cells of ``self``.
            EXAMPLES::
                sage: from sage.categories.cw_complexes import CWComplexes
                sage: X = CWComplexes().example()
                sage: C = X.cells()
                sage: sorted((d, C[d]) for d in C.keys())
                [(0, (0-cell v,)),
                 (1, (0-cell e1, 0-cell e2)),
                 (2, (2-cell f,))]
            """
| 30.25463 | 86 | 0.543688 |
from sage.misc.abstract_method import abstract_method
from sage.misc.cachefunc import cached_method
from sage.categories.category_singleton import Category_singleton
from sage.categories.category_with_axiom import CategoryWithAxiom
from sage.categories.sets_cat import Sets
class CWComplexes(Category_singleton):
@cached_method
def super_categories(self):
return [Sets().Topological()]
def _repr_object_names(self):
return "CW complexes"
class SubcategoryMethods:
@cached_method
def Connected(self):
return self._with_axiom('Connected')
@cached_method
def FiniteDimensional(self):
return self._with_axiom('FiniteDimensional')
class Connected(CategoryWithAxiom):
class FiniteDimensional(CategoryWithAxiom):
class Finite(CategoryWithAxiom):
def extra_super_categories(self):
return [CWComplexes().FiniteDimensional(), Sets().Topological().Compact()]
class ParentMethods:
@cached_method
def dimension(self):
C = self.cells()
return max(c.dimension() for d in C.keys() for c in C[d])
def Compact_extra_super_categories(self):
return (Sets().Finite(),)
class ElementMethods:
@abstract_method
def dimension(self):
class ParentMethods:
@abstract_method
def dimension(self):
@abstract_method(optional=True)
def cells(self):
| true | true |
f7f464b25b8b98e0e0a928116eefef17c121f027 | 12,734 | py | Python | ost/helpers/vector.py | AnglinaBhambra/OpenSarToolkit | b2d6562a77eea86b4c236cc14f81f73ff4e75c17 | [
"MIT"
] | null | null | null | ost/helpers/vector.py | AnglinaBhambra/OpenSarToolkit | b2d6562a77eea86b4c236cc14f81f73ff4e75c17 | [
"MIT"
] | null | null | null | ost/helpers/vector.py | AnglinaBhambra/OpenSarToolkit | b2d6562a77eea86b4c236cc14f81f73ff4e75c17 | [
"MIT"
] | null | null | null | import os
import sys
import json
from functools import partial
import osr
import ogr
import pyproj
import geopandas as gpd
from shapely.ops import transform
from shapely.wkt import loads
from shapely.geometry import Point, Polygon, mapping, shape
from fiona import collection
from fiona.crs import from_epsg
def get_epsg(prjfile):
    '''Get the epsg code from a projection file of a shapefile

    Args:
        prjfile: a .prj file of a shapefile

    Returns:
        str: EPSG code
    '''
    # Read the ESRI WKT; the ``with`` block closes the handle deterministically
    # (the previous version leaked the open file object).
    with open(prjfile, 'r') as prj_file:
        prj_txt = prj_file.read()
    srs = osr.SpatialReference()
    srs.ImportFromESRI([prj_txt])
    srs.AutoIdentifyEPSG()
    # return EPSG code
    return srs.GetAuthorityCode(None)
def get_proj4(prjfile):
    '''Get the proj4 string from a projection file of a shapefile

    Args:
        prjfile: a .prj file of a shapefile

    Returns:
        str: PROJ4 code
    '''
    # Read the projection definition; ``with`` closes the handle
    # (the previous version leaked the open file object).
    with open(prjfile, 'r') as prj_file:
        prj_string = prj_file.read()
    # Lambert error
    if '\"Lambert_Conformal_Conic\"' in prj_string:
        print(' ERROR: It seems you used an ESRI generated shapefile'
              ' with Lambert Conformal Conic projection. ')
        print(' This one is not compatible with Open Standard OGR/GDAL'
              ' tools used here. ')
        print(' Reproject your shapefile to a standard Lat/Long projection'
              ' and try again')
        exit(1)
    srs = osr.SpatialReference()
    srs.ImportFromESRI([prj_string])
    return srs.ExportToProj4()
def epsg_to_wkt_projection(epsg_code):
    """Return the WKT projection string for an EPSG code.

    Args:
        epsg_code (int): numeric EPSG code, e.g. 4326

    Returns:
        str: the projection as WKT
    """
    spatial_ref = osr.SpatialReference()
    spatial_ref.ImportFromEPSG(epsg_code)
    # Bug fix: the method is ExportToWkt(); the previous ``ExpotToWkt``
    # raised AttributeError on every call.
    return spatial_ref.ExportToWkt()
def reproject_geometry(geom, inproj4, out_epsg):
    '''Reproject a wkt geometry based on EPSG code

    Args:
        geom (str): a geometry as WKT string
        inproj4 (str): a proj4 string describing the input projection
        out_epsg (str): the EPSG code to which the geometry should transformed

    Returns
        geom (ogr-geometry object): the transformed geometry
    '''
    geom = ogr.CreateGeometryFromWkt(geom)
    # input SpatialReference
    spatial_ref_in = osr.SpatialReference()
    spatial_ref_in.ImportFromProj4(inproj4)
    # output SpatialReference
    spatial_ref_out = osr.SpatialReference()
    spatial_ref_out.ImportFromEPSG(int(out_epsg))
    # create the CoordinateTransformation
    coord_transform = osr.CoordinateTransformation(spatial_ref_in,
                                                   spatial_ref_out)
    try:
        geom.Transform(coord_transform)
    # A bare ``except:`` would also swallow KeyboardInterrupt/SystemExit;
    # only real errors raised by the transform should abort.
    except Exception:
        print(' ERROR: Not able to transform the geometry')
        sys.exit()
    return geom
def geodesic_point_buffer(lat, lon, meters, envelope=False):
    """Return the WKT of a geodesic buffer around a lat/lon point.

    The point is buffered in an azimuthal-equidistant projection centred
    on itself (so ``meters`` is a true ground distance) and the ring is
    projected back to WGS84.  With ``envelope=True`` the bounding box of
    the buffer is returned instead of the circle.
    """
    wgs84 = pyproj.Proj(init='epsg:4326')
    aeqd = pyproj.Proj(
        '+proj=aeqd +lat_0={lat} +lon_0={lon} +x_0=0 +y_0=0'.format(lat=lat, lon=lon))
    to_wgs84 = partial(pyproj.transform, aeqd, wgs84)
    # buffer around the projection origin, i.e. the point itself
    disc = Point(0, 0).buffer(meters)
    ring = transform(to_wgs84, disc).exterior.coords[:]
    polygon = Polygon(ring)
    if envelope is True:
        polygon = polygon.envelope
    return polygon.to_wkt()
def latlon_to_wkt(lat, lon, buffer_degree=None, buffer_meter=None, envelope=False):
    '''A helper function to create a WKT representation of Lat/Lon pair

    This function takes lat and lon vale and returns the WKT Point
    representation by default.
    A buffer can be set in metres, which returns a WKT POLYGON. If envelope
    is set to True, the buffer will be squared by the extent buffer radius.

    Args:
        lat (str): Latitude (deg) of a point
        lon (str): Longitude (deg) of a point
        buffer_degree (float): optional buffer around the point, in degrees
        buffer_meter (float): optional buffer around the point, in metres
        envelope (bool): gives a square instead of a circular buffer
                         (only applies if a buffer is set)

    Returns:
        wkt (str): WKT string
    '''
    # Test against None explicitly: the old truthiness checks made a
    # buffer of 0 fall through every branch, raising UnboundLocalError.
    if buffer_degree is None and buffer_meter is None:
        aoi_wkt = 'POINT ({} {})'.format(lon, lat)
    elif buffer_degree is not None:
        aoi_geom = loads('POINT ({} {})'.format(lon, lat)).buffer(buffer_degree)
        if envelope:
            aoi_geom = aoi_geom.envelope
        aoi_wkt = aoi_geom.to_wkt()
    else:
        aoi_wkt = geodesic_point_buffer(lat, lon, buffer_meter, envelope)
    return aoi_wkt
def wkt_manipulations(wkt, buffer=None, convex=False, envelope=False):
    """Optionally buffer, convex-hull and/or envelope a WKT geometry.

    Args:
        wkt (str): input geometry as WKT
        buffer (float): if set (and non-zero), buffer distance in map units
        convex (bool): replace the geometry by its convex hull
        envelope (bool): replace the geometry by its bounding-box polygon

    Returns:
        str: the manipulated geometry as WKT
    """
    geom = ogr.CreateGeometryFromWkt(wkt)
    if buffer:
        geom = geom.Buffer(buffer)
    if convex:
        geom = geom.ConvexHull()
    if envelope:
        # GetEnvelope() returns the tuple (minX, maxX, minY, maxY);
        # rebuild a closed ring from those corners.
        # NOTE(review): the last corner appears twice in the ring below —
        # presumably harmless for OGR, but verify the intended ring order.
        geom = geom.GetEnvelope()
        geom = ogr.CreateGeometryFromWkt(
            'POLYGON (({} {}, {} {}, {} {}, {} {}, {} {}, {} {}))'.format(
                geom[1], geom[3], geom[0], geom[3], geom[0], geom[2],
                geom[1], geom[2], geom[1], geom[3], geom[1], geom[3]))
    return geom.ExportToWkt()
def shp_to_wkt(shapefile, buffer=None, convex=False, envelope=False):
    '''A helper function to translate a shapefile into WKT

    All features of the layer are merged into one geometry collection,
    reprojected to WGS84 if necessary, and optionally buffered /
    convex-hulled / enveloped (see :func:`wkt_manipulations`).

    Args:
        shapefile: path to a .shp file (a sibling .prj must exist)
        buffer (float): optional buffer distance
        convex (bool): take the convex hull
        envelope (bool): take the bounding box

    Returns:
        str: the resulting geometry as WKT (Lat/Lon, WGS84)
    '''
    # get filepaths and proj4 string
    shpfile = os.path.abspath(shapefile)
    prjfile = shpfile[:-4] + '.prj'
    proj4 = get_proj4(prjfile)
    # the layer inside a shapefile is named after the file itself
    lyr_name = os.path.basename(shapefile)[:-4]
    shp = ogr.Open(os.path.abspath(shapefile))
    lyr = shp.GetLayerByName(lyr_name)
    geom = ogr.Geometry(ogr.wkbGeometryCollection)
    for feat in lyr:
        geom.AddGeometry(feat.GetGeometryRef())
    wkt = geom.ExportToWkt()
    if proj4 != '+proj=longlat +datum=WGS84 +no_defs':
        print(' INFO: Reprojecting AOI file to Lat/Long (WGS84)')
        wkt = reproject_geometry(wkt, proj4, 4326).ExportToWkt()
    # do manipulations if needed
    wkt = wkt_manipulations(wkt, buffer=buffer, convex=convex,
                            envelope=envelope)
    return wkt
def kml_to_wkt(kmlfile):
    """Return the WKT of the geometry of a KML file's first layer.

    Note: like the original, only the geometry of the *last* feature
    iterated is returned, which is correct for single-feature KML files.

    Args:
        kmlfile: path to a .kml file

    Returns:
        str or None: the geometry as WKT, or None if the layer is empty
    """
    shp = ogr.Open(os.path.abspath(kmlfile))
    # Bug fix: GetLayerByName() requires a layer name and always raised
    # TypeError when called without one; take the first layer instead.
    lyr = shp.GetLayer(0)
    wkt = None
    for feat in lyr:
        geom = feat.GetGeometryRef()
        wkt = str(geom)
    return wkt
def latlon_to_shp(lon, lat, shapefile):
    """Write a single WGS84 point (lon, lat) to an ESRI shapefile."""
    shapefile = str(shapefile)
    point_schema = {'geometry': 'Point',
                    'properties': {'id': 'str'}}
    point = loads('POINT ({} {})'.format(lon, lat))
    with collection(shapefile, "w",
                    crs=from_epsg(4326),
                    driver="ESRI Shapefile",
                    schema=point_schema) as sink:
        sink.write({'geometry': mapping(point),
                    'properties': {'id': '1'}})
def shp_to_gdf(shapefile):
    """Read a shapefile into a GeoDataFrame, reprojecting to WGS84 if needed."""
    frame = gpd.GeoDataFrame.from_file(shapefile)
    proj4 = get_proj4(shapefile[:-4] + '.prj')
    if proj4 != '+proj=longlat +datum=WGS84 +no_defs':
        print(' INFO: reprojecting AOI layer to WGS84.')
        # stamp the source projection on the frame, then reproject
        frame.crs = (proj4)
        frame = frame.to_crs({'init': 'epsg:4326'})
    return frame
def wkt_to_gdf(wkt):
    """Convert a WKT string into a GeoDataFrame (WGS84).

    A bare point is buffered by 0.05 degrees and squared so that a usable
    AOI polygon results; geometry collections holding a single multipolygon
    and other multi-part geometries are exploded into one row per part.
    """
    geometry = loads(wkt)
    wgs84 = {'init': 'epsg:4326', 'no_defs': True}
    if geometry.geom_type == 'Point':
        # point wkt: blow it up to a small square AOI
        gdf = gpd.GeoDataFrame({'id': ['1'],
                                'geometry': loads(wkt).buffer(0.05).envelope})
    elif geometry.geom_type == 'Polygon':
        # polygon wkt: single-row frame
        gdf = gpd.GeoDataFrame({'id': ['1'],
                                'geometry': loads(wkt)})
    elif (geometry.geom_type == 'GeometryCollection'
            and len(geometry) == 1
            and 'MULTIPOLYGON' in str(geometry)):
        # collection wrapping a single multipolygon: explode its parts,
        # ids counted from 0
        gdf = gpd.GeoDataFrame({'id': ['1'], 'geometry': geometry}, crs=wgs84)
        ids, parts = [], []
        for idx, part in enumerate(gdf.geometry.values[0]):
            ids.append(idx)
            parts.append(part)
        gdf = gpd.GeoDataFrame({'id': ids, 'geometry': parts},
                               geometry='geometry', crs=gdf.crs)
    elif geometry.geom_type == 'GeometryCollection' and len(geometry) == 1:
        # collection wrapping a single polygon
        gdf = gpd.GeoDataFrame({'id': ['1'], 'geometry': geometry}, crs=wgs84)
    else:
        # any other multi-part geometry: one row per member, ids from 1
        ids, members = [], []
        for nr, member in enumerate(geometry, start=1):
            ids.append(nr)
            members.append(member)
        gdf = gpd.GeoDataFrame({'id': ids, 'geometry': members}, crs=wgs84)
    return gdf
def wkt_to_shp(wkt, outfile):
    """Convert a WKT string to a vector file on disk (via a GeoDataFrame)."""
    wkt_to_gdf(wkt).to_file(outfile)
def gdf_to_json_geometry(gdf):
    """Parse features from a GeoDataFrame into the plain GeoJSON geometry
    mappings that rasterio expects.

    Rows whose geometry is empty/null are dropped.

    Args:
        gdf: a GeoDataFrame (anything with a GeoJSON ``to_json()``)

    Returns:
        list[dict]: one GeoJSON geometry dict per non-empty feature
    """
    # (a stale, commented-out fallback implementation was removed here)
    geojson = json.loads(gdf.to_json())
    return [feature['geometry'] for feature in geojson['features']
            if feature['geometry']]
def inventory_to_shp(inventory_df, outfile):
    """Write the inventory GeoDataFrame to a vector file.

    Shapefiles cannot store datetime columns, so the four timestamp
    columns are converted to plain strings first.
    """
    for column in ('acquisitiondate', 'ingestiondate',
                   'beginposition', 'endposition'):
        inventory_df[column] = inventory_df[column].astype(str)
    # write to shapefile
    inventory_df.to_file(outfile)
def exterior(infile, outfile, buffer=None):
    """Write the cleaned exterior rings of the polygons in ``infile``.

    Interior rings (holes) are dropped, sliver polygons with an area below
    1e-6 square degrees are removed, and the remaining geometries are
    buffered before writing.

    Args:
        infile: path to the input vector file (assumed WGS84)
        outfile: path of the output file
        buffer (float): buffer distance in degrees (negative shrinks).
            Previously this parameter was silently ignored; if omitted the
            historic hard-coded value of -0.0018 is applied.
    """
    gdf = gpd.read_file(infile, crs={'init': 'EPSG:4326'})
    # keep only the outer ring of every polygon
    gdf.geometry = gdf.geometry.apply(lambda row: Polygon(row.exterior))
    gdf_clean = gdf[gdf.geometry.area >= 1.0e-6]
    # honour the caller-supplied buffer; fall back to the historic default
    distance = -0.0018 if buffer is None else buffer
    gdf_clean.geometry = gdf_clean.geometry.buffer(distance)
    gdf_clean.to_file(outfile)
def difference(infile1, infile2, outfile):
    """Write the symmetric difference of two vector files to ``outfile``."""
    result = gpd.overlay(gpd.read_file(infile1),
                         gpd.read_file(infile2),
                         how='symmetric_difference')
    result.to_file(outfile)
def buffer_shape(infile, outfile, buffer=None):
    """Buffer every feature of ``infile`` by ``buffer`` (map units) and
    write the result as a polygon shapefile with sequential integer ids."""
    with collection(infile, "r") as source:
        # buffering turns any geometry type into polygons
        out_schema = {'geometry': 'Polygon', 'properties': {'id': 'int'}}
        with collection(outfile, "w", "ESRI Shapefile",
                        out_schema, crs=source.crs) as sink:
            for i, feature in enumerate(source):
                buffered = shape(feature['geometry']).buffer(buffer)
                sink.write({
                    'properties': {
                        'id': i
                    },
                    'geometry': mapping(buffered)
                })
def plot_inventory(aoi, inventory_df, transparency=0.05, annotate = False):
    """Plot the scene footprints of an inventory over a world basemap.

    Args:
        aoi (str): the area of interest as WKT; drawn as a black outline
        inventory_df: GeoDataFrame of scene footprints to plot
        transparency (float): alpha value used for the footprints
        annotate (bool): label each footprint with its 'bid' attribute,
            rotated along the footprint's bounding-box diagonal
    """
    import matplotlib.pyplot as plt
    # load world borders for background
    world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))
    # import aoi as gdf
    aoi_gdf = wkt_to_gdf(aoi)
    # get bounds of AOI
    bounds = inventory_df.geometry.bounds
    # get world map as base
    base = world.plot(color='lightgrey', edgecolor='white')
    # plot aoi
    aoi_gdf.plot(ax=base, color='None', edgecolor='black')
    # plot footprints
    inventory_df.plot(ax=base, alpha=transparency)
    # set bounds: footprint extent plus a 2-degree margin
    plt.xlim([bounds.minx.min()-2, bounds.maxx.max()+2])
    plt.ylim([bounds.miny.min()-2, bounds.maxy.max()+2])
    plt.grid(color='grey', linestyle='-', linewidth=0.2)
    if annotate:
        import math
        for idx, row in inventory_df.iterrows():
            # print([row['geometry'].bounds[0],row['geometry'].bounds[3]])
            # place the label at the footprint centroid
            coord = [row['geometry'].centroid.x, row['geometry'].centroid.y]
            x1, y2, x2, y1 = row['geometry'].bounds
            angle = math.degrees(math.atan2((y2 - y1), (x2 - x1)))
            # rint(angle)
            # NOTE(review): the ``s=`` keyword of Axes.annotate was renamed
            # to ``text=`` in newer matplotlib — verify against the pinned
            # matplotlib version.
            plt.annotate(s=row['bid'], xy=coord, rotation=angle + 5, size=10, color='red', horizontalalignment='center')
| 28.940909 | 120 | 0.598084 | import os
import sys
import json
from functools import partial
import osr
import ogr
import pyproj
import geopandas as gpd
from shapely.ops import transform
from shapely.wkt import loads
from shapely.geometry import Point, Polygon, mapping, shape
from fiona import collection
from fiona.crs import from_epsg
def get_epsg(prjfile):
prj_file = open(prjfile, 'r')
prj_txt = prj_file.read()
srs = osr.SpatialReference()
srs.ImportFromESRI([prj_txt])
srs.AutoIdentifyEPSG()
return srs.GetAuthorityCode(None)
def get_proj4(prjfile):
prj_file = open(prjfile, 'r')
prj_string = prj_file.read()
if '\"Lambert_Conformal_Conic\"' in prj_string:
print(' ERROR: It seems you used an ESRI generated shapefile'
' with Lambert Conformal Conic projection. ')
print(' This one is not compatible with Open Standard OGR/GDAL'
' tools used here. ')
print(' Reproject your shapefile to a standard Lat/Long projection'
' and try again')
exit(1)
srs = osr.SpatialReference()
srs.ImportFromESRI([prj_string])
return srs.ExportToProj4()
def epsg_to_wkt_projection(epsg_code):
spatial_ref = osr.SpatialReference()
spatial_ref.ImportFromEPSG(epsg_code)
return spatial_ref.ExpotToWkt()
def reproject_geometry(geom, inproj4, out_epsg):
geom = ogr.CreateGeometryFromWkt(geom)
spatial_ref_in = osr.SpatialReference()
spatial_ref_in.ImportFromProj4(inproj4)
spatial_ref_out = osr.SpatialReference()
spatial_ref_out.ImportFromEPSG(int(out_epsg))
coord_transform = osr.CoordinateTransformation(spatial_ref_in,
spatial_ref_out)
try:
geom.Transform(coord_transform)
except:
print(' ERROR: Not able to transform the geometry')
sys.exit()
return geom
def geodesic_point_buffer(lat, lon, meters, envelope=False):
proj_wgs84 = pyproj.Proj(init='epsg:4326')
aeqd_proj = '+proj=aeqd +lat_0={lat} +lon_0={lon} +x_0=0 +y_0=0'
project = partial(
pyproj.transform,
pyproj.Proj(aeqd_proj.format(lat=lat, lon=lon)),
proj_wgs84)
buf = Point(0, 0).buffer(meters)
if envelope is True:
geom = Polygon(transform(project, buf).exterior.coords[:]).envelope
else:
geom = Polygon(transform(project, buf).exterior.coords[:])
return geom.to_wkt()
def latlon_to_wkt(lat, lon, buffer_degree=None, buffer_meter=None, envelope=False):
    '''Build a WKT string for a lat/lon point, optionally buffered.

    :param lat: latitude of the point
    :param lon: longitude of the point
    :param buffer_degree: buffer radius in decimal degrees (optional)
    :param buffer_meter: buffer radius in meters (optional)
    :param envelope: if True, return the bounding box of the buffer
    :return: WKT string of the (possibly buffered) point
    '''
    if buffer_degree is None and buffer_meter is None:
        aoi_wkt = 'POINT ({} {})'.format(lon, lat)
    # explicit None checks: the original used truthiness, so a buffer of 0
    # fell through every branch and raised UnboundLocalError on aoi_wkt
    elif buffer_degree is not None:
        aoi_geom = loads('POINT ({} {})'.format(lon, lat)).buffer(buffer_degree)
        if envelope:
            aoi_geom = aoi_geom.envelope
        aoi_wkt = aoi_geom.to_wkt()
    else:
        aoi_wkt = geodesic_point_buffer(lat, lon, buffer_meter, envelope)
    return aoi_wkt
def wkt_manipulations(wkt, buffer=None, convex=False, envelope=False):
    '''Optionally buffer, convex-hull and/or envelope a WKT geometry.

    The operations are applied in that order and the result is returned
    as a WKT string.

    :param wkt: input geometry as WKT
    :param buffer: buffer distance (same units as the geometry), optional
    :param convex: if True, take the convex hull
    :param envelope: if True, replace the geometry by its bounding box
    :return: WKT string of the manipulated geometry
    '''
    geometry = ogr.CreateGeometryFromWkt(wkt)
    if buffer:
        geometry = geometry.Buffer(buffer)
    if convex:
        geometry = geometry.ConvexHull()
    if envelope:
        # GetEnvelope returns the tuple (minX, maxX, minY, maxY);
        # rebuild it as a closed rectangle polygon
        env = geometry.GetEnvelope()
        rect = 'POLYGON (({} {}, {} {}, {} {}, {} {}, {} {}, {} {}))'.format(
            env[1], env[3], env[0], env[3], env[0], env[2],
            env[1], env[2], env[1], env[3], env[1], env[3])
        geometry = ogr.CreateGeometryFromWkt(rect)
    return geometry.ExportToWkt()
def shp_to_wkt(shapefile, buffer=None, convex=False, envelope=False):
    '''Read a shapefile, reproject to WGS84 if needed, and return WKT.

    All features are merged into one geometry collection; the optional
    buffer/convex/envelope manipulations are applied at the end.

    :param shapefile: path to the shapefile (a sibling .prj is expected)
    :param buffer: optional buffer distance forwarded to wkt_manipulations
    :param convex: if True, take the convex hull
    :param envelope: if True, take the bounding box
    :return: WKT string of the merged (and manipulated) geometry
    '''
    shp_path = os.path.abspath(shapefile)
    proj4 = get_proj4(shp_path[:-4] + '.prj')
    layer_name = os.path.basename(shapefile)[:-4]
    data_source = ogr.Open(os.path.abspath(shapefile))
    layer = data_source.GetLayerByName(layer_name)
    # merge every feature into one geometry collection
    merged = ogr.Geometry(ogr.wkbGeometryCollection)
    for feature in layer:
        merged.AddGeometry(feature.GetGeometryRef())
    wkt = merged.ExportToWkt()
    if proj4 != '+proj=longlat +datum=WGS84 +no_defs':
        print(' INFO: Reprojecting AOI file to Lat/Long (WGS84)')
        wkt = reproject_geometry(wkt, proj4, 4326).ExportToWkt()
    return wkt_manipulations(wkt, buffer=buffer, convex=convex,
                             envelope=envelope)
def kml_to_wkt(kmlfile):
    '''Return the WKT of the last feature in a KML file.

    :param kmlfile: path to the KML file
    :return: WKT string of the feature geometry, or None for an empty layer
    '''
    shp = ogr.Open(os.path.abspath(kmlfile))
    # GetLayerByName() requires a layer-name argument and raised a
    # TypeError when called without one; take the first layer instead
    lyr = shp.GetLayer(0)
    # initialise so an empty layer returns None instead of raising
    # UnboundLocalError
    wkt = None
    for feat in lyr:
        geom = feat.GetGeometryRef()
        wkt = str(geom)
    return wkt
def latlon_to_shp(lon, lat, shapefile):
    '''Write a single lat/lon point to an ESRI shapefile.

    :param lon: longitude of the point
    :param lat: latitude of the point
    :param shapefile: output shapefile path
    '''
    shapefile = str(shapefile)
    point_schema = {'geometry': 'Point',
                    'properties': {'id': 'str'}}
    point_geom = loads('POINT ({} {})'.format(lon, lat))
    with collection(shapefile, "w",
                    crs=from_epsg(4326),
                    driver="ESRI Shapefile",
                    schema=point_schema) as output:
        record = {'geometry': mapping(point_geom),
                  'properties': {'id': '1'}}
        output.write(record)
def shp_to_gdf(shapefile):
    '''Load a shapefile into a GeoDataFrame, reprojected to WGS84.

    :param shapefile: path to the shapefile (a sibling .prj is expected)
    :return: geopandas.GeoDataFrame in EPSG:4326
    '''
    gdf = gpd.GeoDataFrame.from_file(shapefile)
    prjfile = shapefile[:-4] + '.prj'
    proj4 = get_proj4(prjfile)
    if proj4 != '+proj=longlat +datum=WGS84 +no_defs':
        print(' INFO: reprojecting AOI layer to WGS84.')
        # overwrite the CRS with the one parsed from the .prj file,
        # then reproject to lat/lon
        gdf.crs = (proj4)
        gdf = gdf.to_crs({'init': 'epsg:4326'})
    return gdf
def wkt_to_gdf(wkt):
    '''Turn a WKT string into a GeoDataFrame, one row per geometry part.

    :param wkt: geometry as a WKT string
    :return: geopandas.GeoDataFrame (CRS is only set on some branches —
             presumably all inputs are WGS84; verify against callers)
    '''
    geometry = loads(wkt)
    if geometry.geom_type == 'Point':
        # a bare point gets a small square footprint (0.05 degree buffer)
        data = {'id': ['1'],
                'geometry': loads(wkt).buffer(0.05).envelope}
        gdf = gpd.GeoDataFrame(data)
    elif geometry.geom_type == 'Polygon':
        data = {'id': ['1'],
                'geometry': loads(wkt)}
        gdf = gpd.GeoDataFrame(data)
    elif geometry.geom_type == 'GeometryCollection' and len(geometry) == 1 and 'MULTIPOLYGON' in str(geometry):
        # single multipolygon wrapped in a collection: explode it so each
        # member polygon becomes its own row
        data = {'id': ['1'],
                'geometry': geometry}
        gdf = gpd.GeoDataFrame(data, crs = {'init': 'epsg:4326', 'no_defs': True})
        ids, feats =[], []
        for i, feat in enumerate(gdf.geometry.values[0]):
            ids.append(i)
            feats.append(feat)
        gdf = gpd.GeoDataFrame({'id': ids,
                                'geometry': feats},
                               geometry='geometry',
                               crs = gdf.crs
                               )
    elif geometry.geom_type == 'GeometryCollection' and len(geometry) == 1:
        # collection with a single (non-multipolygon) member: keep as one row
        data = {'id': ['1'],
                'geometry': geometry}
        gdf = gpd.GeoDataFrame(data, crs = {'init': 'epsg:4326', 'no_defs': True})
    else:
        # any other multi-part geometry: one row per part, ids start at 1
        i, ids, geoms = 1, [], []
        for geom in geometry:
            ids.append(i)
            geoms.append(geom)
            i += 1
        gdf = gpd.GeoDataFrame({'id': ids,
                                'geometry': geoms},
                               crs = {'init': 'epsg:4326', 'no_defs': True}
                               )
    return gdf
def wkt_to_shp(wkt, outfile):
    '''Convert a WKT string into a shapefile written to *outfile*.

    :param wkt: geometry as a WKT string
    :param outfile: path of the output file
    '''
    wkt_to_gdf(wkt).to_file(outfile)
def gdf_to_json_geometry(gdf):
    '''Return the non-empty GeoJSON geometries of a GeoDataFrame.

    :param gdf: object exposing a GeoJSON-producing ``to_json()`` method
    :return: list of GeoJSON geometry dicts (features with a null/empty
             geometry are skipped)
    '''
    features = json.loads(gdf.to_json())['features']
    geometries = []
    for feature in features:
        if feature['geometry']:
            geometries.append(feature['geometry'])
    return geometries
def inventory_to_shp(inventory_df, outfile):
    '''Write an inventory GeoDataFrame to a shapefile.

    The four date columns are cast to str in place first (note: this
    mutates the caller's frame), since the output format cannot store
    the original dtype.

    :param inventory_df: inventory GeoDataFrame
    :param outfile: path of the output file
    '''
    for date_col in ('acquisitiondate', 'ingestiondate',
                     'beginposition', 'endposition'):
        inventory_df[date_col] = inventory_df[date_col].astype(str)
    inventory_df.to_file(outfile)
def exterior(infile, outfile, buffer=None):
    '''Keep only the exterior ring of each polygon, drop slivers, shrink
    the outline slightly, and write the result to *outfile*.

    :param infile: input vector file readable by geopandas
    :param outfile: path of the output file
    :param buffer: unused; kept for interface compatibility
    '''
    gdf = gpd.read_file(infile, crs={'init': 'EPSG:4326'})
    # replace each polygon by its outer ring (drops interior holes)
    gdf.geometry = gdf.geometry.apply(lambda row: Polygon(row.exterior))
    # drop sliver polygons; .copy() so the assignment below works on an
    # independent frame instead of a view (pandas SettingWithCopy)
    gdf_clean = gdf[gdf.geometry.area >= 1.0e-6].copy()
    # small negative buffer in degrees to shrink the outline
    gdf_clean.geometry = gdf_clean.geometry.buffer(-0.0018)
    gdf_clean.to_file(outfile)
def difference(infile1, infile2, outfile):
    '''Write the symmetric difference of two vector files to *outfile*.

    :param infile1: first input vector file
    :param infile2: second input vector file
    :param outfile: path of the output file
    '''
    first = gpd.read_file(infile1)
    second = gpd.read_file(infile2)
    sym_diff = gpd.overlay(first, second, how='symmetric_difference')
    sym_diff.to_file(outfile)
def buffer_shape(infile, outfile, buffer=None):
    '''Buffer every feature of *infile* and write the resulting polygons
    to *outfile* as an ESRI shapefile.

    :param infile: input vector file readable by fiona
    :param outfile: output shapefile path
    :param buffer: buffer distance in the units of the input CRS
    '''
    with collection(infile, "r") as in_shape:
        out_schema = {'geometry': 'Polygon', 'properties': {'id': 'int'}}
        crs = in_shape.crs
        with collection(
                outfile, "w", "ESRI Shapefile", out_schema, crs=crs) as output:
            # features are re-numbered sequentially in the output
            for i, point in enumerate(in_shape):
                buffered = shape(point['geometry']).buffer(buffer)
                output.write({
                    'properties': {'id': i},
                    'geometry': mapping(buffered),
                })
def plot_inventory(aoi, inventory_df, transparency=0.05, annotate = False):
    '''Plot an inventory of footprints over a world map with the AOI.

    :param aoi: area of interest as a WKT string
    :param inventory_df: GeoDataFrame of footprints (needs a 'bid' column
                         when annotate is True)
    :param transparency: alpha value for the footprint layer
    :param annotate: if True, label each footprint with its 'bid'
    '''
    import matplotlib.pyplot as plt
    world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))
    aoi_gdf = wkt_to_gdf(aoi)
    bounds = inventory_df.geometry.bounds
    base = world.plot(color='lightgrey', edgecolor='white')
    aoi_gdf.plot(ax=base, color='None', edgecolor='black')
    inventory_df.plot(ax=base, alpha=transparency)
    # zoom to the inventory extent with a 2-degree margin
    plt.xlim([bounds.minx.min()-2, bounds.maxx.max()+2])
    plt.ylim([bounds.miny.min()-2, bounds.maxy.max()+2])
    plt.grid(color='grey', linestyle='-', linewidth=0.2)
    if annotate:
        import math
        for idx, row in inventory_df.iterrows():
            coord = [row['geometry'].centroid.x, row['geometry'].centroid.y]
            # NOTE(review): shapely .bounds is (minx, miny, maxx, maxy), so
            # this binds y2=miny and y1=maxy and the angle below comes out
            # negative — looks intentional (footprint tilt), but confirm
            x1, y2, x2, y1 = row['geometry'].bounds
            angle = math.degrees(math.atan2((y2 - y1), (x2 - x1)))
            # NOTE(review): annotate(s=...) is the old matplotlib keyword;
            # newer releases renamed it to text= — verify pinned version
            plt.annotate(s=row['bid'], xy=coord, rotation=angle + 5, size=10, color='red', horizontalalignment='center')
| true | true |
f7f4653ef1fedbd383fbf309cdb3cb4c7e7ab8d6 | 8,208 | py | Python | ceilometer/tests/alarm/test_notifier.py | rackerlabs/instrumented-ceilometer | 6ac5215ac0476120d9c99adcabc9cad0d32963da | [
"Apache-2.0"
] | 3 | 2021-04-18T00:37:48.000Z | 2021-07-21T10:20:11.000Z | ceilometer/tests/alarm/test_notifier.py | lexxito/monitoring | bec8dfb8d3610331c7ae5ec543e0b8da0948c164 | [
"Apache-2.0"
] | null | null | null | ceilometer/tests/alarm/test_notifier.py | lexxito/monitoring | bec8dfb8d3610331c7ae5ec543e0b8da0948c164 | [
"Apache-2.0"
] | null | null | null | # -*- encoding: utf-8 -*-
#
# Copyright © 2013 eNovance
#
# Author: Julien Danjou <julien@danjou.info>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urlparse
import mock
import requests
from ceilometer.alarm import service
from ceilometer.openstack.common import context
from ceilometer.openstack.common.fixture import config
from ceilometer.openstack.common import test
# Exact JSON body the REST notifier is expected to POST for the
# NOTIFICATION payload below (key order matters to the mock asserts).
DATA_JSON = ('{"current": "ALARM", "alarm_id": "foobar",'
             ' "reason": "what ?", "previous": "OK"}')
# Template alarm-transition notification reused by the tests; each test
# copies it and fills in its own 'actions' list.
NOTIFICATION = dict(alarm_id='foobar',
                    condition=dict(threshold=42),
                    reason='what ?',
                    previous='OK',
                    current='ALARM')
class TestAlarmNotifier(test.BaseTestCase):
    """Unit tests for AlarmNotifierService.notify_alarm dispatching.

    Each test builds a notification dict, routes it through the service,
    and asserts on the mocked notifier backends (test://, log://, REST).
    """
    def setUp(self):
        super(TestAlarmNotifier, self).setUp()
        self.CONF = self.useFixture(config.Config()).conf
        self.service = service.AlarmNotifierService('somehost', 'sometopic')
    @mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
    def test_init_host(self):
        # If we try to create a real RPC connection, init_host() never
        # returns. Mock it out so we can establish the service
        # configuration.
        with mock.patch('ceilometer.openstack.common.rpc.create_connection'):
            self.service.start()
    def test_notify_alarm(self):
        # The 'test://' notifier records every dispatched notification;
        # check it receives the split URL plus the alarm fields in order.
        data = {
            'actions': ['test://'],
            'alarm_id': 'foobar',
            'previous': 'OK',
            'current': 'ALARM',
            'reason': 'Everything is on fire',
        }
        self.service.notify_alarm(context.get_admin_context(), data)
        notifications = self.service.notifiers['test'].obj.notifications
        self.assertEqual(len(notifications), 1)
        self.assertEqual(notifications[0], (
            urlparse.urlsplit(data['actions'][0]),
            data['alarm_id'],
            data['previous'],
            data['current'],
            data['reason']))
    def test_notify_alarm_no_action(self):
        # A notification with no 'actions' key must be a silent no-op.
        self.service.notify_alarm(context.get_admin_context(), {})
    def test_notify_alarm_log_action(self):
        # The 'log://' notifier only logs; assert it does not raise.
        self.service.notify_alarm(context.get_admin_context(),
                                  {
                                      'actions': ['log://'],
                                      'alarm_id': 'foobar',
                                      'condition': {'threshold': 42},
                                  })
    @staticmethod
    def _fake_spawn_n(func, *args, **kwargs):
        # Run the would-be greenthread synchronously so the asserts
        # below can observe its side effects.
        func(*args, **kwargs)
    @staticmethod
    def _notification(action):
        # Copy of NOTIFICATION targeting a single action URL.
        notification = {}
        notification.update(NOTIFICATION)
        notification['actions'] = [action]
        return notification
    def test_notify_alarm_rest_action_ok(self):
        # Plain http action: POST the serialized alarm state as-is.
        action = 'http://host/action'
        with mock.patch('eventlet.spawn_n', self._fake_spawn_n):
            with mock.patch.object(requests, 'post') as poster:
                self.service.notify_alarm(context.get_admin_context(),
                                          self._notification(action))
                poster.assert_called_with(action, data=DATA_JSON)
    def test_notify_alarm_rest_action_with_ssl_client_cert(self):
        # Configured client certificate (no key) is forwarded to requests.
        action = 'https://host/action'
        certificate = "/etc/ssl/cert/whatever.pem"
        self.CONF.set_override("rest_notifier_certificate_file", certificate,
                               group='alarm')
        with mock.patch('eventlet.spawn_n', self._fake_spawn_n):
            with mock.patch.object(requests, 'post') as poster:
                self.service.notify_alarm(context.get_admin_context(),
                                          self._notification(action))
                poster.assert_called_with(action, data=DATA_JSON,
                                          cert=certificate, verify=True)
    def test_notify_alarm_rest_action_with_ssl_client_cert_and_key(self):
        # Certificate plus key is forwarded as a (cert, key) tuple.
        action = 'https://host/action'
        certificate = "/etc/ssl/cert/whatever.pem"
        key = "/etc/ssl/cert/whatever.key"
        self.CONF.set_override("rest_notifier_certificate_file", certificate,
                               group='alarm')
        self.CONF.set_override("rest_notifier_certificate_key", key,
                               group='alarm')
        with mock.patch('eventlet.spawn_n', self._fake_spawn_n):
            with mock.patch.object(requests, 'post') as poster:
                self.service.notify_alarm(context.get_admin_context(),
                                          self._notification(action))
                poster.assert_called_with(action, data=DATA_JSON,
                                          cert=(certificate, key), verify=True)
    def test_notify_alarm_rest_action_with_ssl_verify_disable_by_cfg(self):
        # Global config can turn SSL verification off.
        action = 'https://host/action'
        self.CONF.set_override("rest_notifier_ssl_verify", False,
                               group='alarm')
        with mock.patch('eventlet.spawn_n', self._fake_spawn_n):
            with mock.patch.object(requests, 'post') as poster:
                self.service.notify_alarm(context.get_admin_context(),
                                          self._notification(action))
                poster.assert_called_with(action, data=DATA_JSON,
                                          verify=False)
    def test_notify_alarm_rest_action_with_ssl_verify_disable(self):
        # A per-action query parameter can turn SSL verification off.
        action = 'https://host/action?ceilometer-alarm-ssl-verify=0'
        with mock.patch('eventlet.spawn_n', self._fake_spawn_n):
            with mock.patch.object(requests, 'post') as poster:
                self.service.notify_alarm(context.get_admin_context(),
                                          self._notification(action))
                poster.assert_called_with(action, data=DATA_JSON,
                                          verify=False)
    def test_notify_alarm_rest_action_with_ssl_verify_enable_by_user(self):
        # The per-action parameter overrides the global disable.
        action = 'https://host/action?ceilometer-alarm-ssl-verify=1'
        self.CONF.set_override("rest_notifier_ssl_verify", False,
                               group='alarm')
        with mock.patch('eventlet.spawn_n', self._fake_spawn_n):
            with mock.patch.object(requests, 'post') as poster:
                self.service.notify_alarm(context.get_admin_context(),
                                          self._notification(action))
                poster.assert_called_with(action, data=DATA_JSON,
                                          verify=True)
    @staticmethod
    def _fake_urlsplit(*args, **kwargs):
        # Force URL parsing to fail so the error path is exercised.
        raise Exception("Evil urlsplit!")
    def test_notify_alarm_invalid_url(self):
        # An unparsable action URL must be logged as an error, not raised.
        with mock.patch('ceilometer.openstack.common.network_utils.urlsplit',
                        self._fake_urlsplit):
            LOG = mock.MagicMock()
            with mock.patch('ceilometer.alarm.service.LOG', LOG):
                self.service.notify_alarm(
                    context.get_admin_context(),
                    {
                        'actions': ['no-such-action-i-am-sure'],
                        'alarm_id': 'foobar',
                        'condition': {'threshold': 42},
                    })
                self.assertTrue(LOG.error.called)
    def test_notify_alarm_invalid_action(self):
        # An unknown action scheme must be logged as an error, not raised.
        LOG = mock.MagicMock()
        with mock.patch('ceilometer.alarm.service.LOG', LOG):
            self.service.notify_alarm(
                context.get_admin_context(),
                {
                    'actions': ['no-such-action-i-am-sure://'],
                    'alarm_id': 'foobar',
                    'condition': {'threshold': 42},
                })
            self.assertTrue(LOG.error.called)
| 41.454545 | 79 | 0.58175 |
import urlparse
import mock
import requests
from ceilometer.alarm import service
from ceilometer.openstack.common import context
from ceilometer.openstack.common.fixture import config
from ceilometer.openstack.common import test
DATA_JSON = ('{"current": "ALARM", "alarm_id": "foobar",'
' "reason": "what ?", "previous": "OK"}')
NOTIFICATION = dict(alarm_id='foobar',
condition=dict(threshold=42),
reason='what ?',
previous='OK',
current='ALARM')
class TestAlarmNotifier(test.BaseTestCase):
def setUp(self):
super(TestAlarmNotifier, self).setUp()
self.CONF = self.useFixture(config.Config()).conf
self.service = service.AlarmNotifierService('somehost', 'sometopic')
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
def test_init_host(self):
with mock.patch('ceilometer.openstack.common.rpc.create_connection'):
self.service.start()
def test_notify_alarm(self):
data = {
'actions': ['test://'],
'alarm_id': 'foobar',
'previous': 'OK',
'current': 'ALARM',
'reason': 'Everything is on fire',
}
self.service.notify_alarm(context.get_admin_context(), data)
notifications = self.service.notifiers['test'].obj.notifications
self.assertEqual(len(notifications), 1)
self.assertEqual(notifications[0], (
urlparse.urlsplit(data['actions'][0]),
data['alarm_id'],
data['previous'],
data['current'],
data['reason']))
def test_notify_alarm_no_action(self):
self.service.notify_alarm(context.get_admin_context(), {})
def test_notify_alarm_log_action(self):
self.service.notify_alarm(context.get_admin_context(),
{
'actions': ['log://'],
'alarm_id': 'foobar',
'condition': {'threshold': 42},
})
@staticmethod
def _fake_spawn_n(func, *args, **kwargs):
func(*args, **kwargs)
@staticmethod
def _notification(action):
notification = {}
notification.update(NOTIFICATION)
notification['actions'] = [action]
return notification
def test_notify_alarm_rest_action_ok(self):
action = 'http://host/action'
with mock.patch('eventlet.spawn_n', self._fake_spawn_n):
with mock.patch.object(requests, 'post') as poster:
self.service.notify_alarm(context.get_admin_context(),
self._notification(action))
poster.assert_called_with(action, data=DATA_JSON)
def test_notify_alarm_rest_action_with_ssl_client_cert(self):
action = 'https://host/action'
certificate = "/etc/ssl/cert/whatever.pem"
self.CONF.set_override("rest_notifier_certificate_file", certificate,
group='alarm')
with mock.patch('eventlet.spawn_n', self._fake_spawn_n):
with mock.patch.object(requests, 'post') as poster:
self.service.notify_alarm(context.get_admin_context(),
self._notification(action))
poster.assert_called_with(action, data=DATA_JSON,
cert=certificate, verify=True)
def test_notify_alarm_rest_action_with_ssl_client_cert_and_key(self):
action = 'https://host/action'
certificate = "/etc/ssl/cert/whatever.pem"
key = "/etc/ssl/cert/whatever.key"
self.CONF.set_override("rest_notifier_certificate_file", certificate,
group='alarm')
self.CONF.set_override("rest_notifier_certificate_key", key,
group='alarm')
with mock.patch('eventlet.spawn_n', self._fake_spawn_n):
with mock.patch.object(requests, 'post') as poster:
self.service.notify_alarm(context.get_admin_context(),
self._notification(action))
poster.assert_called_with(action, data=DATA_JSON,
cert=(certificate, key), verify=True)
def test_notify_alarm_rest_action_with_ssl_verify_disable_by_cfg(self):
action = 'https://host/action'
self.CONF.set_override("rest_notifier_ssl_verify", False,
group='alarm')
with mock.patch('eventlet.spawn_n', self._fake_spawn_n):
with mock.patch.object(requests, 'post') as poster:
self.service.notify_alarm(context.get_admin_context(),
self._notification(action))
poster.assert_called_with(action, data=DATA_JSON,
verify=False)
def test_notify_alarm_rest_action_with_ssl_verify_disable(self):
action = 'https://host/action?ceilometer-alarm-ssl-verify=0'
with mock.patch('eventlet.spawn_n', self._fake_spawn_n):
with mock.patch.object(requests, 'post') as poster:
self.service.notify_alarm(context.get_admin_context(),
self._notification(action))
poster.assert_called_with(action, data=DATA_JSON,
verify=False)
def test_notify_alarm_rest_action_with_ssl_verify_enable_by_user(self):
action = 'https://host/action?ceilometer-alarm-ssl-verify=1'
self.CONF.set_override("rest_notifier_ssl_verify", False,
group='alarm')
with mock.patch('eventlet.spawn_n', self._fake_spawn_n):
with mock.patch.object(requests, 'post') as poster:
self.service.notify_alarm(context.get_admin_context(),
self._notification(action))
poster.assert_called_with(action, data=DATA_JSON,
verify=True)
@staticmethod
def _fake_urlsplit(*args, **kwargs):
raise Exception("Evil urlsplit!")
def test_notify_alarm_invalid_url(self):
with mock.patch('ceilometer.openstack.common.network_utils.urlsplit',
self._fake_urlsplit):
LOG = mock.MagicMock()
with mock.patch('ceilometer.alarm.service.LOG', LOG):
self.service.notify_alarm(
context.get_admin_context(),
{
'actions': ['no-such-action-i-am-sure'],
'alarm_id': 'foobar',
'condition': {'threshold': 42},
})
self.assertTrue(LOG.error.called)
def test_notify_alarm_invalid_action(self):
LOG = mock.MagicMock()
with mock.patch('ceilometer.alarm.service.LOG', LOG):
self.service.notify_alarm(
context.get_admin_context(),
{
'actions': ['no-such-action-i-am-sure://'],
'alarm_id': 'foobar',
'condition': {'threshold': 42},
})
self.assertTrue(LOG.error.called)
| true | true |
f7f465ffcebce3e7751317b4e3674b66de601daa | 1,313 | py | Python | nipype/interfaces/freesurfer/tests/test_auto_WatershedSkullStrip.py | HussainAther/nipype | 7e33d086fd5cea6ef6de99ee3e35929c1d5730d4 | [
"Apache-2.0"
] | null | null | null | nipype/interfaces/freesurfer/tests/test_auto_WatershedSkullStrip.py | HussainAther/nipype | 7e33d086fd5cea6ef6de99ee3e35929c1d5730d4 | [
"Apache-2.0"
] | null | null | null | nipype/interfaces/freesurfer/tests/test_auto_WatershedSkullStrip.py | HussainAther/nipype | 7e33d086fd5cea6ef6de99ee3e35929c1d5730d4 | [
"Apache-2.0"
] | null | null | null | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..preprocess import WatershedSkullStrip
def test_WatershedSkullStrip_inputs():
    # Auto-generated check (see the DO-NOT-EDIT header): every declared
    # input trait of WatershedSkullStrip must carry exactly this metadata.
    input_map = dict(args=dict(argstr='%s',
    ),
    brain_atlas=dict(argstr='-brain_atlas %s',
    position=-4,
    ),
    environ=dict(nohash=True,
    usedefault=True,
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    in_file=dict(argstr='%s',
    mandatory=True,
    position=-2,
    ),
    out_file=dict(argstr='%s',
    mandatory=True,
    position=-1,
    usedefault=True,
    ),
    subjects_dir=dict(),
    t1=dict(argstr='-T1',
    ),
    terminal_output=dict(deprecated='1.0.0',
    nohash=True,
    ),
    transform=dict(argstr='%s',
    position=-3,
    ),
    )
    inputs = WatershedSkullStrip.input_spec()
    # compare each expected metadata key/value against the trait definition
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(inputs.traits()[key], metakey) == value
def test_WatershedSkullStrip_outputs():
    # Auto-generated check: the interface exposes a single output trait.
    output_map = dict(out_file=dict(),
    )
    outputs = WatershedSkullStrip.output_spec()
    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(outputs.traits()[key], metakey) == value
| 25.25 | 67 | 0.644326 |
from __future__ import unicode_literals
from ..preprocess import WatershedSkullStrip
def test_WatershedSkullStrip_inputs():
input_map = dict(args=dict(argstr='%s',
),
brain_atlas=dict(argstr='-brain_atlas %s',
position=-4,
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='%s',
mandatory=True,
position=-2,
),
out_file=dict(argstr='%s',
mandatory=True,
position=-1,
usedefault=True,
),
subjects_dir=dict(),
t1=dict(argstr='-T1',
),
terminal_output=dict(deprecated='1.0.0',
nohash=True,
),
transform=dict(argstr='%s',
position=-3,
),
)
inputs = WatershedSkullStrip.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_WatershedSkullStrip_outputs():
output_map = dict(out_file=dict(),
)
outputs = WatershedSkullStrip.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| true | true |
f7f466bd72a0eaa0a511aa3d25a842958c9c502b | 94,414 | py | Python | selfdrive/car/toyota/values.py | birdman6450/openpilot | 2429f86bed65cca163122ac9f7a0c1d53597d463 | [
"MIT"
] | 1 | 2021-01-12T01:38:25.000Z | 2021-01-12T01:38:25.000Z | selfdrive/car/toyota/values.py | birdman6450/openpilot | 2429f86bed65cca163122ac9f7a0c1d53597d463 | [
"MIT"
] | null | null | null | selfdrive/car/toyota/values.py | birdman6450/openpilot | 2429f86bed65cca163122ac9f7a0c1d53597d463 | [
"MIT"
] | null | null | null | # flake8: noqa
from cereal import car
from selfdrive.car import dbc_dict
from selfdrive.config import Conversions as CV
Ecu = car.CarParams.Ecu
# 19 mph converted to m/s — presumably the minimum speed at which stock
# ACC operates on these cars; confirm against the interface/carcontroller.
MIN_ACC_SPEED = 19. * CV.MPH_TO_MS
# 3 mph hysteresis band (in m/s) applied around MIN_ACC_SPEED —
# NOTE(review): usage is outside this file; verify.
PEDAL_HYST_GAP = 3. * CV.MPH_TO_MS
class CarControllerParams:
  """Actuation limits used by the Toyota car controller."""
  ACCEL_HYST_GAP = 0.02  # don't change accel command for small oscillations within this value
  ACCEL_MAX = 2.0  # m/s2
  ACCEL_MIN = -3.5  # m/s2
  # normalization factor for the accel command (largest magnitude of the limits)
  ACCEL_SCALE = max(ACCEL_MAX, -ACCEL_MIN)
  STEER_MAX = 1500               # EPS torque command limit
  STEER_DELTA_UP = 10            # 1.5s time to peak torque
  STEER_DELTA_DOWN = 25          # always lower than 45 otherwise the Rav4 faults (Prius seems ok with 50)
  STEER_ERROR_MAX = 350          # max delta between torque cmd and torque motor
class CAR:
  """Platform identifier strings for supported Toyota/Lexus models.

  These exact strings are used as keys into the fingerprint/parameter
  tables below, so they must not be changed.
  """
  PRIUS = "TOYOTA PRIUS 2017"
  PRIUS_TSS2 = "TOYOTA PRIUS TSS2 2021"
  RAV4H = "TOYOTA RAV4 HYBRID 2017"
  RAV4 = "TOYOTA RAV4 2017"
  COROLLA = "TOYOTA COROLLA 2017"
  LEXUS_RX = "LEXUS RX 2016"
  LEXUS_RXH = "LEXUS RX HYBRID 2017"
  LEXUS_RX_TSS2 = "LEXUS RX 2020"
  LEXUS_RXH_TSS2 = "LEXUS RX HYBRID 2020"
  CHR = "TOYOTA C-HR 2018"
  CHRH = "TOYOTA C-HR HYBRID 2018"
  CAMRY = "TOYOTA CAMRY 2018"
  CAMRYH = "TOYOTA CAMRY HYBRID 2018"
  CAMRY_TSS2 = "TOYOTA CAMRY 2021"  # TSS 2.5
  CAMRYH_TSS2 = "TOYOTA CAMRY HYBRID 2021"
  HIGHLANDER = "TOYOTA HIGHLANDER 2017"
  HIGHLANDER_TSS2 = "TOYOTA HIGHLANDER 2020"
  HIGHLANDERH = "TOYOTA HIGHLANDER HYBRID 2018"
  HIGHLANDERH_TSS2 = "TOYOTA HIGHLANDER HYBRID 2020"
  AVALON = "TOYOTA AVALON 2016"
  AVALON_2019 = "TOYOTA AVALON 2019"
  AVALONH_2019 = "TOYOTA AVALON HYBRID 2019"
  RAV4_TSS2 = "TOYOTA RAV4 2019"
  COROLLA_TSS2 = "TOYOTA COROLLA TSS2 2019"
  # LSS2 Lexus UX Hybrid is same as a TSS2 Corolla Hybrid
  COROLLAH_TSS2 = "TOYOTA COROLLA HYBRID TSS2 2019"
  LEXUS_ES_TSS2 = "LEXUS ES 2019"
  LEXUS_ESH_TSS2 = "LEXUS ES HYBRID 2019"
  LEXUS_ESH = "LEXUS ES HYBRID 2018"
  SIENNA = "TOYOTA SIENNA 2018"
  LEXUS_IS = "LEXUS IS 2018"
  LEXUS_CTH = "LEXUS CT HYBRID 2018"
  RAV4H_TSS2 = "TOYOTA RAV4 HYBRID 2019"
  LEXUS_NXH = "LEXUS NX HYBRID 2018"
  LEXUS_NX = "LEXUS NX 2018"
  LEXUS_NX_TSS2 = "LEXUS NX 2020"
  MIRAI = "TOYOTA MIRAI 2021"  # TSS 2.5
  ALPHARD_TSS2 = "TOYOTA ALPHARD 2020"
  LEXUS_ISH = "LEXUS ISH 2017"
# Static CAN messages normally emitted by the (removed/bypassed) DSU.
# Each entry is (addr, cars, bus, 1/freq*100, vl): message address, the
# platforms it applies to, the CAN bus to send on, the send period in
# 10 ms units, and the fixed payload bytes.
STATIC_DSU_MSGS = [
  (0x128, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.RAV4, CAR.COROLLA, CAR.AVALON), 1,   3, b'\xf4\x01\x90\x83\x00\x37'),
  (0x128, (CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH), 1,   3, b'\x03\x00\x20\x00\x00\x52'),
  (0x141, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH, CAR.LEXUS_RX), 1,   2, b'\x00\x00\x00\x46'),
  (0x160, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH, CAR.LEXUS_RX), 1,   7, b'\x00\x00\x08\x12\x01\x31\x9c\x51'),
  (0x161, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.RAV4, CAR.COROLLA, CAR.AVALON, CAR.LEXUS_RX), 1,   7, b'\x00\x1e\x00\x00\x00\x80\x07'),
  (0X161, (CAR.HIGHLANDERH, CAR.HIGHLANDER, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH), 1,  7, b'\x00\x1e\x00\xd4\x00\x00\x5b'),
  (0x283, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH, CAR.LEXUS_RX), 0,   3, b'\x00\x00\x00\x00\x00\x00\x8c'),
  (0x2E6, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH), 0,   3, b'\xff\xf8\x00\x08\x7f\xe0\x00\x4e'),
  (0x2E7, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH), 0,   3, b'\xa8\x9c\x31\x9c\x00\x00\x00\x02'),
  (0x33E, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH), 0,  20, b'\x0f\xff\x26\x40\x00\x1f\x00'),
  (0x344, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH, CAR.LEXUS_RX), 0,   5, b'\x00\x00\x01\x00\x00\x00\x00\x50'),
  (0x365, (CAR.PRIUS, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.HIGHLANDERH), 0,  20, b'\x00\x00\x00\x80\x03\x00\x08'),
  (0x365, (CAR.RAV4, CAR.RAV4H, CAR.COROLLA, CAR.HIGHLANDER, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH, CAR.LEXUS_RX), 0,  20, b'\x00\x00\x00\x80\xfc\x00\x08'),
  (0x366, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.HIGHLANDERH), 0,  20, b'\x00\x00\x4d\x82\x40\x02\x00'),
  (0x366, (CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDER, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH, CAR.LEXUS_RX), 0,  20, b'\x00\x72\x07\xff\x09\xfe\x00'),
  (0x470, (CAR.PRIUS, CAR.LEXUS_RXH), 1, 100, b'\x00\x00\x02\x7a'),
  (0x470, (CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.RAV4H, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH), 1,  100, b'\x00\x00\x01\x79'),
  (0x4CB, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDERH, CAR.HIGHLANDER, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH, CAR.LEXUS_RX), 0, 100, b'\x0c\x00\x00\x00\x00\x00\x00\x00'),
]
FINGERPRINTS = {
CAR.RAV4: [{
36: 8, 37: 8, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 512: 6, 513: 6, 547: 8, 548: 8, 552: 4, 562: 4, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 740: 5, 767: 4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 918: 7, 921: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 4, 956: 8, 979: 2, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1008: 2, 1014: 8, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1227: 8, 1228: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1555: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1596: 8, 1597: 8, 1600: 8, 1656: 8, 1664: 8, 1728: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8, 2015: 8, 2024: 8
}],
CAR.RAV4H: [{
36: 8, 37: 8, 170: 8, 180: 8, 186: 4, 296: 8, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 547: 8, 548: 8, 550: 8, 552: 4, 560: 7, 562: 4, 581: 5, 608: 8, 610: 5, 643: 7, 705: 8, 713: 8, 725: 2, 740: 5, 767: 4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 918: 7, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 3, 955: 8, 956: 8, 979: 2, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1008: 2, 1014: 8, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1184: 8, 1185: 8, 1186: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1212: 8, 1227: 8, 1228: 8, 1232: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1555: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1596: 8, 1597: 8, 1600: 8, 1656: 8, 1664: 8, 1728: 8, 1745: 8, 1779: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
# Chinese RAV4
{
36: 8, 37: 8, 170: 8, 180: 8, 186: 4, 355: 5, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 512: 6, 513: 6, 547: 8, 548: 8, 552: 4, 562: 4, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 740: 5, 742: 8, 743: 8, 767: 4, 800: 8, 830: 7, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 921: 8, 922: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 979: 2, 998: 5, 999: 7, 1000: 8, 1001: 8, 1008: 2, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1207: 8, 1227: 8, 1235: 8, 1263: 8, 1279: 8, 1552: 8, 1553: 8, 1554: 8, 1555: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1596: 8, 1597: 8, 1600: 8, 1664: 8, 1728: 8, 1745: 8, 1779: 8
}],
CAR.PRIUS: [{
35: 8, 36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 512: 6, 513: 6, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 614: 8, 643: 7, 658: 8, 713: 8, 740: 5, 742: 8, 743: 8, 767: 4, 800: 8, 810: 2, 814: 8, 824: 2, 829: 2, 830: 7, 835: 8, 836: 8, 845: 5, 863: 8, 869: 7, 870: 7, 871: 2, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 974: 8, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1083: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1175: 8, 1227: 8, 1228: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1595: 8, 1777: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
#2019 LE
{
35: 8, 36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 614: 8, 643: 7, 658: 8, 713: 8, 740: 5, 742: 8, 743: 8, 767: 4, 800: 8, 810: 2, 814: 8, 829: 2, 830: 7, 835: 8, 836: 8, 863: 8, 865: 8, 869: 7, 870: 7, 871: 2, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 918: 8, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1083: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1175: 8, 1227: 8, 1228: 8, 1235: 8, 1237: 8, 1279: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1595: 8, 1777: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
# 2020 Prius Prime LE
{
35: 8, 36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 713: 8, 740: 5, 742: 8, 743: 8, 764: 8, 767: 4, 800: 8, 810: 2, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 863: 8, 865: 8, 869: 7, 870: 7, 871: 2, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 974: 8, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1227: 8, 1235: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1595: 8, 1649: 8, 1777: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
  # 2020 Prius Prime Limited
{
35: 8, 36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 614: 8, 643: 7, 658: 8, 713: 8, 740: 5, 742: 8, 743: 8, 767: 4, 800: 8, 810: 2, 814: 8, 824: 2, 829: 2, 830: 7, 835: 8, 836: 8, 863: 8, 865: 8, 869: 7, 870: 7, 871: 2, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 974: 8, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1083: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1175: 8, 1227: 8, 1228: 8, 1235: 8, 1237: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1595: 8, 1649: 8, 1777: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8, 2015: 8, 2024: 8, 2026: 8, 2027: 8, 2029: 8, 2030: 8, 2031: 8
},
  # 2020 Central Europe Prime
{
35: 8, 36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 713: 8, 740: 5, 742: 8, 743: 8, 764: 8, 767: 4, 800: 8, 810: 2, 818: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 845: 5, 863: 8, 865: 8, 869: 7, 870: 7, 871: 2, 889: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 8, 974: 8, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1227: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1595: 8, 1777: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8
},
  # 2017 German Prius
{
35: 8, 36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 614: 8, 643: 7, 658: 8, 713: 8, 740: 5, 742: 8, 743: 8, 767: 4, 800: 8, 810: 2, 814: 8, 829: 2, 830: 7, 835: 8, 836: 8, 845: 5, 863: 8, 869: 7, 870: 7, 871: 2, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1077: 8, 1082: 8, 1083: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1175: 8, 1227: 8, 1228: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1595: 8, 1777: 8, 1779: 8, 1792: 8, 1767: 4, 1863: 8, 1904: 8, 1912: 8, 1984: 8, 1988: 8, 1990: 8, 1992: 8, 1996: 8, 1998: 8, 2002: 8, 2010: 8, 2015: 8, 2016: 8, 2018: 8, 2024: 8, 2026: 8, 2030: 8
}],
CAR.PRIUS_TSS2: [{
36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 401: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 614: 8, 643: 7, 658: 8, 713: 8, 740: 5, 742: 8, 743: 8, 764: 8, 765: 8, 800: 8, 810: 2, 814: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 863: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 882: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1083: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1172: 8, 1175: 8, 1228: 8, 1235: 8, 1237: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1593: 8, 1595: 8, 1649: 8, 1653: 8, 1654: 8, 1655: 8, 1775: 8, 1777: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
}],
  # Corolla w/ added Pedal Support (512L and 513L)
CAR.COROLLA: [{
36: 8, 37: 8, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 512: 6, 513: 6, 547: 8, 548: 8, 552: 4, 608: 8, 610: 5, 643: 7, 705: 8, 740: 5, 767: 4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 2, 921: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 4, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1196: 8, 1227: 8, 1235: 8, 1279: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1596: 8, 1597: 8, 1600: 8, 1664: 8, 1728: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8, 2016: 8, 2017: 8, 2018: 8, 2019: 8, 2020: 8, 2021: 8, 2022: 8, 2023: 8, 2024: 8
}],
CAR.CAMRY: [
  # XLE and LE
{
36: 8, 37: 8, 119: 6, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 550: 8, 552: 4, 562: 6, 608: 8, 610: 8, 643: 7, 658: 8, 705: 8, 728: 8, 740: 5, 761: 8, 764: 8, 767: 4, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 822: 8, 824: 8, 830: 7, 835: 8, 836: 8, 869: 7, 870: 7, 871: 2, 888: 8, 889: 8, 891: 8, 898: 8, 900: 6, 902: 6, 905: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 976: 1, 983: 8, 984: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1011: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1082: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1228: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1412: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1595: 8, 1745: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1808: 8, 1816: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
  # XSE and SE
# TODO: get proper fingerprint in stock mode
{
36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 550: 8, 552: 4, 562: 6, 608: 8, 610: 8, 643: 7, 658: 8, 705: 8, 728: 8, 740: 5, 761: 8, 764: 8, 767: 4, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 822: 8, 824: 8, 830: 7, 835: 8, 836: 8, 869: 7, 870: 7, 888: 8, 889: 8, 891: 8, 898: 8, 900: 6, 902: 6, 905: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 976: 1, 983: 8, 984: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1011: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1082: 8, 1114: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1228: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1412: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1595: 8, 1745: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1808: 8, 1816: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
{
# 2019 XSE
36: 8, 37: 8, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 550: 8, 552: 4, 562: 6, 608: 8, 610: 8, 643: 7, 658: 8, 705: 8, 728: 8, 740: 5, 761: 8, 764: 8, 767: 4, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 822: 8, 824: 8, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 888: 8, 889: 8, 891: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 942: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 976: 1, 983: 8, 984: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1011: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1082: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1228: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1412: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1594: 8, 1595: 8, 1649: 8, 1745: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1792: 8, 1767: 4, 1808: 8, 1816: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1937: 8, 1945: 8, 1953: 8, 1961: 8, 1968: 8, 1976: 8, 1990: 8, 1998: 8, 2015: 8, 2016: 8, 2024: 8
}],
CAR.CAMRYH: [
  # SE, LE and LE with Blindspot Monitor
{
36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 713: 8, 728: 8, 740: 5, 761: 8, 764: 8, 767: 4, 800: 8, 810: 2, 812: 8, 818: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 889: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 983: 8, 984: 8, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1011: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1594: 8, 1595: 8, 1649: 8, 1745: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1808: 8, 1810: 8, 1816: 8, 1818: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
  # SL
{
36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 713: 8, 728: 8, 740: 5, 761: 8, 764: 8, 767: 4, 800: 8, 810: 2, 812: 8, 818: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 869: 7, 870: 7, 871: 2, 888: 8, 889: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1228: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1595: 8, 1745: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
  # XLE
{
36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 658: 8, 713: 8, 728: 8, 740: 5, 761: 8, 764: 8, 767: 4, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 869: 7, 870: 7, 871: 2, 888: 8, 889: 8, 898: 8, 900: 6, 902: 6, 905: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 983: 8, 984: 8, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1011: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1228: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1595: 8, 1745: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
# 2018 Chinese Camry Hybrid
{
36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 713: 8, 728: 8, 740: 5, 761: 8, 764: 8, 800: 8, 810: 2, 812: 8, 818: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 869: 7, 870: 7, 871: 2, 888: 8, 889: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1084: 8, 1085: 8, 1086: 8, 1112: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1235: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1595: 8, 1745: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8
}],
CAR.HIGHLANDER: [{
36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 238: 4, 355: 5, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 545: 5, 550: 8, 552: 4, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 740: 5, 767: 4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 921: 8, 922: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 979: 2, 998: 5, 999: 7, 1000: 8, 1001: 8, 1008: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1182: 8, 1183: 8, 1189: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1206: 8, 1207: 8, 1212: 8, 1227: 8, 1235: 8, 1237: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1599: 8, 1656: 8, 1666: 8, 1667: 8, 1728: 8, 1745: 8, 1779: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1984: 8, 1988: 8, 1992: 8, 1996: 8, 1990: 8, 1998: 8
},
# 2019 Highlander XLE
{
36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 238: 4, 355: 5, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 545: 5, 550: 8, 552: 4, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 740: 5, 767: 4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 921: 8, 922: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1008: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1182: 8, 1183: 8, 1189: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1206: 8, 1207: 8, 1212: 8, 1227: 8, 1235: 8, 1237: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1599: 8, 1656: 8, 1728: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
# 2017 Highlander Limited
{
36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 238: 4, 355: 5, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 545: 5, 550: 8, 552: 4, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 740: 5, 767: 4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 918: 7, 921: 8, 922: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 979: 2, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1008: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1182: 8, 1183: 8, 1189: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1206: 8, 1207: 8, 1212: 8, 1227: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1599: 8, 1656: 8, 1728: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
# 2018 Highlander Limited Platinum
{
36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 238: 4, 355: 5, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 545: 5, 550: 8, 552: 4, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 740: 5, 767: 4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 918: 7, 921: 8, 922: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 979: 2, 998: 5, 999: 7, 1000: 8, 1001: 8, 1008: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1182: 8, 1183: 8, 1189: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1206: 8, 1207: 8, 1212: 8, 1227: 8, 1235: 8, 1237: 8, 1263: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1585: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1599: 8, 1656: 8, 1728: 8, 1745: 8, 1779: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1988: 8, 1990: 8, 1996: 8, 1998: 8, 2015: 8, 2016: 8, 2024: 8
}],
CAR.HIGHLANDERH: [{
36: 8, 37: 8, 170: 8, 180: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 581: 5, 608: 8, 610: 5, 643: 7, 713: 8, 740: 5, 767: 4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 918: 7, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 3, 955: 8, 956: 8, 979: 2, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1112: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1184: 8, 1185: 8, 1186: 8, 1189: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1206: 8, 1212: 8, 1227: 8, 1232: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1552: 8, 1553: 8, 1554: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1599: 8, 1656: 8, 1728: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
{
# 2019 Highlander Hybrid Limited Platinum
36: 8, 37: 8, 170: 8, 180: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 581: 5, 608: 8, 610: 5, 643: 7, 713: 8, 740: 5, 767: 4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 918: 7, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 3, 955: 8, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1076: 8, 1077: 8, 1112: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1184: 8, 1185: 8, 1186: 8, 1189: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1206: 8, 1212: 8, 1227: 8, 1232: 8, 1235: 8, 1237: 8, 1263: 8, 1279: 8, 1552: 8, 1553: 8, 1554: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1599: 8, 1656: 8, 1666: 8, 1667: 8, 1728: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
}],
CAR.COROLLAH_TSS2: [
# 2019 Taiwan Altis Hybrid
{
36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 401: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 713: 8, 728: 8, 740: 5, 742: 8, 743: 8, 761: 8, 765: 8, 767: 4, 800: 8, 810: 2, 829: 2, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 885: 8, 896: 8, 898: 8, 918: 7, 921: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 987: 8, 993: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1082: 8, 1112: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1172: 8, 1235: 8, 1237: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1594: 8, 1595: 8, 1745: 8, 1775: 8, 1779: 8
},
# 2019 Chinese Levin Hybrid
{
36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 401: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 713: 8, 728: 8, 740: 5, 742: 8, 743: 8, 761: 8, 765: 8, 767: 4, 800: 8, 810: 2, 812: 8, 829: 2, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 885: 8, 896: 8, 898: 8, 921: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 993: 8, 1002: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1172: 8, 1235: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1594: 8, 1595: 8, 1600: 8, 1649: 8, 1745: 8, 1775: 8, 1779: 8
}
],
CAR.SIENNA: [
{
36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 545: 5, 548: 8, 550: 8, 552: 4, 562: 4, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 740: 5, 764: 8, 767: 4, 800: 8, 824: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 888: 8, 896: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 1, 918: 7, 921: 8, 933: 8, 944: 6, 945: 8, 951: 8, 955: 8, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1008: 2, 1014: 8, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1114: 8, 1160: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1182: 8, 1183: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1200: 8, 1201: 8, 1202: 8, 1203: 8, 1212: 8, 1227: 8, 1228: 8, 1235: 8, 1237: 8, 1279: 8, 1552: 8, 1553: 8, 1555: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1656: 8, 1664: 8, 1666: 8, 1667: 8, 1728: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
# XLE AWD 2018
{
36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 238: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 545: 5, 548: 8, 550: 8, 552: 4, 562: 4, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 740: 5, 764: 8, 767: 4, 800: 8, 824: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 1, 921: 8, 933: 8, 944: 6, 945: 8, 951: 8, 955: 8, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1008: 2, 1014: 8, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1114: 8, 1160: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1182: 8, 1183: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1200: 8, 1201: 8, 1202: 8, 1203: 8, 1212: 8, 1227: 8, 1235: 8, 1237: 8, 1279: 8, 1552: 8, 1553: 8, 1555: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1656: 8, 1664: 8, 1666: 8, 1667: 8, 1728: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
}],
  # dp - fake values, used only to generate the car-selection list
CAR.LEXUS_ISH: [{ 65535: 1 }],
}
FW_VERSIONS = {
CAR.AVALON: {
(Ecu.esp, 0x7b0, None): [
b'F152607060\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881510701300\x00\x00\x00\x00',
b'881510705100\x00\x00\x00\x00',
b'881510705200\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B41051\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x0230721100\x00\x00\x00\x00\x00\x00\x00\x00A0C01000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230721200\x00\x00\x00\x00\x00\x00\x00\x00A0C01000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702000\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0701100\x00\x00\x00\x00',
b'8646F0703000\x00\x00\x00\x00',
],
},
CAR.AVALON_2019: {
(Ecu.esp, 0x7b0, None): [
b'F152607140\x00\x00\x00\x00\x00\x00',
b'F152607171\x00\x00\x00\x00\x00\x00',
b'F152607110\x00\x00\x00\x00\x00\x00',
b'F152607180\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881510703200\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B41080\x00\x00\x00\x00\x00\x00',
b'8965B07010\x00\x00\x00\x00\x00\x00',
b'8965B41090\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x700, None): [
b'\x01896630735100\x00\x00\x00\x00',
b'\x01896630725300\x00\x00\x00\x00',
b'\x01896630738000\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0702100\x00\x00\x00\x00',
],
},
CAR.AVALONH_2019: {
(Ecu.esp, 0x7b0, None): [
b'F152641040\x00\x00\x00\x00\x00\x00',
b'F152641061\x00\x00\x00\x00\x00\x00',
b'F152641050\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881510704200\x00\x00\x00\x00',
b'881514107100\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B07010\x00\x00\x00\x00\x00\x00',
b'8965B41090\x00\x00\x00\x00\x00\x00',
b'8965B41070\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x700, None): [
b'\x02896630724000\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x02896630737000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x02896630728000\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0702100\x00\x00\x00\x00',
],
},
CAR.CAMRY: {
(Ecu.engine, 0x700, None): [
b'\x018966306L3100\x00\x00\x00\x00',
b'\x018966306L4200\x00\x00\x00\x00',
b'\x018966306L5200\x00\x00\x00\x00',
b'\x018966306P8000\x00\x00\x00\x00',
b'\x018966306Q3100\x00\x00\x00\x00',
b'\x018966306Q4000\x00\x00\x00\x00',
b'\x018966306Q4100\x00\x00\x00\x00',
b'\x018966306Q4200\x00\x00\x00\x00',
b'\x018966333Q9200\x00\x00\x00\x00',
b'\x018966333P3100\x00\x00\x00\x00',
b'\x018966333P3200\x00\x00\x00\x00',
b'\x018966333P4200\x00\x00\x00\x00',
b'\x018966333P4300\x00\x00\x00\x00',
b'\x018966333P4400\x00\x00\x00\x00',
b'\x018966333P4500\x00\x00\x00\x00',
b'\x018966333P4700\x00\x00\x00\x00',
b'\x018966333Q6000\x00\x00\x00\x00',
b'\x018966333Q6200\x00\x00\x00\x00',
b'\x018966333Q6300\x00\x00\x00\x00',
b'\x018966333W6000\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x02333P1100\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'8821F0601200 ',
b'8821F0601300 ',
b'8821F0602000 ',
b'8821F0603300 ',
b'8821F0604100 ',
b'8821F0605200 ',
b'8821F0607200 ',
b'8821F0608000 ',
b'8821F0608200 ',
b'8821F0609100 ',
],
(Ecu.esp, 0x7b0, None): [
b'F152606210\x00\x00\x00\x00\x00\x00',
b'F152606230\x00\x00\x00\x00\x00\x00',
b'F152606270\x00\x00\x00\x00\x00\x00',
b'F152606290\x00\x00\x00\x00\x00\x00',
b'F152606410\x00\x00\x00\x00\x00\x00',
b'F152633540\x00\x00\x00\x00\x00\x00',
b'F152633A10\x00\x00\x00\x00\x00\x00',
b'F152633A20\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B33540\x00\x00\x00\x00\x00\x00',
b'8965B33542\x00\x00\x00\x00\x00\x00',
b'8965B33580\x00\x00\x00\x00\x00\x00',
b'8965B33581\x00\x00\x00\x00\x00\x00',
b'8965B33621\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [ # Same as 0x791
b'8821F0601200 ',
b'8821F0601300 ',
b'8821F0602000 ',
b'8821F0603300 ',
b'8821F0604100 ',
b'8821F0605200 ',
b'8821F0607200 ',
b'8821F0608000 ',
b'8821F0608200 ',
b'8821F0609100 ',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0601200 ',
b'8646F0601300 ',
b'8646F0601400 ',
b'8646F0603400 ',
b'8646F0604100 ',
b'8646F0605000 ',
b'8646F0606000 ',
b'8646F0606100 ',
b'8646F0607100 ',
],
},
CAR.CAMRYH: {
(Ecu.engine, 0x700, None): [
b'\x018966306Q6000\x00\x00\x00\x00',
b'\x018966333N1100\x00\x00\x00\x00',
b'\x018966333N4300\x00\x00\x00\x00',
b'\x018966333X0000\x00\x00\x00\x00',
b'\x018966333X4000\x00\x00\x00\x00',
b'\x01896633T16000\x00\x00\x00\x00',
b'\x028966306B2100\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306B2300\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306N8100\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306N8200\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306N8300\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306N8400\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306R5000\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306R5000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966306R6000\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306R6000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966306S0000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966306S0100\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966306S1100\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152633214\x00\x00\x00\x00\x00\x00',
b'F152633660\x00\x00\x00\x00\x00\x00',
b'F152633712\x00\x00\x00\x00\x00\x00',
b'F152633713\x00\x00\x00\x00\x00\x00',
b'F152633B51\x00\x00\x00\x00\x00\x00',
b'F152633B60\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'8821F0601200 ',
b'8821F0601300 ',
b'8821F0603400 ',
b'8821F0604000 ',
b'8821F0604200 ',
b'8821F0605200 ',
b'8821F0606200 ',
b'8821F0607200 ',
b'8821F0608000 ',
b'8821F0608200 ',
b'8821F0609000 ',
b'8821F0609100 ',
],
(Ecu.eps, 0x7a1, None): [
b'8965B33540\x00\x00\x00\x00\x00\x00',
b'8965B33542\x00\x00\x00\x00\x00\x00',
b'8965B33550\x00\x00\x00\x00\x00\x00',
b'8965B33551\x00\x00\x00\x00\x00\x00',
b'8965B33580\x00\x00\x00\x00\x00\x00',
b'8965B33581\x00\x00\x00\x00\x00\x00',
b'8965B33611\x00\x00\x00\x00\x00\x00',
b'8965B33621\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [ # Same as 0x791
b'8821F0601200 ',
b'8821F0601300 ',
b'8821F0603400 ',
b'8821F0604000 ',
b'8821F0604200 ',
b'8821F0605200 ',
b'8821F0606200 ',
b'8821F0607200 ',
b'8821F0608000 ',
b'8821F0608200 ',
b'8821F0609000 ',
b'8821F0609100 ',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0601200 ',
b'8646F0601300 ',
b'8646F0601400 ',
b'8646F0603400 ',
b'8646F0603500 ',
b'8646F0604100 ',
b'8646F0605000 ',
b'8646F0606000 ',
b'8646F0606100 ',
b'8646F0607000 ',
b'8646F0607100 ',
],
},
CAR.CAMRY_TSS2: {
(Ecu.eps, 0x7a1, None): [
b'8965B33630\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F152606370\x00\x00\x00\x00\x00\x00',
b'\x01F152606390\x00\x00\x00\x00\x00\x00',
b'\x01F152606400\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x700, None): [
b'\x018966306Q5000\x00\x00\x00\x00',
b'\x018966306T3100\x00\x00\x00\x00',
b'\x018966306T3200\x00\x00\x00\x00',
b'\x018966306T4100\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 15): [
b'\x018821F6201200\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 109): [
b'\x028646F3305200\x00\x00\x00\x008646G5301200\x00\x00\x00\x00',
b'\x028646F3305300\x00\x00\x00\x008646G5301200\x00\x00\x00\x00',
],
},
CAR.CAMRYH_TSS2: {
(Ecu.eps, 0x7a1, None): [
b'8965B33630\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152633D00\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x700, None): [
b'\x018966306Q6000\x00\x00\x00\x00',
b'\x018966306Q7000\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 15): [
b'\x018821F6201200\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 109): [
b'\x028646F3305200\x00\x00\x00\x008646G5301200\x00\x00\x00\x00',
b'\x028646F3305300\x00\x00\x00\x008646G5301200\x00\x00\x00\x00',
],
},
CAR.CHR: {
(Ecu.engine, 0x700, None): [
b'\x01896631017100\x00\x00\x00\x00',
b'\x01896631017200\x00\x00\x00\x00',
b'\x0189663F413100\x00\x00\x00\x00',
b'\x0189663F414100\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'8821F0W01000 ',
b'8821F0W01100 ',
b'8821FF401600 ',
b'8821FF404000 ',
b'8821FF404100 ',
b'8821FF405100 ',
b'8821FF406000 ',
b'8821FF407100 ',
],
(Ecu.esp, 0x7b0, None): [
b'F152610020\x00\x00\x00\x00\x00\x00',
b'F152610153\x00\x00\x00\x00\x00\x00',
b'F152610210\x00\x00\x00\x00\x00\x00',
b'F1526F4034\x00\x00\x00\x00\x00\x00',
b'F1526F4044\x00\x00\x00\x00\x00\x00',
b'F1526F4073\x00\x00\x00\x00\x00\x00',
b'F1526F4121\x00\x00\x00\x00\x00\x00',
b'F1526F4122\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B10011\x00\x00\x00\x00\x00\x00',
b'8965B10040\x00\x00\x00\x00\x00\x00',
b'8965B10070\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x0331024000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203202\x00\x00\x00\x00',
b'\x0331024000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203302\x00\x00\x00\x00',
b'\x0331036000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203302\x00\x00\x00\x00',
b'\x033F401100\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203102\x00\x00\x00\x00',
b'\x033F424000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203202\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F0W01000 ',
b'8821FF401600 ',
b'8821FF404000 ',
b'8821FF404100 ',
b'8821FF405100 ',
b'8821FF406000 ',
b'8821FF407100 ',
b'8821F0W01100 ',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646FF401800 ',
b'8646FF404000 ',
b'8646FF406000 ',
b'8646FF407000 ',
],
},
CAR.CHRH: {
(Ecu.engine, 0x700, None): [
b'\x02896631013200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x0289663F405000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x0289663F418000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x0289663F423000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x0289663F431000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x0189663F438000\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152610013\x00\x00\x00\x00\x00\x00',
b'F152610014\x00\x00\x00\x00\x00\x00',
b'F152610040\x00\x00\x00\x00\x00\x00',
b'F152610190\x00\x00\x00\x00\x00\x00',
b'F152610200\x00\x00\x00\x00\x00\x00',
b'F152610230\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'8821F0W01000 ',
b'8821FF402300 ',
b'8821FF402400 ',
b'8821FF404000 ',
b'8821FF406000 ',
b'8821FF407100 ',
],
(Ecu.eps, 0x7a1, None): [
b'8965B10011\x00\x00\x00\x00\x00\x00',
b'8965B10020\x00\x00\x00\x00\x00\x00',
b'8965B10040\x00\x00\x00\x00\x00\x00',
b'8965B10050\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F0W01000 ',
b'8821FF402300 ',
b'8821FF402400 ',
b'8821FF404000 ',
b'8821FF406000 ',
b'8821FF407100 ',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646FF402100 ',
b'8646FF404000 ',
b'8646FF406000 ',
b'8646FF407000 ',
],
},
CAR.COROLLA: {
(Ecu.engine, 0x7e0, None): [
b'\x0230ZC2000\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC2100\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC2200\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC2300\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC3000\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC3100\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC3200\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC3300\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0330ZC1200\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00895231203202\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881510201100\x00\x00\x00\x00',
b'881510201200\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152602190\x00\x00\x00\x00\x00\x00',
b'F152602191\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B02181\x00\x00\x00\x00\x00\x00',
b'8965B02191\x00\x00\x00\x00\x00\x00',
b'8965B48150\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0201101\x00\x00\x00\x00',
b'8646F0201200\x00\x00\x00\x00',
],
},
CAR.COROLLA_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630ZG2000\x00\x00\x00\x00',
b'\x01896630ZG5000\x00\x00\x00\x00',
b'\x01896630ZG5100\x00\x00\x00\x00',
b'\x01896630ZG5200\x00\x00\x00\x00',
b'\x01896630ZG5300\x00\x00\x00\x00',
b'\x01896630ZP2000\x00\x00\x00\x00',
b'\x01896630ZQ5000\x00\x00\x00\x00',
b'\x018966312L8000\x00\x00\x00\x00',
b'\x018966312M0000\x00\x00\x00\x00',
b'\x018966312M9000\x00\x00\x00\x00',
b'\x018966312P9000\x00\x00\x00\x00',
b'\x018966312P9100\x00\x00\x00\x00',
b'\x018966312P9200\x00\x00\x00\x00',
b'\x018966312Q2300\x00\x00\x00\x00',
b'\x018966312R0100\x00\x00\x00\x00',
b'\x018966312R1000\x00\x00\x00\x00',
b'\x018966312R1100\x00\x00\x00\x00',
b'\x018966312R3100\x00\x00\x00\x00',
b'\x018966312S5000\x00\x00\x00\x00',
b'\x018966312S7000\x00\x00\x00\x00',
b'\x018966312W3000\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x0230ZN4000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x03312M3000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203402\x00\x00\x00\x00',
b'\x03312N6000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203202\x00\x00\x00\x00',
b'\x03312N6000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203302\x00\x00\x00\x00',
b'\x03312N6000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203402\x00\x00\x00\x00',
b'\x03312N6100\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203302\x00\x00\x00\x00',
b'\x03312N6100\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203402\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'\x018965B12350\x00\x00\x00\x00\x00\x00',
b'\x018965B12470\x00\x00\x00\x00\x00\x00',
b'\x018965B12490\x00\x00\x00\x00\x00\x00',
b'\x018965B12500\x00\x00\x00\x00\x00\x00',
b'\x018965B12520\x00\x00\x00\x00\x00\x00',
b'\x018965B12530\x00\x00\x00\x00\x00\x00',
b'\x018965B1255000\x00\x00\x00\x00',
b'8965B12361\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F152602280\x00\x00\x00\x00\x00\x00',
b'\x01F152602560\x00\x00\x00\x00\x00\x00',
b'\x01F152602590\x00\x00\x00\x00\x00\x00',
b'\x01F152602650\x00\x00\x00\x00\x00\x00',
b'\x01F152612641\x00\x00\x00\x00\x00\x00',
b'\x01F152612651\x00\x00\x00\x00\x00\x00',
b'\x01F152612B10\x00\x00\x00\x00\x00\x00',
b'\x01F152612B51\x00\x00\x00\x00\x00\x00',
b'\x01F152612B60\x00\x00\x00\x00\x00\x00',
b'\x01F152612B61\x00\x00\x00\x00\x00\x00',
b'\x01F152612B90\x00\x00\x00\x00\x00\x00',
b'\x01F152612C00\x00\x00\x00\x00\x00\x00',
b'F152602191\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F12010D0\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1201100\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1201200\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1201300\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F1201400\x00\x00\x00\x008646G2601500\x00\x00\x00\x00',
b'\x028646F1202000\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F1202100\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F1202200\x00\x00\x00\x008646G2601500\x00\x00\x00\x00',
],
},
CAR.COROLLAH_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630ZJ1000\x00\x00\x00\x00',
b'\x01896630ZU8000\x00\x00\x00\x00',
b'\x01896637621000\x00\x00\x00\x00',
b'\x01896637624000\x00\x00\x00\x00',
b'\x01896637626000\x00\x00\x00\x00',
b'\x01896637648000\x00\x00\x00\x00',
b'\x02896630ZJ5000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896630ZN8000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896630ZQ3000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896630ZR2000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896630ZT8000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896630ZT9000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966312Q3000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966312Q4000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x038966312L7000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF1205001\x00\x00\x00\x00',
b'\x038966312N1000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF1203001\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B12361\x00\x00\x00\x00\x00\x00',
b'8965B12451\x00\x00\x00\x00\x00\x00',
b'8965B76012\x00\x00\x00\x00\x00\x00',
b'8965B76050\x00\x00\x00\x00\x00\x00',
b'\x018965B12350\x00\x00\x00\x00\x00\x00',
b'\x018965B12470\x00\x00\x00\x00\x00\x00',
b'\x018965B12490\x00\x00\x00\x00\x00\x00',
b'\x018965B12500\x00\x00\x00\x00\x00\x00',
b'\x018965B12520\x00\x00\x00\x00\x00\x00',
b'\x018965B12530\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152612590\x00\x00\x00\x00\x00\x00',
b'F152612691\x00\x00\x00\x00\x00\x00',
b'F152612692\x00\x00\x00\x00\x00\x00',
b'F152612700\x00\x00\x00\x00\x00\x00',
b'F152612710\x00\x00\x00\x00\x00\x00',
b'F152612790\x00\x00\x00\x00\x00\x00',
b'F152612800\x00\x00\x00\x00\x00\x00',
b'F152612820\x00\x00\x00\x00\x00\x00',
b'F152612840\x00\x00\x00\x00\x00\x00',
b'F152612A00\x00\x00\x00\x00\x00\x00',
b'F152612A10\x00\x00\x00\x00\x00\x00',
b'F152642540\x00\x00\x00\x00\x00\x00',
b'F152676293\x00\x00\x00\x00\x00\x00',
b'F152676303\x00\x00\x00\x00\x00\x00',
b'F152676304\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F12010D0\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1201100\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1201300\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F1201400\x00\x00\x00\x008646G2601500\x00\x00\x00\x00',
b'\x028646F1202000\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F1202100\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F4203400\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F76020C0\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F7603100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F7603200\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
],
},
CAR.HIGHLANDER: {
(Ecu.engine, 0x700, None): [
b'\x01896630E09000\x00\x00\x00\x00',
b'\x01896630E43000\x00\x00\x00\x00',
b'\x01896630E43100\x00\x00\x00\x00',
b'\x01896630E43200\x00\x00\x00\x00',
b'\x01896630E44200\x00\x00\x00\x00',
b'\x01896630E45000\x00\x00\x00\x00',
b'\x01896630E45100\x00\x00\x00\x00',
b'\x01896630E45200\x00\x00\x00\x00',
b'\x01896630E46200\x00\x00\x00\x00',
b'\x01896630E74000\x00\x00\x00\x00',
b'\x01896630E75000\x00\x00\x00\x00',
b'\x01896630E76000\x00\x00\x00\x00',
b'\x01896630E77000\x00\x00\x00\x00',
b'\x01896630E83000\x00\x00\x00\x00',
b'\x01896630E84000\x00\x00\x00\x00',
b'\x01896630E85000\x00\x00\x00\x00',
b'\x01896630E86000\x00\x00\x00\x00',
b'\x01896630E88000\x00\x00\x00\x00',
b'\x01896630EA0000\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B48140\x00\x00\x00\x00\x00\x00',
b'8965B48150\x00\x00\x00\x00\x00\x00',
b'8965B48210\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [b'F15260E011\x00\x00\x00\x00\x00\x00'],
(Ecu.dsu, 0x791, None): [
b'881510E01100\x00\x00\x00\x00',
b'881510E01200\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0E01200\x00\x00\x00\x00',
b'8646F0E01300\x00\x00\x00\x00',
],
},
CAR.HIGHLANDERH: {
(Ecu.eps, 0x7a1, None): [
b'8965B48160\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152648541\x00\x00\x00\x00\x00\x00',
b'F152648542\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x0230E40000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230E40100\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230EA2000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230EA2100\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0E01200\x00\x00\x00\x00',
b'8646F0E01300\x00\x00\x00\x00',
],
},
CAR.HIGHLANDER_TSS2: {
(Ecu.eps, 0x7a1, None): [
b'8965B48241\x00\x00\x00\x00\x00\x00',
b'8965B48310\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F15260E051\x00\x00\x00\x00\x00\x00',
b'\x01F15260E110\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x700, None): [
b'\x01896630E62100\x00\x00\x00\x00',
b'\x01896630E62200\x00\x00\x00\x00',
b'\x01896630E64100\x00\x00\x00\x00',
b'\x01896630E64200\x00\x00\x00\x00',
b'\x01896630EB1000\x00\x00\x00\x00',
b'\x01896630EB1100\x00\x00\x00\x00',
b'\x01896630EB2000\x00\x00\x00\x00',
b'\x01896630EB2100\x00\x00\x00\x00',
b'\x01896630EB2200\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301400\x00\x00\x00\x00',
b'\x018821F6201200\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F0E02100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4803000\x00\x00\x00\x008646G5301200\x00\x00\x00\x00',
],
},
CAR.HIGHLANDERH_TSS2: {
(Ecu.eps, 0x7a1, None): [
b'8965B48241\x00\x00\x00\x00\x00\x00',
b'8965B48310\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F15264872300\x00\x00\x00\x00',
b'\x01F15264872400\x00\x00\x00\x00',
b'\x01F15264872500\x00\x00\x00\x00',
b'\x01F152648C6300\x00\x00\x00\x00',
],
(Ecu.engine, 0x700, None): [
b'\x01896630EA1000\000\000\000\000',
b'\x01896630EA1000\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
b'\x02896630E66000\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
b'\x02896630EB3000\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
b'\x02896630EB3100\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301400\x00\x00\x00\x00',
b'\x018821F6201200\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F0E02100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4803000\x00\x00\x00\x008646G5301200\x00\x00\x00\x00',
],
},
CAR.LEXUS_IS: {
(Ecu.engine, 0x700, None): [
b'\x018966353M7100\x00\x00\x00\x00',
b'\x018966353Q2000\x00\x00\x00\x00',
b'\x018966353Q2300\x00\x00\x00\x00',
b'\x018966353R1100\x00\x00\x00\x00',
b'\x018966353R7100\x00\x00\x00\x00',
b'\x018966353R8100\x00\x00\x00\x00',
b'\x018966353Q4000\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x0232480000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02353P7000\x00\x00\x00\x00\x00\x00\x00\x00530J5000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02353P9000\x00\x00\x00\x00\x00\x00\x00\x00553C1000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152653301\x00\x00\x00\x00\x00\x00',
b'F152653310\x00\x00\x00\x00\x00\x00',
b'F152653330\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881515306200\x00\x00\x00\x00',
b'881515306400\x00\x00\x00\x00',
b'881515306500\x00\x00\x00\x00',
b'881515307400\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B53270\x00\x00\x00\x00\x00\x00',
b'8965B53271\x00\x00\x00\x00\x00\x00',
b'8965B53280\x00\x00\x00\x00\x00\x00',
b'8965B53281\x00\x00\x00\x00\x00\x00',
b'8965B53311\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702300\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F5301300\x00\x00\x00\x00',
b'8646F5301400\x00\x00\x00\x00',
b'8646F5301200\x00\x00\x00\x00',
],
},
CAR.PRIUS: {
(Ecu.engine, 0x700, None): [
b'\x02896634761000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634761100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634761200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634762000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634763000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634763100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634765000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634765100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634769100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634769200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634770000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634774000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634774100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634774200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634782000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634784000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966347A0000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966347A5000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966347A8000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966347B0000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x03896634759100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634759200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634759200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634759300\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634760000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701002\x00\x00\x00\x00',
b'\x03896634760000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634760000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634760100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634760200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634760200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634760300\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634768000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4703001\x00\x00\x00\x00',
b'\x03896634768000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4703002\x00\x00\x00\x00',
b'\x03896634768100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4703002\x00\x00\x00\x00',
b'\x03896634785000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4705001\x00\x00\x00\x00',
b'\x03896634785000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710001\x00\x00\x00\x00',
b'\x03896634786000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4705001\x00\x00\x00\x00',
b'\x03896634786000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710001\x00\x00\x00\x00',
b'\x03896634789000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4703002\x00\x00\x00\x00',
b'\x038966347A3000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x038966347A3000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4707001\x00\x00\x00\x00',
b'\x038966347B6000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710001\x00\x00\x00\x00',
b'\x038966347B7000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710001\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B47021\x00\x00\x00\x00\x00\x00',
b'8965B47022\x00\x00\x00\x00\x00\x00',
b'8965B47023\x00\x00\x00\x00\x00\x00',
b'8965B47050\x00\x00\x00\x00\x00\x00',
b'8965B47060\x00\x00\x00\x00\x00\x00', # This is the EPS with good angle sensor
],
(Ecu.esp, 0x7b0, None): [
b'F152647290\x00\x00\x00\x00\x00\x00',
b'F152647300\x00\x00\x00\x00\x00\x00',
b'F152647310\x00\x00\x00\x00\x00\x00',
b'F152647414\x00\x00\x00\x00\x00\x00',
b'F152647415\x00\x00\x00\x00\x00\x00',
b'F152647416\x00\x00\x00\x00\x00\x00',
b'F152647417\x00\x00\x00\x00\x00\x00',
b'F152647470\x00\x00\x00\x00\x00\x00',
b'F152647490\x00\x00\x00\x00\x00\x00',
b'F152647683\x00\x00\x00\x00\x00\x00',
b'F152647684\x00\x00\x00\x00\x00\x00',
b'F152647862\x00\x00\x00\x00\x00\x00',
b'F152647863\x00\x00\x00\x00\x00\x00',
b'F152647864\x00\x00\x00\x00\x00\x00',
b'F152647865\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514702300\x00\x00\x00\x00',
b'881514703100\x00\x00\x00\x00',
b'881514704100\x00\x00\x00\x00',
b'881514706000\x00\x00\x00\x00',
b'881514706100\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702000\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4701300\x00\x00\x00\x00',
b'8646F4702001\x00\x00\x00\x00',
b'8646F4702100\x00\x00\x00\x00',
b'8646F4702200\x00\x00\x00\x00',
b'8646F4705000\x00\x00\x00\x00',
b'8646F4705200\x00\x00\x00\x00',
],
},
CAR.RAV4: {
(Ecu.engine, 0x7e0, None): [
b'\x02342Q1000\x00\x00\x00\x00\x00\x00\x00\x0054212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q1100\x00\x00\x00\x00\x00\x00\x00\x0054212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q1200\x00\x00\x00\x00\x00\x00\x00\x0054212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q1300\x00\x00\x00\x00\x00\x00\x00\x0054212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q2000\x00\x00\x00\x00\x00\x00\x00\x0054213000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q2100\x00\x00\x00\x00\x00\x00\x00\x0054213000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q2200\x00\x00\x00\x00\x00\x00\x00\x0054213000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q4000\x00\x00\x00\x00\x00\x00\x00\x0054215000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B42063\x00\x00\x00\x00\x00\x00',
b'8965B42073\x00\x00\x00\x00\x00\x00',
b'8965B42082\x00\x00\x00\x00\x00\x00',
b'8965B42083\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F15260R102\x00\x00\x00\x00\x00\x00',
b'F15260R103\x00\x00\x00\x00\x00\x00',
b'F152642493\x00\x00\x00\x00\x00\x00',
b'F152642492\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514201200\x00\x00\x00\x00',
b'881514201300\x00\x00\x00\x00',
b'881514201400\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702000\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4201200\x00\x00\x00\x00',
b'8646F4202001\x00\x00\x00\x00',
b'8646F4202100\x00\x00\x00\x00',
b'8646F4204000\x00\x00\x00\x00',
],
},
CAR.RAV4H: {
(Ecu.engine, 0x7e0, None): [
b'\x02342N9000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342N9100\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342P0000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B42102\x00\x00\x00\x00\x00\x00',
b'8965B42103\x00\x00\x00\x00\x00\x00',
b'8965B42112\x00\x00\x00\x00\x00\x00',
b'8965B42162\x00\x00\x00\x00\x00\x00',
b'8965B42163\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152642090\x00\x00\x00\x00\x00\x00',
b'F152642110\x00\x00\x00\x00\x00\x00',
b'F152642120\x00\x00\x00\x00\x00\x00',
b'F152642400\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514202200\x00\x00\x00\x00',
b'881514202300\x00\x00\x00\x00',
b'881514202400\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702000\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4201100\x00\x00\x00\x00',
b'8646F4201200\x00\x00\x00\x00',
b'8646F4202001\x00\x00\x00\x00',
b'8646F4202100\x00\x00\x00\x00',
b'8646F4204000\x00\x00\x00\x00',
],
},
CAR.RAV4_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630R58000\x00\x00\x00\x00',
b'\x01896630R58100\x00\x00\x00\x00',
b'\x018966342E2000\x00\x00\x00\x00',
b'\x018966342M8000\x00\x00\x00\x00',
b'\x018966342S9000\x00\x00\x00\x00',
b'\x018966342T1000\x00\x00\x00\x00',
b'\x018966342T6000\x00\x00\x00\x00',
b'\x018966342T9000\x00\x00\x00\x00',
b'\x018966342U4000\x00\x00\x00\x00',
b'\x018966342U4100\x00\x00\x00\x00',
b'\x018966342V3000\x00\x00\x00\x00',
b'\x018966342V3100\x00\x00\x00\x00',
b'\x018966342V3200\x00\x00\x00\x00',
b'\x01896634A05000\x00\x00\x00\x00',
b'\x01896634A19000\x00\x00\x00\x00',
b'\x01896634A19100\x00\x00\x00\x00',
b'\x01896634A20000\x00\x00\x00\x00',
b'\x01896634A20100\x00\x00\x00\x00',
b'\x01896634A22000\x00\x00\x00\x00',
b'\x01896634A22100\x00\x00\x00\x00',
b'\x01896634A30000\x00\x00\x00\x00',
b'\x01896634A44000\x00\x00\x00\x00',
b'\x01896634A45000\x00\x00\x00\x00',
b'\x01896634A46000\x00\x00\x00\x00',
b'\x028966342M7000\x00\x00\x00\x00897CF1201001\x00\x00\x00\x00',
b'\x028966342T0000\x00\x00\x00\x00897CF1201001\x00\x00\x00\x00',
b'\x028966342V1000\x00\x00\x00\x00897CF1202001\x00\x00\x00\x00',
b'\x028966342Y8000\x00\x00\x00\x00897CF1201001\x00\x00\x00\x00',
b'\x02896634A18000\x00\x00\x00\x00897CF1201001\x00\x00\x00\x00',
b'\x02896634A18100\x00\x00\x00\x00897CF1201001\x00\x00\x00\x00',
b'\x02896634A43000\x00\x00\x00\x00897CF4201001\x00\x00\x00\x00',
b'\x02896634A47000\x00\x00\x00\x00897CF4201001\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F15260R210\x00\x00\x00\x00\x00\x00',
b'\x01F15260R220\x00\x00\x00\x00\x00\x00',
b'\x01F15260R290\x00\x00\x00\x00\x00\x00',
b'\x01F15260R300\x00\x00\x00\x00\x00\x00',
b'\x01F152642551\x00\x00\x00\x00\x00\x00',
b'\x01F152642561\x00\x00\x00\x00\x00\x00',
b'\x01F152642700\x00\x00\x00\x00\x00\x00',
b'\x01F152642701\x00\x00\x00\x00\x00\x00',
b'\x01F152642710\x00\x00\x00\x00\x00\x00',
b'\x01F152642711\x00\x00\x00\x00\x00\x00',
b'\x01F152642750\x00\x00\x00\x00\x00\x00',
b'\x01F152642751\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B42170\x00\x00\x00\x00\x00\x00',
b'8965B42171\x00\x00\x00\x00\x00\x00',
b'8965B42180\x00\x00\x00\x00\x00\x00',
b'8965B42181\x00\x00\x00\x00\x00\x00',
b'\x028965B0R01200\x00\x00\x00\x008965B0R02200\x00\x00\x00\x00',
b'\x028965B0R01300\x00\x00\x00\x008965B0R02300\x00\x00\x00\x00',
b'\x028965B0R01400\x00\x00\x00\x008965B0R02400\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4203200\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F4203300\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F4203400\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4203500\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4203700\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F4203800\x00\x00\x00\x008646G2601500\x00\x00\x00\x00',
],
},
CAR.RAV4H_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896634A15000\x00\x00\x00\x00',
b'\x018966342M5000\x00\x00\x00\x00',
b'\x018966342W8000\x00\x00\x00\x00',
b'\x018966342X5000\x00\x00\x00\x00',
b'\x018966342X6000\x00\x00\x00\x00',
b'\x01896634A25000\x00\x00\x00\x00',
b'\x018966342W5000\x00\x00\x00\x00',
b'\x028966342W4001\x00\x00\x00\x00897CF1203001\x00\x00\x00\x00',
b'\x02896634A13001\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
b'\x02896634A13101\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
b'\x02896634A14001\x00\x00\x00\x00897CF1203001\x00\x00\x00\x00',
b'\x02896634A23001\x00\x00\x00\x00897CF1203001\x00\x00\x00\x00',
b'\x02896634A14001\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
b'\x02896634A14101\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152642291\x00\x00\x00\x00\x00\x00',
b'F152642290\x00\x00\x00\x00\x00\x00',
b'F152642330\x00\x00\x00\x00\x00\x00',
b'F152642331\x00\x00\x00\x00\x00\x00',
b'F152642531\x00\x00\x00\x00\x00\x00',
b'F152642532\x00\x00\x00\x00\x00\x00',
b'F152642520\x00\x00\x00\x00\x00\x00',
b'F152642521\x00\x00\x00\x00\x00\x00',
b'F152642540\x00\x00\x00\x00\x00\x00',
b'F152642541\x00\x00\x00\x00\x00\x00',
b'F152642542\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B42170\x00\x00\x00\x00\x00\x00',
b'8965B42171\x00\x00\x00\x00\x00\x00',
b'8965B42180\x00\x00\x00\x00\x00\x00',
b'8965B42181\x00\x00\x00\x00\x00\x00',
b'\x028965B0R01200\x00\x00\x00\x008965B0R02200\x00\x00\x00\x00',
b'\x028965B0R01300\x00\x00\x00\x008965B0R02300\x00\x00\x00\x00',
b'\x028965B0R01400\x00\x00\x00\x008965B0R02400\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4203200\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F4203300\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F4203400\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4203500\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4203700\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F4203800\x00\x00\x00\x008646G2601500\x00\x00\x00\x00',
],
},
CAR.LEXUS_ES_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630EC9100\x00\x00\x00\x00',
b'\x018966333T5000\x00\x00\x00\x00',
b'\x018966333T5100\x00\x00\x00\x00',
b'\x018966333X6000\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F152606281\x00\x00\x00\x00\x00\x00',
b'\x01F152606340\x00\x00\x00\x00\x00\x00',
b'\x01F15260E031\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B33252\x00\x00\x00\x00\x00\x00',
b'8965B33590\x00\x00\x00\x00\x00\x00',
b'8965B48271\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F33030D0\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F3303200\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F3304100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4810200\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
],
},
CAR.SIENNA: {
(Ecu.engine, 0x700, None): [
b'\x01896630832100\x00\x00\x00\x00',
b'\x01896630832200\x00\x00\x00\x00',
b'\x01896630838000\x00\x00\x00\x00',
b'\x01896630838100\x00\x00\x00\x00',
b'\x01896630842000\x00\x00\x00\x00',
b'\x01896630843000\x00\x00\x00\x00',
b'\x01896630851000\x00\x00\x00\x00',
b'\x01896630851100\x00\x00\x00\x00',
b'\x01896630852100\x00\x00\x00\x00',
b'\x01896630859000\x00\x00\x00\x00',
b'\x01896630860000\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B45070\x00\x00\x00\x00\x00\x00',
b'8965B45080\x00\x00\x00\x00\x00\x00',
b'8965B45082\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152608130\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881510801100\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702200\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0801100\x00\x00\x00\x00',
],
},
CAR.LEXUS_CTH: {
(Ecu.dsu, 0x791, None): [
b'881517601100\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152676144\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x0237635000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F7601100\x00\x00\x00\x00',
],
},
CAR.LEXUS_ESH_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x028966333S8000\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966333T0100\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966333V4000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x02896633T09000\x00\x00\x00\x00897CF3307001\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152633423\x00\x00\x00\x00\x00\x00',
b'F152633680\x00\x00\x00\x00\x00\x00',
b'F152633681\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B33252\x00\x00\x00\x00\x00\x00',
b'8965B33590\x00\x00\x00\x00\x00\x00',
b'8965B33690\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F33030D0\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F3303100\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F3304100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F3304200\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
],
},
CAR.LEXUS_ESH: {
(Ecu.engine, 0x7e0, None): [
b'\x02333M4200\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152633171\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881513310400\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B33512\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4701100\x00\x00\x00\x00',
b'8821F4701300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F3302001\x00\x00\x00\x00',
b'8646F3302200\x00\x00\x00\x00',
],
},
CAR.LEXUS_NX: {
(Ecu.engine, 0x700, None): [
b'\x01896637851000\x00\x00\x00\x00',
b'\x01896637852000\x00\x00\x00\x00',
b'\x01896637854000\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152678130\x00\x00\x00\x00\x00\x00',
b'F152678140\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881517803100\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B78060\x00\x00\x00\x00\x00\x00',
b'8965B78080\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F7801100\x00\x00\x00\x00',
],
},
CAR.LEXUS_NX_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x018966378B2100\x00\x00\x00\x00',
b'\x018966378G3000\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F152678221\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B78120\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b"\x018821F3301400\x00\x00\x00\x00",
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F78030A0\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F7803100\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
],
},
CAR.LEXUS_NXH: {
(Ecu.engine, 0x7e0, None): [
b'\x0237882000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0237841000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0237886000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0237880000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152678160\x00\x00\x00\x00\x00\x00',
b'F152678170\x00\x00\x00\x00\x00\x00',
b'F152678171\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881517804300\x00\x00\x00\x00',
b'881517804100\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B78060\x00\x00\x00\x00\x00\x00',
b'8965B78080\x00\x00\x00\x00\x00\x00',
b'8965B78100\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702300\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F7801300\x00\x00\x00\x00',
b'8646F7801100\x00\x00\x00\x00',
],
},
CAR.LEXUS_RX: {
(Ecu.engine, 0x700, None): [
b'\x01896630E36200\x00\x00\x00\x00',
b'\x01896630E36300\x00\x00\x00\x00',
b'\x01896630E37200\x00\x00\x00\x00',
b'\x01896630E37300\x00\x00\x00\x00',
b'\x01896630E41000\x00\x00\x00\x00',
b'\x01896630E41100\x00\x00\x00\x00',
b'\x01896630E41200\x00\x00\x00\x00',
b'\x01896630EA3100\x00\x00\x00\x00',
b'\x01896630EA4100\x00\x00\x00\x00',
b'\x01896630EA4300\x00\x00\x00\x00',
b'\x01896630EA6300\x00\x00\x00\x00',
b'\x018966348R1300\x00\x00\x00\x00',
b'\x018966348R8500\x00\x00\x00\x00',
b'\x018966348W1300\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152648472\x00\x00\x00\x00\x00\x00',
b'F152648473\x00\x00\x00\x00\x00\x00',
b'F152648492\x00\x00\x00\x00\x00\x00',
b'F152648493\x00\x00\x00\x00\x00\x00',
b'F152648474\x00\x00\x00\x00\x00\x00',
b'F152648630\x00\x00\x00\x00\x00\x00',
b'F152648494\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514810300\x00\x00\x00\x00',
b'881514810500\x00\x00\x00\x00',
b'881514810700\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B0E011\x00\x00\x00\x00\x00\x00',
b'8965B0E012\x00\x00\x00\x00\x00\x00',
b'8965B48102\x00\x00\x00\x00\x00\x00',
b'8965B48112\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4701000\x00\x00\x00\x00',
b'8821F4701100\x00\x00\x00\x00',
b'8821F4701200\x00\x00\x00\x00',
b'8821F4701300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4801100\x00\x00\x00\x00',
b'8646F4801200\x00\x00\x00\x00',
b'8646F4802001\x00\x00\x00\x00',
b'8646F4802100\x00\x00\x00\x00',
b'8646F4802200\x00\x00\x00\x00',
b'8646F4809000\x00\x00\x00\x00',
],
},
CAR.LEXUS_RXH: {
(Ecu.engine, 0x7e0, None): [
b'\x02348J7000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348N0000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348Q4000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348Q4100\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348T1100\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348T3000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348V6000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348Z3000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152648361\x00\x00\x00\x00\x00\x00',
b'F152648501\x00\x00\x00\x00\x00\x00',
b'F152648502\x00\x00\x00\x00\x00\x00',
b'F152648504\x00\x00\x00\x00\x00\x00',
b'F152648740\x00\x00\x00\x00\x00\x00',
b'F152648A30\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514811300\x00\x00\x00\x00',
b'881514811500\x00\x00\x00\x00',
b'881514811700\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B0E011\x00\x00\x00\x00\x00\x00',
b'8965B0E012\x00\x00\x00\x00\x00\x00',
b'8965B48111\x00\x00\x00\x00\x00\x00',
b'8965B48112\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4701000\x00\x00\x00\x00',
b'8821F4701100\x00\x00\x00\x00',
b'8821F4701200\x00\x00\x00\x00',
b'8821F4701300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4801200\x00\x00\x00\x00',
b'8646F4802001\x00\x00\x00\x00',
b'8646F4802100\x00\x00\x00\x00',
b'8646F4802200\x00\x00\x00\x00',
b'8646F4809000\x00\x00\x00\x00',
],
},
CAR.LEXUS_RX_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630EC9000\x00\x00\x00\x00',
b'\x01896634D12000\x00\x00\x00\x00',
b'\x01896630EB0000\x00\x00\x00\x00',
b'\x01896630EA9000\x00\x00\x00\x00',
b'\x01896630ED0000\x00\x00\x00\x00',
b'\x018966348W9000\x00\x00\x00\x00',
b'\x01896634D12100\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F152648801\x00\x00\x00\x00\x00\x00',
b'\x01F15260E031\x00\x00\x00\x00\x00\x00',
b'\x01F15260E041\x00\x00\x00\x00\x00\x00',
b'\x01F152648781\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B48271\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4810200\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F4810100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
],
},
CAR.LEXUS_RXH_TSS2: {
(Ecu.engine, 0x7e0, None): [
b'\x02348X8000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0234D14000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0234D16000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152648831\x00\x00\x00\x00\x00\x00',
b'F152648D00\x00\x00\x00\x00\x00\x00',
b'F152648D60\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B48271\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4810200\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F4810100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
],
},
CAR.PRIUS_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x028966347C8000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x038966347C0000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710101\x00\x00\x00\x00',
b'\x038966347C1000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710101\x00\x00\x00\x00',
b'\x038966347C5000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4707101\x00\x00\x00\x00',
b'\x038966347C5100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4707101\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152647500\x00\x00\x00\x00\x00\x00',
b'F152647510\x00\x00\x00\x00\x00\x00',
b'F152647520\x00\x00\x00\x00\x00\x00',
b'F152647521\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B47070\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4707000\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F4710000\x00\x00\x00\x008646G2601500\x00\x00\x00\x00',
],
},
CAR.MIRAI: {
(Ecu.esp, 0x7D1, None): [b'\x01898A36203000\x00\x00\x00\x00',],
(Ecu.esp, 0x7B0, None): [b'\x01F15266203200\x00\x00\x00\x00',], # a second ESP ECU
(Ecu.eps, 0x7A1, None): [b'\x028965B6204100\x00\x00\x00\x008965B6203100\x00\x00\x00\x00',],
(Ecu.fwdRadar, 0x750, 0xf): [b'\x018821F6201200\x00\x00\x00\x00',],
(Ecu.fwdCamera, 0x750, 0x6d): [b'\x028646F6201400\x00\x00\x00\x008646G5301200\x00\x00\x00\x00',],
},
CAR.ALPHARD_TSS2: {
(Ecu.engine, 0x7e0, None): [b'\x0235883000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00',],
(Ecu.eps, 0x7a1, None): [b'8965B58040\x00\x00\x00\x00\x00\x00',],
(Ecu.fwdRadar, 0x750, 0xf): [b'\x018821F3301400\x00\x00\x00\x00',],
(Ecu.fwdCamera, 0x750, 0x6d): [b'\x028646F5803200\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',],
},
}
STEER_THRESHOLD = 100
DBC = {
CAR.RAV4H: dbc_dict('toyota_rav4_hybrid_2017_pt_generated', 'toyota_adas'),
CAR.RAV4: dbc_dict('toyota_rav4_2017_pt_generated', 'toyota_adas'),
CAR.PRIUS: dbc_dict('toyota_prius_2017_pt_generated', 'toyota_adas'),
CAR.COROLLA: dbc_dict('toyota_corolla_2017_pt_generated', 'toyota_adas'),
CAR.LEXUS_RX: dbc_dict('lexus_rx_350_2016_pt_generated', 'toyota_adas'),
CAR.LEXUS_RXH: dbc_dict('lexus_rx_hybrid_2017_pt_generated', 'toyota_adas'),
CAR.LEXUS_RX_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.LEXUS_RXH_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.CHR: dbc_dict('toyota_nodsu_pt_generated', 'toyota_adas'),
CAR.CHRH: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_adas'),
CAR.CAMRY: dbc_dict('toyota_nodsu_pt_generated', 'toyota_adas'),
CAR.CAMRYH: dbc_dict('toyota_camry_hybrid_2018_pt_generated', 'toyota_adas'),
CAR.CAMRY_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.CAMRYH_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.HIGHLANDER: dbc_dict('toyota_highlander_2017_pt_generated', 'toyota_adas'),
CAR.HIGHLANDER_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.HIGHLANDERH: dbc_dict('toyota_highlander_hybrid_2018_pt_generated', 'toyota_adas'),
CAR.HIGHLANDERH_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.AVALON: dbc_dict('toyota_avalon_2017_pt_generated', 'toyota_adas'),
CAR.AVALON_2019: dbc_dict('toyota_nodsu_pt_generated', 'toyota_adas'),
CAR.AVALONH_2019: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_adas'),
CAR.RAV4_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.COROLLA_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.COROLLAH_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.LEXUS_ES_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.LEXUS_ESH_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.LEXUS_ESH: dbc_dict('lexus_ct200h_2018_pt_generated', 'toyota_adas'),
CAR.SIENNA: dbc_dict('toyota_sienna_xle_2018_pt_generated', 'toyota_adas'),
CAR.LEXUS_IS: dbc_dict('lexus_is_2018_pt_generated', 'toyota_adas'),
CAR.LEXUS_CTH: dbc_dict('lexus_ct200h_2018_pt_generated', 'toyota_adas'),
CAR.RAV4H_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.LEXUS_NXH: dbc_dict('lexus_nx300h_2018_pt_generated', 'toyota_adas'),
CAR.LEXUS_NX: dbc_dict('lexus_nx300_2018_pt_generated', 'toyota_adas'),
CAR.LEXUS_NX_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.PRIUS_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.MIRAI: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
CAR.ALPHARD_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
CAR.LEXUS_ISH: dbc_dict('lexus_is300h_2017_pt_generated', 'toyota_adas'),
}
# Toyota/Lexus Safety Sense 2.0 and 2.5
TSS2_CAR = set([CAR.RAV4_TSS2, CAR.COROLLA_TSS2, CAR.COROLLAH_TSS2, CAR.LEXUS_ES_TSS2, CAR.LEXUS_ESH_TSS2, CAR.RAV4H_TSS2,
CAR.LEXUS_RX_TSS2, CAR.LEXUS_RXH_TSS2, CAR.HIGHLANDER_TSS2, CAR.HIGHLANDERH_TSS2, CAR.PRIUS_TSS2, CAR.CAMRY_TSS2, CAR.CAMRYH_TSS2,
CAR.MIRAI, CAR.LEXUS_NX_TSS2, CAR.ALPHARD_TSS2])
NO_DSU_CAR = TSS2_CAR | set([CAR.CHR, CAR.CHRH, CAR.CAMRY, CAR.CAMRYH])
# no resume button press required
NO_STOP_TIMER_CAR = TSS2_CAR | set([CAR.RAV4H, CAR.HIGHLANDERH, CAR.HIGHLANDER, CAR.SIENNA, CAR.LEXUS_ESH])
| 56.603118 | 1,108 | 0.61368 |
from cereal import car
from selfdrive.car import dbc_dict
from selfdrive.config import Conversions as CV
Ecu = car.CarParams.Ecu
MIN_ACC_SPEED = 19. * CV.MPH_TO_MS
PEDAL_HYST_GAP = 3. * CV.MPH_TO_MS
class CarControllerParams:
ACCEL_HYST_GAP = 0.02
ACCEL_MAX = 2.0 # m/s2
ACCEL_MIN = -3.5 # m/s2
ACCEL_SCALE = max(ACCEL_MAX, -ACCEL_MIN)
STEER_MAX = 1500
STEER_DELTA_UP = 10 # 1.5s time to peak torque
STEER_DELTA_DOWN = 25 # always lower than 45 otherwise the Rav4 faults (Prius seems ok with 50)
STEER_ERROR_MAX = 350 # max delta between torque cmd and torque motor
class CAR:
PRIUS = "TOYOTA PRIUS 2017"
PRIUS_TSS2 = "TOYOTA PRIUS TSS2 2021"
RAV4H = "TOYOTA RAV4 HYBRID 2017"
RAV4 = "TOYOTA RAV4 2017"
COROLLA = "TOYOTA COROLLA 2017"
LEXUS_RX = "LEXUS RX 2016"
LEXUS_RXH = "LEXUS RX HYBRID 2017"
LEXUS_RX_TSS2 = "LEXUS RX 2020"
LEXUS_RXH_TSS2 = "LEXUS RX HYBRID 2020"
CHR = "TOYOTA C-HR 2018"
CHRH = "TOYOTA C-HR HYBRID 2018"
CAMRY = "TOYOTA CAMRY 2018"
CAMRYH = "TOYOTA CAMRY HYBRID 2018"
CAMRY_TSS2 = "TOYOTA CAMRY 2021" # TSS 2.5
CAMRYH_TSS2 = "TOYOTA CAMRY HYBRID 2021"
HIGHLANDER = "TOYOTA HIGHLANDER 2017"
HIGHLANDER_TSS2 = "TOYOTA HIGHLANDER 2020"
HIGHLANDERH = "TOYOTA HIGHLANDER HYBRID 2018"
HIGHLANDERH_TSS2 = "TOYOTA HIGHLANDER HYBRID 2020"
AVALON = "TOYOTA AVALON 2016"
AVALON_2019 = "TOYOTA AVALON 2019"
AVALONH_2019 = "TOYOTA AVALON HYBRID 2019"
RAV4_TSS2 = "TOYOTA RAV4 2019"
COROLLA_TSS2 = "TOYOTA COROLLA TSS2 2019"
# LSS2 Lexus UX Hybrid is same as a TSS2 Corolla Hybrid
COROLLAH_TSS2 = "TOYOTA COROLLA HYBRID TSS2 2019"
LEXUS_ES_TSS2 = "LEXUS ES 2019"
LEXUS_ESH_TSS2 = "LEXUS ES HYBRID 2019"
LEXUS_ESH = "LEXUS ES HYBRID 2018"
SIENNA = "TOYOTA SIENNA 2018"
LEXUS_IS = "LEXUS IS 2018"
LEXUS_CTH = "LEXUS CT HYBRID 2018"
RAV4H_TSS2 = "TOYOTA RAV4 HYBRID 2019"
LEXUS_NXH = "LEXUS NX HYBRID 2018"
LEXUS_NX = "LEXUS NX 2018"
LEXUS_NX_TSS2 = "LEXUS NX 2020"
MIRAI = "TOYOTA MIRAI 2021" # TSS 2.5
ALPHARD_TSS2 = "TOYOTA ALPHARD 2020"
LEXUS_ISH = "LEXUS ISH 2017"
# (addr, cars, bus, 1/freq*100, vl)
STATIC_DSU_MSGS = [
(0x128, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.RAV4, CAR.COROLLA, CAR.AVALON), 1, 3, b'\xf4\x01\x90\x83\x00\x37'),
(0x128, (CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH), 1, 3, b'\x03\x00\x20\x00\x00\x52'),
(0x141, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH, CAR.LEXUS_RX), 1, 2, b'\x00\x00\x00\x46'),
(0x160, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH, CAR.LEXUS_RX), 1, 7, b'\x00\x00\x08\x12\x01\x31\x9c\x51'),
(0x161, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.RAV4, CAR.COROLLA, CAR.AVALON, CAR.LEXUS_RX), 1, 7, b'\x00\x1e\x00\x00\x00\x80\x07'),
(0X161, (CAR.HIGHLANDERH, CAR.HIGHLANDER, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH), 1, 7, b'\x00\x1e\x00\xd4\x00\x00\x5b'),
(0x283, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH, CAR.LEXUS_RX), 0, 3, b'\x00\x00\x00\x00\x00\x00\x8c'),
(0x2E6, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH), 0, 3, b'\xff\xf8\x00\x08\x7f\xe0\x00\x4e'),
(0x2E7, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH), 0, 3, b'\xa8\x9c\x31\x9c\x00\x00\x00\x02'),
(0x33E, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH), 0, 20, b'\x0f\xff\x26\x40\x00\x1f\x00'),
(0x344, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH, CAR.LEXUS_RX), 0, 5, b'\x00\x00\x01\x00\x00\x00\x00\x50'),
(0x365, (CAR.PRIUS, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.HIGHLANDERH), 0, 20, b'\x00\x00\x00\x80\x03\x00\x08'),
(0x365, (CAR.RAV4, CAR.RAV4H, CAR.COROLLA, CAR.HIGHLANDER, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH, CAR.LEXUS_RX), 0, 20, b'\x00\x00\x00\x80\xfc\x00\x08'),
(0x366, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.HIGHLANDERH), 0, 20, b'\x00\x00\x4d\x82\x40\x02\x00'),
(0x366, (CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDER, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH, CAR.LEXUS_RX), 0, 20, b'\x00\x72\x07\xff\x09\xfe\x00'),
(0x470, (CAR.PRIUS, CAR.LEXUS_RXH), 1, 100, b'\x00\x00\x02\x7a'),
(0x470, (CAR.HIGHLANDER, CAR.HIGHLANDERH, CAR.RAV4H, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH), 1, 100, b'\x00\x00\x01\x79'),
(0x4CB, (CAR.PRIUS, CAR.RAV4H, CAR.LEXUS_RXH, CAR.LEXUS_NXH, CAR.LEXUS_NX, CAR.RAV4, CAR.COROLLA, CAR.HIGHLANDERH, CAR.HIGHLANDER, CAR.AVALON, CAR.SIENNA, CAR.LEXUS_CTH, CAR.LEXUS_ESH, CAR.LEXUS_RX), 0, 100, b'\x0c\x00\x00\x00\x00\x00\x00\x00'),
]
FINGERPRINTS = {
CAR.RAV4: [{
36: 8, 37: 8, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 512: 6, 513: 6, 547: 8, 548: 8, 552: 4, 562: 4, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 740: 5, 767: 4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 918: 7, 921: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 4, 956: 8, 979: 2, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1008: 2, 1014: 8, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1227: 8, 1228: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1555: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1596: 8, 1597: 8, 1600: 8, 1656: 8, 1664: 8, 1728: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8, 2015: 8, 2024: 8
}],
CAR.RAV4H: [{
36: 8, 37: 8, 170: 8, 180: 8, 186: 4, 296: 8, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 547: 8, 548: 8, 550: 8, 552: 4, 560: 7, 562: 4, 581: 5, 608: 8, 610: 5, 643: 7, 705: 8, 713: 8, 725: 2, 740: 5, 767: 4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 918: 7, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 3, 955: 8, 956: 8, 979: 2, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1008: 2, 1014: 8, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1184: 8, 1185: 8, 1186: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1212: 8, 1227: 8, 1228: 8, 1232: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1555: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1596: 8, 1597: 8, 1600: 8, 1656: 8, 1664: 8, 1728: 8, 1745: 8, 1779: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
# Chinese RAV4
{
36: 8, 37: 8, 170: 8, 180: 8, 186: 4, 355: 5, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 512: 6, 513: 6, 547: 8, 548: 8, 552: 4, 562: 4, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 740: 5, 742: 8, 743: 8, 767: 4, 800: 8, 830: 7, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 921: 8, 922: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 979: 2, 998: 5, 999: 7, 1000: 8, 1001: 8, 1008: 2, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1207: 8, 1227: 8, 1235: 8, 1263: 8, 1279: 8, 1552: 8, 1553: 8, 1554: 8, 1555: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1596: 8, 1597: 8, 1600: 8, 1664: 8, 1728: 8, 1745: 8, 1779: 8
}],
CAR.PRIUS: [{
35: 8, 36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 512: 6, 513: 6, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 614: 8, 643: 7, 658: 8, 713: 8, 740: 5, 742: 8, 743: 8, 767: 4, 800: 8, 810: 2, 814: 8, 824: 2, 829: 2, 830: 7, 835: 8, 836: 8, 845: 5, 863: 8, 869: 7, 870: 7, 871: 2, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 974: 8, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1083: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1175: 8, 1227: 8, 1228: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1595: 8, 1777: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
#2019 LE
{
35: 8, 36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 614: 8, 643: 7, 658: 8, 713: 8, 740: 5, 742: 8, 743: 8, 767: 4, 800: 8, 810: 2, 814: 8, 829: 2, 830: 7, 835: 8, 836: 8, 863: 8, 865: 8, 869: 7, 870: 7, 871: 2, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 918: 8, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1083: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1175: 8, 1227: 8, 1228: 8, 1235: 8, 1237: 8, 1279: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1595: 8, 1777: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
# 2020 Prius Prime LE
{
35: 8, 36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 713: 8, 740: 5, 742: 8, 743: 8, 764: 8, 767: 4, 800: 8, 810: 2, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 863: 8, 865: 8, 869: 7, 870: 7, 871: 2, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 974: 8, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1227: 8, 1235: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1595: 8, 1649: 8, 1777: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
#2020 Prius Prime Limited
{
35: 8, 36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 614: 8, 643: 7, 658: 8, 713: 8, 740: 5, 742: 8, 743: 8, 767: 4, 800: 8, 810: 2, 814: 8, 824: 2, 829: 2, 830: 7, 835: 8, 836: 8, 863: 8, 865: 8, 869: 7, 870: 7, 871: 2, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 974: 8, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1083: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1175: 8, 1227: 8, 1228: 8, 1235: 8, 1237: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1595: 8, 1649: 8, 1777: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8, 2015: 8, 2024: 8, 2026: 8, 2027: 8, 2029: 8, 2030: 8, 2031: 8
},
#2020 Central Europe Prime
{
35: 8, 36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 713: 8, 740: 5, 742: 8, 743: 8, 764: 8, 767: 4, 800: 8, 810: 2, 818: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 845: 5, 863: 8, 865: 8, 869: 7, 870: 7, 871: 2, 889: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 8, 974: 8, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1227: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1595: 8, 1777: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8
},
#2017 German Prius
{
35: 8, 36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 614: 8, 643: 7, 658: 8, 713: 8, 740: 5, 742: 8, 743: 8, 767: 4, 800: 8, 810: 2, 814: 8, 829: 2, 830: 7, 835: 8, 836: 8, 845: 5, 863: 8, 869: 7, 870: 7, 871: 2, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1077: 8, 1082: 8, 1083: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1175: 8, 1227: 8, 1228: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1595: 8, 1777: 8, 1779: 8, 1792: 8, 1767: 4, 1863: 8, 1904: 8, 1912: 8, 1984: 8, 1988: 8, 1990: 8, 1992: 8, 1996: 8, 1998: 8, 2002: 8, 2010: 8, 2015: 8, 2016: 8, 2018: 8, 2024: 8, 2026: 8, 2030: 8
}],
CAR.PRIUS_TSS2: [{
36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 401: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 614: 8, 643: 7, 658: 8, 713: 8, 740: 5, 742: 8, 743: 8, 764: 8, 765: 8, 800: 8, 810: 2, 814: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 863: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 882: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1083: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1172: 8, 1175: 8, 1228: 8, 1235: 8, 1237: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1593: 8, 1595: 8, 1649: 8, 1653: 8, 1654: 8, 1655: 8, 1775: 8, 1777: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
}],
#Corolla w/ added Pedal Support (512L and 513L)
CAR.COROLLA: [{
36: 8, 37: 8, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 512: 6, 513: 6, 547: 8, 548: 8, 552: 4, 608: 8, 610: 5, 643: 7, 705: 8, 740: 5, 767: 4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 2, 921: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 4, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1196: 8, 1227: 8, 1235: 8, 1279: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1596: 8, 1597: 8, 1600: 8, 1664: 8, 1728: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8, 2016: 8, 2017: 8, 2018: 8, 2019: 8, 2020: 8, 2021: 8, 2022: 8, 2023: 8, 2024: 8
}],
CAR.CAMRY: [
#XLE and LE
{
36: 8, 37: 8, 119: 6, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 550: 8, 552: 4, 562: 6, 608: 8, 610: 8, 643: 7, 658: 8, 705: 8, 728: 8, 740: 5, 761: 8, 764: 8, 767: 4, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 822: 8, 824: 8, 830: 7, 835: 8, 836: 8, 869: 7, 870: 7, 871: 2, 888: 8, 889: 8, 891: 8, 898: 8, 900: 6, 902: 6, 905: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 976: 1, 983: 8, 984: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1011: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1082: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1228: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1412: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1595: 8, 1745: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1808: 8, 1816: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
#XSE and SE
# TODO: get proper fingerprint in stock mode
{
36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 550: 8, 552: 4, 562: 6, 608: 8, 610: 8, 643: 7, 658: 8, 705: 8, 728: 8, 740: 5, 761: 8, 764: 8, 767: 4, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 822: 8, 824: 8, 830: 7, 835: 8, 836: 8, 869: 7, 870: 7, 888: 8, 889: 8, 891: 8, 898: 8, 900: 6, 902: 6, 905: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 976: 1, 983: 8, 984: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1011: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1082: 8, 1114: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1228: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1412: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1595: 8, 1745: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1808: 8, 1816: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
{
# 2019 XSE
36: 8, 37: 8, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 550: 8, 552: 4, 562: 6, 608: 8, 610: 8, 643: 7, 658: 8, 705: 8, 728: 8, 740: 5, 761: 8, 764: 8, 767: 4, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 822: 8, 824: 8, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 888: 8, 889: 8, 891: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 942: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 976: 1, 983: 8, 984: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1011: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1082: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1228: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1412: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1594: 8, 1595: 8, 1649: 8, 1745: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1792: 8, 1767: 4, 1808: 8, 1816: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1937: 8, 1945: 8, 1953: 8, 1961: 8, 1968: 8, 1976: 8, 1990: 8, 1998: 8, 2015: 8, 2016: 8, 2024: 8
}],
CAR.CAMRYH: [
#SE, LE and LE with Blindspot Monitor
{
36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 713: 8, 728: 8, 740: 5, 761: 8, 764: 8, 767: 4, 800: 8, 810: 2, 812: 8, 818: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 889: 8, 896: 8, 898: 8, 900: 6, 902: 6, 905: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 983: 8, 984: 8, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1011: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1594: 8, 1595: 8, 1649: 8, 1745: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1808: 8, 1810: 8, 1816: 8, 1818: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
#SL
{
36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 713: 8, 728: 8, 740: 5, 761: 8, 764: 8, 767: 4, 800: 8, 810: 2, 812: 8, 818: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 869: 7, 870: 7, 871: 2, 888: 8, 889: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1228: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1595: 8, 1745: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
# XLE
{
36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 658: 8, 713: 8, 728: 8, 740: 5, 761: 8, 764: 8, 767: 4, 800: 8, 810: 2, 812: 8, 814: 8, 818: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 869: 7, 870: 7, 871: 2, 888: 8, 889: 8, 898: 8, 900: 6, 902: 6, 905: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 983: 8, 984: 8, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1011: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1082: 8, 1084: 8, 1085: 8, 1086: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1228: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1595: 8, 1745: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
# 2018 Chinese Camry Hybrid
{
36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 713: 8, 728: 8, 740: 5, 761: 8, 764: 8, 800: 8, 810: 2, 812: 8, 818: 8, 824: 8, 829: 2, 830: 7, 835: 8, 836: 8, 869: 7, 870: 7, 871: 2, 888: 8, 889: 8, 898: 8, 900: 6, 902: 6, 905: 8, 913: 8, 918: 8, 921: 8, 933: 8, 934: 8, 935: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 993: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1076: 8, 1077: 8, 1084: 8, 1085: 8, 1086: 8, 1112: 8, 1114: 8, 1132: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1235: 8, 1264: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1595: 8, 1745: 8, 1779: 8, 1786: 8, 1787: 8, 1788: 8, 1789: 8
}],
CAR.HIGHLANDER: [{
36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 238: 4, 355: 5, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 545: 5, 550: 8, 552: 4, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 740: 5, 767: 4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 921: 8, 922: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 979: 2, 998: 5, 999: 7, 1000: 8, 1001: 8, 1008: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1182: 8, 1183: 8, 1189: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1206: 8, 1207: 8, 1212: 8, 1227: 8, 1235: 8, 1237: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1599: 8, 1656: 8, 1666: 8, 1667: 8, 1728: 8, 1745: 8, 1779: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1984: 8, 1988: 8, 1992: 8, 1996: 8, 1990: 8, 1998: 8
},
# 2019 Highlander XLE
{
36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 238: 4, 355: 5, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 545: 5, 550: 8, 552: 4, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 740: 5, 767: 4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 921: 8, 922: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1008: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1182: 8, 1183: 8, 1189: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1206: 8, 1207: 8, 1212: 8, 1227: 8, 1235: 8, 1237: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1599: 8, 1656: 8, 1728: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
# 2017 Highlander Limited
{
36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 238: 4, 355: 5, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 545: 5, 550: 8, 552: 4, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 740: 5, 767: 4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 918: 7, 921: 8, 922: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 979: 2, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1008: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1182: 8, 1183: 8, 1189: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1206: 8, 1207: 8, 1212: 8, 1227: 8, 1235: 8, 1237: 8, 1264: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1599: 8, 1656: 8, 1728: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
# 2018 Highlander Limited Platinum
{
36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 238: 4, 355: 5, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 545: 5, 550: 8, 552: 4, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 740: 5, 767: 4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 918: 7, 921: 8, 922: 8, 933: 8, 944: 8, 945: 8, 951: 8, 955: 8, 956: 8, 979: 2, 998: 5, 999: 7, 1000: 8, 1001: 8, 1008: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1182: 8, 1183: 8, 1189: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1206: 8, 1207: 8, 1212: 8, 1227: 8, 1235: 8, 1237: 8, 1263: 8, 1279: 8, 1408: 8, 1409: 8, 1410: 8, 1552: 8, 1553: 8, 1554: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1585: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1599: 8, 1656: 8, 1728: 8, 1745: 8, 1779: 8, 1872: 8, 1880: 8, 1904: 8, 1912: 8, 1988: 8, 1990: 8, 1996: 8, 1998: 8, 2015: 8, 2016: 8, 2024: 8
}],
CAR.HIGHLANDERH: [{
36: 8, 37: 8, 170: 8, 180: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 581: 5, 608: 8, 610: 5, 643: 7, 713: 8, 740: 5, 767: 4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 918: 7, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 3, 955: 8, 956: 8, 979: 2, 998: 5, 999: 7, 1000: 8, 1001: 8, 1005: 2, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1059: 1, 1112: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1184: 8, 1185: 8, 1186: 8, 1189: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1206: 8, 1212: 8, 1227: 8, 1232: 8, 1235: 8, 1237: 8, 1263: 8, 1264: 8, 1279: 8, 1552: 8, 1553: 8, 1554: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1599: 8, 1656: 8, 1728: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
{
# 2019 Highlander Hybrid Limited Platinum
36: 8, 37: 8, 170: 8, 180: 8, 296: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 581: 5, 608: 8, 610: 5, 643: 7, 713: 8, 740: 5, 767: 4, 800: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 897: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 3, 918: 7, 921: 8, 933: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 3, 955: 8, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1043: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1076: 8, 1077: 8, 1112: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1184: 8, 1185: 8, 1186: 8, 1189: 8, 1190: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1206: 8, 1212: 8, 1227: 8, 1232: 8, 1235: 8, 1237: 8, 1263: 8, 1279: 8, 1552: 8, 1553: 8, 1554: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1599: 8, 1656: 8, 1666: 8, 1667: 8, 1728: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
}],
CAR.COROLLAH_TSS2: [
# 2019 Taiwan Altis Hybrid
{
36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 401: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 713: 8, 728: 8, 740: 5, 742: 8, 743: 8, 761: 8, 765: 8, 767: 4, 800: 8, 810: 2, 829: 2, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 885: 8, 896: 8, 898: 8, 918: 7, 921: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 987: 8, 993: 8, 1002: 8, 1014: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1082: 8, 1112: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1172: 8, 1235: 8, 1237: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1594: 8, 1595: 8, 1745: 8, 1775: 8, 1779: 8
},
# 2019 Chinese Levin Hybrid
{
36: 8, 37: 8, 166: 8, 170: 8, 180: 8, 295: 8, 296: 8, 401: 8, 426: 6, 452: 8, 466: 8, 467: 8, 550: 8, 552: 4, 560: 7, 562: 6, 581: 5, 608: 8, 610: 8, 643: 7, 713: 8, 728: 8, 740: 5, 742: 8, 743: 8, 761: 8, 765: 8, 767: 4, 800: 8, 810: 2, 812: 8, 829: 2, 830: 7, 835: 8, 836: 8, 865: 8, 869: 7, 870: 7, 871: 2, 877: 8, 881: 8, 885: 8, 896: 8, 898: 8, 921: 8, 944: 8, 945: 8, 950: 8, 951: 8, 953: 8, 955: 8, 956: 8, 971: 7, 975: 5, 993: 8, 1002: 8, 1017: 8, 1020: 8, 1041: 8, 1042: 8, 1044: 8, 1056: 8, 1057: 8, 1059: 1, 1071: 8, 1114: 8, 1161: 8, 1162: 8, 1163: 8, 1172: 8, 1235: 8, 1279: 8, 1541: 8, 1552: 8, 1553: 8, 1556: 8, 1557: 8, 1568: 8, 1570: 8, 1571: 8, 1572: 8, 1592: 8, 1594: 8, 1595: 8, 1600: 8, 1649: 8, 1745: 8, 1775: 8, 1779: 8
}
],
CAR.SIENNA: [
{
36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 545: 5, 548: 8, 550: 8, 552: 4, 562: 4, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 740: 5, 764: 8, 767: 4, 800: 8, 824: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 888: 8, 896: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 1, 918: 7, 921: 8, 933: 8, 944: 6, 945: 8, 951: 8, 955: 8, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1008: 2, 1014: 8, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1114: 8, 1160: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1182: 8, 1183: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1200: 8, 1201: 8, 1202: 8, 1203: 8, 1212: 8, 1227: 8, 1228: 8, 1235: 8, 1237: 8, 1279: 8, 1552: 8, 1553: 8, 1555: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1656: 8, 1664: 8, 1666: 8, 1667: 8, 1728: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
},
# XLE AWD 2018
{
36: 8, 37: 8, 114: 5, 119: 6, 120: 4, 170: 8, 180: 8, 186: 4, 238: 4, 426: 6, 452: 8, 464: 8, 466: 8, 467: 8, 544: 4, 545: 5, 548: 8, 550: 8, 552: 4, 562: 4, 608: 8, 610: 5, 643: 7, 705: 8, 725: 2, 740: 5, 764: 8, 767: 4, 800: 8, 824: 8, 835: 8, 836: 8, 849: 4, 869: 7, 870: 7, 871: 2, 896: 8, 900: 6, 902: 6, 905: 8, 911: 8, 916: 1, 921: 8, 933: 8, 944: 6, 945: 8, 951: 8, 955: 8, 956: 8, 979: 2, 992: 8, 998: 5, 999: 7, 1000: 8, 1001: 8, 1002: 8, 1008: 2, 1014: 8, 1017: 8, 1041: 8, 1042: 8, 1043: 8, 1056: 8, 1059: 1, 1076: 8, 1077: 8, 1114: 8, 1160: 8, 1161: 8, 1162: 8, 1163: 8, 1164: 8, 1165: 8, 1166: 8, 1167: 8, 1176: 8, 1177: 8, 1178: 8, 1179: 8, 1180: 8, 1181: 8, 1182: 8, 1183: 8, 1191: 8, 1192: 8, 1196: 8, 1197: 8, 1198: 8, 1199: 8, 1200: 8, 1201: 8, 1202: 8, 1203: 8, 1212: 8, 1227: 8, 1235: 8, 1237: 8, 1279: 8, 1552: 8, 1553: 8, 1555: 8, 1556: 8, 1557: 8, 1561: 8, 1562: 8, 1568: 8, 1569: 8, 1570: 8, 1571: 8, 1572: 8, 1584: 8, 1589: 8, 1592: 8, 1593: 8, 1595: 8, 1656: 8, 1664: 8, 1666: 8, 1667: 8, 1728: 8, 1745: 8, 1779: 8, 1904: 8, 1912: 8, 1990: 8, 1998: 8
}],
# dp - fake values, for generate car selection
CAR.LEXUS_ISH: [{ 65535: 1 }],
}
FW_VERSIONS = {
CAR.AVALON: {
(Ecu.esp, 0x7b0, None): [
b'F152607060\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881510701300\x00\x00\x00\x00',
b'881510705100\x00\x00\x00\x00',
b'881510705200\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B41051\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x0230721100\x00\x00\x00\x00\x00\x00\x00\x00A0C01000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230721200\x00\x00\x00\x00\x00\x00\x00\x00A0C01000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702000\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0701100\x00\x00\x00\x00',
b'8646F0703000\x00\x00\x00\x00',
],
},
CAR.AVALON_2019: {
(Ecu.esp, 0x7b0, None): [
b'F152607140\x00\x00\x00\x00\x00\x00',
b'F152607171\x00\x00\x00\x00\x00\x00',
b'F152607110\x00\x00\x00\x00\x00\x00',
b'F152607180\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881510703200\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B41080\x00\x00\x00\x00\x00\x00',
b'8965B07010\x00\x00\x00\x00\x00\x00',
b'8965B41090\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x700, None): [
b'\x01896630735100\x00\x00\x00\x00',
b'\x01896630725300\x00\x00\x00\x00',
b'\x01896630738000\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0702100\x00\x00\x00\x00',
],
},
CAR.AVALONH_2019: {
(Ecu.esp, 0x7b0, None): [
b'F152641040\x00\x00\x00\x00\x00\x00',
b'F152641061\x00\x00\x00\x00\x00\x00',
b'F152641050\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881510704200\x00\x00\x00\x00',
b'881514107100\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B07010\x00\x00\x00\x00\x00\x00',
b'8965B41090\x00\x00\x00\x00\x00\x00',
b'8965B41070\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x700, None): [
b'\x02896630724000\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x02896630737000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x02896630728000\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0702100\x00\x00\x00\x00',
],
},
CAR.CAMRY: {
(Ecu.engine, 0x700, None): [
b'\x018966306L3100\x00\x00\x00\x00',
b'\x018966306L4200\x00\x00\x00\x00',
b'\x018966306L5200\x00\x00\x00\x00',
b'\x018966306P8000\x00\x00\x00\x00',
b'\x018966306Q3100\x00\x00\x00\x00',
b'\x018966306Q4000\x00\x00\x00\x00',
b'\x018966306Q4100\x00\x00\x00\x00',
b'\x018966306Q4200\x00\x00\x00\x00',
b'\x018966333Q9200\x00\x00\x00\x00',
b'\x018966333P3100\x00\x00\x00\x00',
b'\x018966333P3200\x00\x00\x00\x00',
b'\x018966333P4200\x00\x00\x00\x00',
b'\x018966333P4300\x00\x00\x00\x00',
b'\x018966333P4400\x00\x00\x00\x00',
b'\x018966333P4500\x00\x00\x00\x00',
b'\x018966333P4700\x00\x00\x00\x00',
b'\x018966333Q6000\x00\x00\x00\x00',
b'\x018966333Q6200\x00\x00\x00\x00',
b'\x018966333Q6300\x00\x00\x00\x00',
b'\x018966333W6000\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x02333P1100\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'8821F0601200 ',
b'8821F0601300 ',
b'8821F0602000 ',
b'8821F0603300 ',
b'8821F0604100 ',
b'8821F0605200 ',
b'8821F0607200 ',
b'8821F0608000 ',
b'8821F0608200 ',
b'8821F0609100 ',
],
(Ecu.esp, 0x7b0, None): [
b'F152606210\x00\x00\x00\x00\x00\x00',
b'F152606230\x00\x00\x00\x00\x00\x00',
b'F152606270\x00\x00\x00\x00\x00\x00',
b'F152606290\x00\x00\x00\x00\x00\x00',
b'F152606410\x00\x00\x00\x00\x00\x00',
b'F152633540\x00\x00\x00\x00\x00\x00',
b'F152633A10\x00\x00\x00\x00\x00\x00',
b'F152633A20\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B33540\x00\x00\x00\x00\x00\x00',
b'8965B33542\x00\x00\x00\x00\x00\x00',
b'8965B33580\x00\x00\x00\x00\x00\x00',
b'8965B33581\x00\x00\x00\x00\x00\x00',
b'8965B33621\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [ # Same as 0x791
b'8821F0601200 ',
b'8821F0601300 ',
b'8821F0602000 ',
b'8821F0603300 ',
b'8821F0604100 ',
b'8821F0605200 ',
b'8821F0607200 ',
b'8821F0608000 ',
b'8821F0608200 ',
b'8821F0609100 ',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0601200 ',
b'8646F0601300 ',
b'8646F0601400 ',
b'8646F0603400 ',
b'8646F0604100 ',
b'8646F0605000 ',
b'8646F0606000 ',
b'8646F0606100 ',
b'8646F0607100 ',
],
},
CAR.CAMRYH: {
(Ecu.engine, 0x700, None): [
b'\x018966306Q6000\x00\x00\x00\x00',
b'\x018966333N1100\x00\x00\x00\x00',
b'\x018966333N4300\x00\x00\x00\x00',
b'\x018966333X0000\x00\x00\x00\x00',
b'\x018966333X4000\x00\x00\x00\x00',
b'\x01896633T16000\x00\x00\x00\x00',
b'\x028966306B2100\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306B2300\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306N8100\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306N8200\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306N8300\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306N8400\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306R5000\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306R5000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966306R6000\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966306R6000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966306S0000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966306S0100\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966306S1100\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152633214\x00\x00\x00\x00\x00\x00',
b'F152633660\x00\x00\x00\x00\x00\x00',
b'F152633712\x00\x00\x00\x00\x00\x00',
b'F152633713\x00\x00\x00\x00\x00\x00',
b'F152633B51\x00\x00\x00\x00\x00\x00',
b'F152633B60\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'8821F0601200 ',
b'8821F0601300 ',
b'8821F0603400 ',
b'8821F0604000 ',
b'8821F0604200 ',
b'8821F0605200 ',
b'8821F0606200 ',
b'8821F0607200 ',
b'8821F0608000 ',
b'8821F0608200 ',
b'8821F0609000 ',
b'8821F0609100 ',
],
(Ecu.eps, 0x7a1, None): [
b'8965B33540\x00\x00\x00\x00\x00\x00',
b'8965B33542\x00\x00\x00\x00\x00\x00',
b'8965B33550\x00\x00\x00\x00\x00\x00',
b'8965B33551\x00\x00\x00\x00\x00\x00',
b'8965B33580\x00\x00\x00\x00\x00\x00',
b'8965B33581\x00\x00\x00\x00\x00\x00',
b'8965B33611\x00\x00\x00\x00\x00\x00',
b'8965B33621\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [ # Same as 0x791
b'8821F0601200 ',
b'8821F0601300 ',
b'8821F0603400 ',
b'8821F0604000 ',
b'8821F0604200 ',
b'8821F0605200 ',
b'8821F0606200 ',
b'8821F0607200 ',
b'8821F0608000 ',
b'8821F0608200 ',
b'8821F0609000 ',
b'8821F0609100 ',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0601200 ',
b'8646F0601300 ',
b'8646F0601400 ',
b'8646F0603400 ',
b'8646F0603500 ',
b'8646F0604100 ',
b'8646F0605000 ',
b'8646F0606000 ',
b'8646F0606100 ',
b'8646F0607000 ',
b'8646F0607100 ',
],
},
CAR.CAMRY_TSS2: {
(Ecu.eps, 0x7a1, None): [
b'8965B33630\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F152606370\x00\x00\x00\x00\x00\x00',
b'\x01F152606390\x00\x00\x00\x00\x00\x00',
b'\x01F152606400\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x700, None): [
b'\x018966306Q5000\x00\x00\x00\x00',
b'\x018966306T3100\x00\x00\x00\x00',
b'\x018966306T3200\x00\x00\x00\x00',
b'\x018966306T4100\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 15): [
b'\x018821F6201200\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 109): [
b'\x028646F3305200\x00\x00\x00\x008646G5301200\x00\x00\x00\x00',
b'\x028646F3305300\x00\x00\x00\x008646G5301200\x00\x00\x00\x00',
],
},
CAR.CAMRYH_TSS2: {
(Ecu.eps, 0x7a1, None): [
b'8965B33630\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152633D00\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x700, None): [
b'\x018966306Q6000\x00\x00\x00\x00',
b'\x018966306Q7000\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 15): [
b'\x018821F6201200\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 109): [
b'\x028646F3305200\x00\x00\x00\x008646G5301200\x00\x00\x00\x00',
b'\x028646F3305300\x00\x00\x00\x008646G5301200\x00\x00\x00\x00',
],
},
CAR.CHR: {
(Ecu.engine, 0x700, None): [
b'\x01896631017100\x00\x00\x00\x00',
b'\x01896631017200\x00\x00\x00\x00',
b'\x0189663F413100\x00\x00\x00\x00',
b'\x0189663F414100\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'8821F0W01000 ',
b'8821F0W01100 ',
b'8821FF401600 ',
b'8821FF404000 ',
b'8821FF404100 ',
b'8821FF405100 ',
b'8821FF406000 ',
b'8821FF407100 ',
],
(Ecu.esp, 0x7b0, None): [
b'F152610020\x00\x00\x00\x00\x00\x00',
b'F152610153\x00\x00\x00\x00\x00\x00',
b'F152610210\x00\x00\x00\x00\x00\x00',
b'F1526F4034\x00\x00\x00\x00\x00\x00',
b'F1526F4044\x00\x00\x00\x00\x00\x00',
b'F1526F4073\x00\x00\x00\x00\x00\x00',
b'F1526F4121\x00\x00\x00\x00\x00\x00',
b'F1526F4122\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B10011\x00\x00\x00\x00\x00\x00',
b'8965B10040\x00\x00\x00\x00\x00\x00',
b'8965B10070\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x0331024000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203202\x00\x00\x00\x00',
b'\x0331024000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203302\x00\x00\x00\x00',
b'\x0331036000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203302\x00\x00\x00\x00',
b'\x033F401100\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203102\x00\x00\x00\x00',
b'\x033F424000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203202\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F0W01000 ',
b'8821FF401600 ',
b'8821FF404000 ',
b'8821FF404100 ',
b'8821FF405100 ',
b'8821FF406000 ',
b'8821FF407100 ',
b'8821F0W01100 ',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646FF401800 ',
b'8646FF404000 ',
b'8646FF406000 ',
b'8646FF407000 ',
],
},
CAR.CHRH: {
(Ecu.engine, 0x700, None): [
b'\x02896631013200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x0289663F405000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x0289663F418000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x0289663F423000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x0289663F431000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x0189663F438000\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152610013\x00\x00\x00\x00\x00\x00',
b'F152610014\x00\x00\x00\x00\x00\x00',
b'F152610040\x00\x00\x00\x00\x00\x00',
b'F152610190\x00\x00\x00\x00\x00\x00',
b'F152610200\x00\x00\x00\x00\x00\x00',
b'F152610230\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'8821F0W01000 ',
b'8821FF402300 ',
b'8821FF402400 ',
b'8821FF404000 ',
b'8821FF406000 ',
b'8821FF407100 ',
],
(Ecu.eps, 0x7a1, None): [
b'8965B10011\x00\x00\x00\x00\x00\x00',
b'8965B10020\x00\x00\x00\x00\x00\x00',
b'8965B10040\x00\x00\x00\x00\x00\x00',
b'8965B10050\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F0W01000 ',
b'8821FF402300 ',
b'8821FF402400 ',
b'8821FF404000 ',
b'8821FF406000 ',
b'8821FF407100 ',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646FF402100 ',
b'8646FF404000 ',
b'8646FF406000 ',
b'8646FF407000 ',
],
},
CAR.COROLLA: {
(Ecu.engine, 0x7e0, None): [
b'\x0230ZC2000\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC2100\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC2200\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC2300\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC3000\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC3100\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC3200\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230ZC3300\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0330ZC1200\x00\x00\x00\x00\x00\x00\x00\x0050212000\x00\x00\x00\x00\x00\x00\x00\x00895231203202\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881510201100\x00\x00\x00\x00',
b'881510201200\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152602190\x00\x00\x00\x00\x00\x00',
b'F152602191\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B02181\x00\x00\x00\x00\x00\x00',
b'8965B02191\x00\x00\x00\x00\x00\x00',
b'8965B48150\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0201101\x00\x00\x00\x00',
b'8646F0201200\x00\x00\x00\x00',
],
},
CAR.COROLLA_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630ZG2000\x00\x00\x00\x00',
b'\x01896630ZG5000\x00\x00\x00\x00',
b'\x01896630ZG5100\x00\x00\x00\x00',
b'\x01896630ZG5200\x00\x00\x00\x00',
b'\x01896630ZG5300\x00\x00\x00\x00',
b'\x01896630ZP2000\x00\x00\x00\x00',
b'\x01896630ZQ5000\x00\x00\x00\x00',
b'\x018966312L8000\x00\x00\x00\x00',
b'\x018966312M0000\x00\x00\x00\x00',
b'\x018966312M9000\x00\x00\x00\x00',
b'\x018966312P9000\x00\x00\x00\x00',
b'\x018966312P9100\x00\x00\x00\x00',
b'\x018966312P9200\x00\x00\x00\x00',
b'\x018966312Q2300\x00\x00\x00\x00',
b'\x018966312R0100\x00\x00\x00\x00',
b'\x018966312R1000\x00\x00\x00\x00',
b'\x018966312R1100\x00\x00\x00\x00',
b'\x018966312R3100\x00\x00\x00\x00',
b'\x018966312S5000\x00\x00\x00\x00',
b'\x018966312S7000\x00\x00\x00\x00',
b'\x018966312W3000\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x0230ZN4000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x03312M3000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203402\x00\x00\x00\x00',
b'\x03312N6000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203202\x00\x00\x00\x00',
b'\x03312N6000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203302\x00\x00\x00\x00',
b'\x03312N6000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203402\x00\x00\x00\x00',
b'\x03312N6100\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203302\x00\x00\x00\x00',
b'\x03312N6100\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00895231203402\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'\x018965B12350\x00\x00\x00\x00\x00\x00',
b'\x018965B12470\x00\x00\x00\x00\x00\x00',
b'\x018965B12490\x00\x00\x00\x00\x00\x00',
b'\x018965B12500\x00\x00\x00\x00\x00\x00',
b'\x018965B12520\x00\x00\x00\x00\x00\x00',
b'\x018965B12530\x00\x00\x00\x00\x00\x00',
b'\x018965B1255000\x00\x00\x00\x00',
b'8965B12361\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F152602280\x00\x00\x00\x00\x00\x00',
b'\x01F152602560\x00\x00\x00\x00\x00\x00',
b'\x01F152602590\x00\x00\x00\x00\x00\x00',
b'\x01F152602650\x00\x00\x00\x00\x00\x00',
b'\x01F152612641\x00\x00\x00\x00\x00\x00',
b'\x01F152612651\x00\x00\x00\x00\x00\x00',
b'\x01F152612B10\x00\x00\x00\x00\x00\x00',
b'\x01F152612B51\x00\x00\x00\x00\x00\x00',
b'\x01F152612B60\x00\x00\x00\x00\x00\x00',
b'\x01F152612B61\x00\x00\x00\x00\x00\x00',
b'\x01F152612B90\x00\x00\x00\x00\x00\x00',
b'\x01F152612C00\x00\x00\x00\x00\x00\x00',
b'F152602191\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F12010D0\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1201100\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1201200\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1201300\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F1201400\x00\x00\x00\x008646G2601500\x00\x00\x00\x00',
b'\x028646F1202000\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F1202100\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F1202200\x00\x00\x00\x008646G2601500\x00\x00\x00\x00',
],
},
CAR.COROLLAH_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630ZJ1000\x00\x00\x00\x00',
b'\x01896630ZU8000\x00\x00\x00\x00',
b'\x01896637621000\x00\x00\x00\x00',
b'\x01896637624000\x00\x00\x00\x00',
b'\x01896637626000\x00\x00\x00\x00',
b'\x01896637648000\x00\x00\x00\x00',
b'\x02896630ZJ5000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896630ZN8000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896630ZQ3000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896630ZR2000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896630ZT8000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896630ZT9000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966312Q3000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966312Q4000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x038966312L7000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF1205001\x00\x00\x00\x00',
b'\x038966312N1000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF1203001\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B12361\x00\x00\x00\x00\x00\x00',
b'8965B12451\x00\x00\x00\x00\x00\x00',
b'8965B76012\x00\x00\x00\x00\x00\x00',
b'8965B76050\x00\x00\x00\x00\x00\x00',
b'\x018965B12350\x00\x00\x00\x00\x00\x00',
b'\x018965B12470\x00\x00\x00\x00\x00\x00',
b'\x018965B12490\x00\x00\x00\x00\x00\x00',
b'\x018965B12500\x00\x00\x00\x00\x00\x00',
b'\x018965B12520\x00\x00\x00\x00\x00\x00',
b'\x018965B12530\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152612590\x00\x00\x00\x00\x00\x00',
b'F152612691\x00\x00\x00\x00\x00\x00',
b'F152612692\x00\x00\x00\x00\x00\x00',
b'F152612700\x00\x00\x00\x00\x00\x00',
b'F152612710\x00\x00\x00\x00\x00\x00',
b'F152612790\x00\x00\x00\x00\x00\x00',
b'F152612800\x00\x00\x00\x00\x00\x00',
b'F152612820\x00\x00\x00\x00\x00\x00',
b'F152612840\x00\x00\x00\x00\x00\x00',
b'F152612A00\x00\x00\x00\x00\x00\x00',
b'F152612A10\x00\x00\x00\x00\x00\x00',
b'F152642540\x00\x00\x00\x00\x00\x00',
b'F152676293\x00\x00\x00\x00\x00\x00',
b'F152676303\x00\x00\x00\x00\x00\x00',
b'F152676304\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F12010D0\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1201100\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F1201300\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F1201400\x00\x00\x00\x008646G2601500\x00\x00\x00\x00',
b'\x028646F1202000\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F1202100\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F4203400\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F76020C0\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F7603100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F7603200\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
],
},
CAR.HIGHLANDER: {
(Ecu.engine, 0x700, None): [
b'\x01896630E09000\x00\x00\x00\x00',
b'\x01896630E43000\x00\x00\x00\x00',
b'\x01896630E43100\x00\x00\x00\x00',
b'\x01896630E43200\x00\x00\x00\x00',
b'\x01896630E44200\x00\x00\x00\x00',
b'\x01896630E45000\x00\x00\x00\x00',
b'\x01896630E45100\x00\x00\x00\x00',
b'\x01896630E45200\x00\x00\x00\x00',
b'\x01896630E46200\x00\x00\x00\x00',
b'\x01896630E74000\x00\x00\x00\x00',
b'\x01896630E75000\x00\x00\x00\x00',
b'\x01896630E76000\x00\x00\x00\x00',
b'\x01896630E77000\x00\x00\x00\x00',
b'\x01896630E83000\x00\x00\x00\x00',
b'\x01896630E84000\x00\x00\x00\x00',
b'\x01896630E85000\x00\x00\x00\x00',
b'\x01896630E86000\x00\x00\x00\x00',
b'\x01896630E88000\x00\x00\x00\x00',
b'\x01896630EA0000\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B48140\x00\x00\x00\x00\x00\x00',
b'8965B48150\x00\x00\x00\x00\x00\x00',
b'8965B48210\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [b'F15260E011\x00\x00\x00\x00\x00\x00'],
(Ecu.dsu, 0x791, None): [
b'881510E01100\x00\x00\x00\x00',
b'881510E01200\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0E01200\x00\x00\x00\x00',
b'8646F0E01300\x00\x00\x00\x00',
],
},
CAR.HIGHLANDERH: {
(Ecu.eps, 0x7a1, None): [
b'8965B48160\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152648541\x00\x00\x00\x00\x00\x00',
b'F152648542\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x0230E40000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230E40100\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230EA2000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0230EA2100\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0E01200\x00\x00\x00\x00',
b'8646F0E01300\x00\x00\x00\x00',
],
},
CAR.HIGHLANDER_TSS2: {
(Ecu.eps, 0x7a1, None): [
b'8965B48241\x00\x00\x00\x00\x00\x00',
b'8965B48310\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F15260E051\x00\x00\x00\x00\x00\x00',
b'\x01F15260E110\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x700, None): [
b'\x01896630E62100\x00\x00\x00\x00',
b'\x01896630E62200\x00\x00\x00\x00',
b'\x01896630E64100\x00\x00\x00\x00',
b'\x01896630E64200\x00\x00\x00\x00',
b'\x01896630EB1000\x00\x00\x00\x00',
b'\x01896630EB1100\x00\x00\x00\x00',
b'\x01896630EB2000\x00\x00\x00\x00',
b'\x01896630EB2100\x00\x00\x00\x00',
b'\x01896630EB2200\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301400\x00\x00\x00\x00',
b'\x018821F6201200\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F0E02100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4803000\x00\x00\x00\x008646G5301200\x00\x00\x00\x00',
],
},
CAR.HIGHLANDERH_TSS2: {
(Ecu.eps, 0x7a1, None): [
b'8965B48241\x00\x00\x00\x00\x00\x00',
b'8965B48310\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F15264872300\x00\x00\x00\x00',
b'\x01F15264872400\x00\x00\x00\x00',
b'\x01F15264872500\x00\x00\x00\x00',
b'\x01F152648C6300\x00\x00\x00\x00',
],
(Ecu.engine, 0x700, None): [
b'\x01896630EA1000\000\000\000\000',
b'\x01896630EA1000\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
b'\x02896630E66000\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
b'\x02896630EB3000\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
b'\x02896630EB3100\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301400\x00\x00\x00\x00',
b'\x018821F6201200\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F0E02100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4803000\x00\x00\x00\x008646G5301200\x00\x00\x00\x00',
],
},
CAR.LEXUS_IS: {
(Ecu.engine, 0x700, None): [
b'\x018966353M7100\x00\x00\x00\x00',
b'\x018966353Q2000\x00\x00\x00\x00',
b'\x018966353Q2300\x00\x00\x00\x00',
b'\x018966353R1100\x00\x00\x00\x00',
b'\x018966353R7100\x00\x00\x00\x00',
b'\x018966353R8100\x00\x00\x00\x00',
b'\x018966353Q4000\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x0232480000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02353P7000\x00\x00\x00\x00\x00\x00\x00\x00530J5000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02353P9000\x00\x00\x00\x00\x00\x00\x00\x00553C1000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152653301\x00\x00\x00\x00\x00\x00',
b'F152653310\x00\x00\x00\x00\x00\x00',
b'F152653330\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881515306200\x00\x00\x00\x00',
b'881515306400\x00\x00\x00\x00',
b'881515306500\x00\x00\x00\x00',
b'881515307400\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B53270\x00\x00\x00\x00\x00\x00',
b'8965B53271\x00\x00\x00\x00\x00\x00',
b'8965B53280\x00\x00\x00\x00\x00\x00',
b'8965B53281\x00\x00\x00\x00\x00\x00',
b'8965B53311\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702300\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F5301300\x00\x00\x00\x00',
b'8646F5301400\x00\x00\x00\x00',
b'8646F5301200\x00\x00\x00\x00',
],
},
CAR.PRIUS: {
(Ecu.engine, 0x700, None): [
b'\x02896634761000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634761100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634761200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634762000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634763000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634763100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634765000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634765100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634769100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634769200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634770000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634774000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634774100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634774200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634782000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x02896634784000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966347A0000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966347A5000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966347A8000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x028966347B0000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x03896634759100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634759200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634759200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634759300\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634760000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701002\x00\x00\x00\x00',
b'\x03896634760000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634760000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634760100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634760200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x03896634760200\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634760300\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701004\x00\x00\x00\x00',
b'\x03896634768000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4703001\x00\x00\x00\x00',
b'\x03896634768000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4703002\x00\x00\x00\x00',
b'\x03896634768100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4703002\x00\x00\x00\x00',
b'\x03896634785000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4705001\x00\x00\x00\x00',
b'\x03896634785000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710001\x00\x00\x00\x00',
b'\x03896634786000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4705001\x00\x00\x00\x00',
b'\x03896634786000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710001\x00\x00\x00\x00',
b'\x03896634789000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4703002\x00\x00\x00\x00',
b'\x038966347A3000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4701003\x00\x00\x00\x00',
b'\x038966347A3000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4707001\x00\x00\x00\x00',
b'\x038966347B6000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710001\x00\x00\x00\x00',
b'\x038966347B7000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710001\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B47021\x00\x00\x00\x00\x00\x00',
b'8965B47022\x00\x00\x00\x00\x00\x00',
b'8965B47023\x00\x00\x00\x00\x00\x00',
b'8965B47050\x00\x00\x00\x00\x00\x00',
b'8965B47060\x00\x00\x00\x00\x00\x00', # This is the EPS with good angle sensor
],
(Ecu.esp, 0x7b0, None): [
b'F152647290\x00\x00\x00\x00\x00\x00',
b'F152647300\x00\x00\x00\x00\x00\x00',
b'F152647310\x00\x00\x00\x00\x00\x00',
b'F152647414\x00\x00\x00\x00\x00\x00',
b'F152647415\x00\x00\x00\x00\x00\x00',
b'F152647416\x00\x00\x00\x00\x00\x00',
b'F152647417\x00\x00\x00\x00\x00\x00',
b'F152647470\x00\x00\x00\x00\x00\x00',
b'F152647490\x00\x00\x00\x00\x00\x00',
b'F152647683\x00\x00\x00\x00\x00\x00',
b'F152647684\x00\x00\x00\x00\x00\x00',
b'F152647862\x00\x00\x00\x00\x00\x00',
b'F152647863\x00\x00\x00\x00\x00\x00',
b'F152647864\x00\x00\x00\x00\x00\x00',
b'F152647865\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514702300\x00\x00\x00\x00',
b'881514703100\x00\x00\x00\x00',
b'881514704100\x00\x00\x00\x00',
b'881514706000\x00\x00\x00\x00',
b'881514706100\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702000\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4701300\x00\x00\x00\x00',
b'8646F4702001\x00\x00\x00\x00',
b'8646F4702100\x00\x00\x00\x00',
b'8646F4702200\x00\x00\x00\x00',
b'8646F4705000\x00\x00\x00\x00',
b'8646F4705200\x00\x00\x00\x00',
],
},
CAR.RAV4: {
(Ecu.engine, 0x7e0, None): [
b'\x02342Q1000\x00\x00\x00\x00\x00\x00\x00\x0054212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q1100\x00\x00\x00\x00\x00\x00\x00\x0054212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q1200\x00\x00\x00\x00\x00\x00\x00\x0054212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q1300\x00\x00\x00\x00\x00\x00\x00\x0054212000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q2000\x00\x00\x00\x00\x00\x00\x00\x0054213000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q2100\x00\x00\x00\x00\x00\x00\x00\x0054213000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q2200\x00\x00\x00\x00\x00\x00\x00\x0054213000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342Q4000\x00\x00\x00\x00\x00\x00\x00\x0054215000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B42063\x00\x00\x00\x00\x00\x00',
b'8965B42073\x00\x00\x00\x00\x00\x00',
b'8965B42082\x00\x00\x00\x00\x00\x00',
b'8965B42083\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F15260R102\x00\x00\x00\x00\x00\x00',
b'F15260R103\x00\x00\x00\x00\x00\x00',
b'F152642493\x00\x00\x00\x00\x00\x00',
b'F152642492\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514201200\x00\x00\x00\x00',
b'881514201300\x00\x00\x00\x00',
b'881514201400\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702000\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4201200\x00\x00\x00\x00',
b'8646F4202001\x00\x00\x00\x00',
b'8646F4202100\x00\x00\x00\x00',
b'8646F4204000\x00\x00\x00\x00',
],
},
CAR.RAV4H: {
(Ecu.engine, 0x7e0, None): [
b'\x02342N9000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342N9100\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02342P0000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B42102\x00\x00\x00\x00\x00\x00',
b'8965B42103\x00\x00\x00\x00\x00\x00',
b'8965B42112\x00\x00\x00\x00\x00\x00',
b'8965B42162\x00\x00\x00\x00\x00\x00',
b'8965B42163\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152642090\x00\x00\x00\x00\x00\x00',
b'F152642110\x00\x00\x00\x00\x00\x00',
b'F152642120\x00\x00\x00\x00\x00\x00',
b'F152642400\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514202200\x00\x00\x00\x00',
b'881514202300\x00\x00\x00\x00',
b'881514202400\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702000\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4201100\x00\x00\x00\x00',
b'8646F4201200\x00\x00\x00\x00',
b'8646F4202001\x00\x00\x00\x00',
b'8646F4202100\x00\x00\x00\x00',
b'8646F4204000\x00\x00\x00\x00',
],
},
CAR.RAV4_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630R58000\x00\x00\x00\x00',
b'\x01896630R58100\x00\x00\x00\x00',
b'\x018966342E2000\x00\x00\x00\x00',
b'\x018966342M8000\x00\x00\x00\x00',
b'\x018966342S9000\x00\x00\x00\x00',
b'\x018966342T1000\x00\x00\x00\x00',
b'\x018966342T6000\x00\x00\x00\x00',
b'\x018966342T9000\x00\x00\x00\x00',
b'\x018966342U4000\x00\x00\x00\x00',
b'\x018966342U4100\x00\x00\x00\x00',
b'\x018966342V3000\x00\x00\x00\x00',
b'\x018966342V3100\x00\x00\x00\x00',
b'\x018966342V3200\x00\x00\x00\x00',
b'\x01896634A05000\x00\x00\x00\x00',
b'\x01896634A19000\x00\x00\x00\x00',
b'\x01896634A19100\x00\x00\x00\x00',
b'\x01896634A20000\x00\x00\x00\x00',
b'\x01896634A20100\x00\x00\x00\x00',
b'\x01896634A22000\x00\x00\x00\x00',
b'\x01896634A22100\x00\x00\x00\x00',
b'\x01896634A30000\x00\x00\x00\x00',
b'\x01896634A44000\x00\x00\x00\x00',
b'\x01896634A45000\x00\x00\x00\x00',
b'\x01896634A46000\x00\x00\x00\x00',
b'\x028966342M7000\x00\x00\x00\x00897CF1201001\x00\x00\x00\x00',
b'\x028966342T0000\x00\x00\x00\x00897CF1201001\x00\x00\x00\x00',
b'\x028966342V1000\x00\x00\x00\x00897CF1202001\x00\x00\x00\x00',
b'\x028966342Y8000\x00\x00\x00\x00897CF1201001\x00\x00\x00\x00',
b'\x02896634A18000\x00\x00\x00\x00897CF1201001\x00\x00\x00\x00',
b'\x02896634A18100\x00\x00\x00\x00897CF1201001\x00\x00\x00\x00',
b'\x02896634A43000\x00\x00\x00\x00897CF4201001\x00\x00\x00\x00',
b'\x02896634A47000\x00\x00\x00\x00897CF4201001\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F15260R210\x00\x00\x00\x00\x00\x00',
b'\x01F15260R220\x00\x00\x00\x00\x00\x00',
b'\x01F15260R290\x00\x00\x00\x00\x00\x00',
b'\x01F15260R300\x00\x00\x00\x00\x00\x00',
b'\x01F152642551\x00\x00\x00\x00\x00\x00',
b'\x01F152642561\x00\x00\x00\x00\x00\x00',
b'\x01F152642700\x00\x00\x00\x00\x00\x00',
b'\x01F152642701\x00\x00\x00\x00\x00\x00',
b'\x01F152642710\x00\x00\x00\x00\x00\x00',
b'\x01F152642711\x00\x00\x00\x00\x00\x00',
b'\x01F152642750\x00\x00\x00\x00\x00\x00',
b'\x01F152642751\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B42170\x00\x00\x00\x00\x00\x00',
b'8965B42171\x00\x00\x00\x00\x00\x00',
b'8965B42180\x00\x00\x00\x00\x00\x00',
b'8965B42181\x00\x00\x00\x00\x00\x00',
b'\x028965B0R01200\x00\x00\x00\x008965B0R02200\x00\x00\x00\x00',
b'\x028965B0R01300\x00\x00\x00\x008965B0R02300\x00\x00\x00\x00',
b'\x028965B0R01400\x00\x00\x00\x008965B0R02400\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4203200\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F4203300\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F4203400\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4203500\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4203700\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F4203800\x00\x00\x00\x008646G2601500\x00\x00\x00\x00',
],
},
CAR.RAV4H_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896634A15000\x00\x00\x00\x00',
b'\x018966342M5000\x00\x00\x00\x00',
b'\x018966342W8000\x00\x00\x00\x00',
b'\x018966342X5000\x00\x00\x00\x00',
b'\x018966342X6000\x00\x00\x00\x00',
b'\x01896634A25000\x00\x00\x00\x00',
b'\x018966342W5000\x00\x00\x00\x00',
b'\x028966342W4001\x00\x00\x00\x00897CF1203001\x00\x00\x00\x00',
b'\x02896634A13001\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
b'\x02896634A13101\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
b'\x02896634A14001\x00\x00\x00\x00897CF1203001\x00\x00\x00\x00',
b'\x02896634A23001\x00\x00\x00\x00897CF1203001\x00\x00\x00\x00',
b'\x02896634A14001\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
b'\x02896634A14101\x00\x00\x00\x00897CF4801001\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152642291\x00\x00\x00\x00\x00\x00',
b'F152642290\x00\x00\x00\x00\x00\x00',
b'F152642330\x00\x00\x00\x00\x00\x00',
b'F152642331\x00\x00\x00\x00\x00\x00',
b'F152642531\x00\x00\x00\x00\x00\x00',
b'F152642532\x00\x00\x00\x00\x00\x00',
b'F152642520\x00\x00\x00\x00\x00\x00',
b'F152642521\x00\x00\x00\x00\x00\x00',
b'F152642540\x00\x00\x00\x00\x00\x00',
b'F152642541\x00\x00\x00\x00\x00\x00',
b'F152642542\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B42170\x00\x00\x00\x00\x00\x00',
b'8965B42171\x00\x00\x00\x00\x00\x00',
b'8965B42180\x00\x00\x00\x00\x00\x00',
b'8965B42181\x00\x00\x00\x00\x00\x00',
b'\x028965B0R01200\x00\x00\x00\x008965B0R02200\x00\x00\x00\x00',
b'\x028965B0R01300\x00\x00\x00\x008965B0R02300\x00\x00\x00\x00',
b'\x028965B0R01400\x00\x00\x00\x008965B0R02400\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4203200\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F4203300\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F4203400\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4203500\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4203700\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F4203800\x00\x00\x00\x008646G2601500\x00\x00\x00\x00',
],
},
CAR.LEXUS_ES_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630EC9100\x00\x00\x00\x00',
b'\x018966333T5000\x00\x00\x00\x00',
b'\x018966333T5100\x00\x00\x00\x00',
b'\x018966333X6000\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F152606281\x00\x00\x00\x00\x00\x00',
b'\x01F152606340\x00\x00\x00\x00\x00\x00',
b'\x01F15260E031\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B33252\x00\x00\x00\x00\x00\x00',
b'8965B33590\x00\x00\x00\x00\x00\x00',
b'8965B48271\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F33030D0\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F3303200\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F3304100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F4810200\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
],
},
CAR.SIENNA: {
(Ecu.engine, 0x700, None): [
b'\x01896630832100\x00\x00\x00\x00',
b'\x01896630832200\x00\x00\x00\x00',
b'\x01896630838000\x00\x00\x00\x00',
b'\x01896630838100\x00\x00\x00\x00',
b'\x01896630842000\x00\x00\x00\x00',
b'\x01896630843000\x00\x00\x00\x00',
b'\x01896630851000\x00\x00\x00\x00',
b'\x01896630851100\x00\x00\x00\x00',
b'\x01896630852100\x00\x00\x00\x00',
b'\x01896630859000\x00\x00\x00\x00',
b'\x01896630860000\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B45070\x00\x00\x00\x00\x00\x00',
b'8965B45080\x00\x00\x00\x00\x00\x00',
b'8965B45082\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152608130\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881510801100\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702200\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F0801100\x00\x00\x00\x00',
],
},
CAR.LEXUS_CTH: {
(Ecu.dsu, 0x791, None): [
b'881517601100\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152676144\x00\x00\x00\x00\x00\x00',
],
(Ecu.engine, 0x7e0, None): [
b'\x0237635000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F7601100\x00\x00\x00\x00',
],
},
CAR.LEXUS_ESH_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x028966333S8000\x00\x00\x00\x00897CF3302002\x00\x00\x00\x00',
b'\x028966333T0100\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x028966333V4000\x00\x00\x00\x00897CF3305001\x00\x00\x00\x00',
b'\x02896633T09000\x00\x00\x00\x00897CF3307001\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152633423\x00\x00\x00\x00\x00\x00',
b'F152633680\x00\x00\x00\x00\x00\x00',
b'F152633681\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B33252\x00\x00\x00\x00\x00\x00',
b'8965B33590\x00\x00\x00\x00\x00\x00',
b'8965B33690\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301200\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F33030D0\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F3303100\x00\x00\x00\x008646G26011A0\x00\x00\x00\x00',
b'\x028646F3304100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F3304200\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
],
},
CAR.LEXUS_ESH: {
(Ecu.engine, 0x7e0, None): [
b'\x02333M4200\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152633171\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881513310400\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B33512\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4701100\x00\x00\x00\x00',
b'8821F4701300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F3302001\x00\x00\x00\x00',
b'8646F3302200\x00\x00\x00\x00',
],
},
CAR.LEXUS_NX: {
(Ecu.engine, 0x700, None): [
b'\x01896637851000\x00\x00\x00\x00',
b'\x01896637852000\x00\x00\x00\x00',
b'\x01896637854000\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152678130\x00\x00\x00\x00\x00\x00',
b'F152678140\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881517803100\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B78060\x00\x00\x00\x00\x00\x00',
b'8965B78080\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702100\x00\x00\x00\x00',
b'8821F4702300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F7801100\x00\x00\x00\x00',
],
},
CAR.LEXUS_NX_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x018966378B2100\x00\x00\x00\x00',
b'\x018966378G3000\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F152678221\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B78120\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b"\x018821F3301400\x00\x00\x00\x00",
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F78030A0\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
b'\x028646F7803100\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
],
},
CAR.LEXUS_NXH: {
(Ecu.engine, 0x7e0, None): [
b'\x0237882000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0237841000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0237886000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0237880000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152678160\x00\x00\x00\x00\x00\x00',
b'F152678170\x00\x00\x00\x00\x00\x00',
b'F152678171\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881517804300\x00\x00\x00\x00',
b'881517804100\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B78060\x00\x00\x00\x00\x00\x00',
b'8965B78080\x00\x00\x00\x00\x00\x00',
b'8965B78100\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4702300\x00\x00\x00\x00',
b'8821F4702100\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F7801300\x00\x00\x00\x00',
b'8646F7801100\x00\x00\x00\x00',
],
},
CAR.LEXUS_RX: {
(Ecu.engine, 0x700, None): [
b'\x01896630E36200\x00\x00\x00\x00',
b'\x01896630E36300\x00\x00\x00\x00',
b'\x01896630E37200\x00\x00\x00\x00',
b'\x01896630E37300\x00\x00\x00\x00',
b'\x01896630E41000\x00\x00\x00\x00',
b'\x01896630E41100\x00\x00\x00\x00',
b'\x01896630E41200\x00\x00\x00\x00',
b'\x01896630EA3100\x00\x00\x00\x00',
b'\x01896630EA4100\x00\x00\x00\x00',
b'\x01896630EA4300\x00\x00\x00\x00',
b'\x01896630EA6300\x00\x00\x00\x00',
b'\x018966348R1300\x00\x00\x00\x00',
b'\x018966348R8500\x00\x00\x00\x00',
b'\x018966348W1300\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152648472\x00\x00\x00\x00\x00\x00',
b'F152648473\x00\x00\x00\x00\x00\x00',
b'F152648492\x00\x00\x00\x00\x00\x00',
b'F152648493\x00\x00\x00\x00\x00\x00',
b'F152648474\x00\x00\x00\x00\x00\x00',
b'F152648630\x00\x00\x00\x00\x00\x00',
b'F152648494\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514810300\x00\x00\x00\x00',
b'881514810500\x00\x00\x00\x00',
b'881514810700\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B0E011\x00\x00\x00\x00\x00\x00',
b'8965B0E012\x00\x00\x00\x00\x00\x00',
b'8965B48102\x00\x00\x00\x00\x00\x00',
b'8965B48112\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4701000\x00\x00\x00\x00',
b'8821F4701100\x00\x00\x00\x00',
b'8821F4701200\x00\x00\x00\x00',
b'8821F4701300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4801100\x00\x00\x00\x00',
b'8646F4801200\x00\x00\x00\x00',
b'8646F4802001\x00\x00\x00\x00',
b'8646F4802100\x00\x00\x00\x00',
b'8646F4802200\x00\x00\x00\x00',
b'8646F4809000\x00\x00\x00\x00',
],
},
CAR.LEXUS_RXH: {
(Ecu.engine, 0x7e0, None): [
b'\x02348J7000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348N0000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348Q4000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348Q4100\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348T1100\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348T3000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348V6000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x02348Z3000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152648361\x00\x00\x00\x00\x00\x00',
b'F152648501\x00\x00\x00\x00\x00\x00',
b'F152648502\x00\x00\x00\x00\x00\x00',
b'F152648504\x00\x00\x00\x00\x00\x00',
b'F152648740\x00\x00\x00\x00\x00\x00',
b'F152648A30\x00\x00\x00\x00\x00\x00',
],
(Ecu.dsu, 0x791, None): [
b'881514811300\x00\x00\x00\x00',
b'881514811500\x00\x00\x00\x00',
b'881514811700\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B0E011\x00\x00\x00\x00\x00\x00',
b'8965B0E012\x00\x00\x00\x00\x00\x00',
b'8965B48111\x00\x00\x00\x00\x00\x00',
b'8965B48112\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'8821F4701000\x00\x00\x00\x00',
b'8821F4701100\x00\x00\x00\x00',
b'8821F4701200\x00\x00\x00\x00',
b'8821F4701300\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'8646F4801200\x00\x00\x00\x00',
b'8646F4802001\x00\x00\x00\x00',
b'8646F4802100\x00\x00\x00\x00',
b'8646F4802200\x00\x00\x00\x00',
b'8646F4809000\x00\x00\x00\x00',
],
},
CAR.LEXUS_RX_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x01896630EC9000\x00\x00\x00\x00',
b'\x01896634D12000\x00\x00\x00\x00',
b'\x01896630EB0000\x00\x00\x00\x00',
b'\x01896630EA9000\x00\x00\x00\x00',
b'\x01896630ED0000\x00\x00\x00\x00',
b'\x018966348W9000\x00\x00\x00\x00',
b'\x01896634D12100\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'\x01F152648801\x00\x00\x00\x00\x00\x00',
b'\x01F15260E031\x00\x00\x00\x00\x00\x00',
b'\x01F15260E041\x00\x00\x00\x00\x00\x00',
b'\x01F152648781\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B48271\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301100\x00\x00\x00\x00',
b'\x018821F3301300\x00\x00\x00\x00',
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4810200\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F4810100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
],
},
CAR.LEXUS_RXH_TSS2: {
(Ecu.engine, 0x7e0, None): [
b'\x02348X8000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0234D14000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
b'\x0234D16000\x00\x00\x00\x00\x00\x00\x00\x00A4802000\x00\x00\x00\x00\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152648831\x00\x00\x00\x00\x00\x00',
b'F152648D00\x00\x00\x00\x00\x00\x00',
b'F152648D60\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B48271\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4810200\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F4810100\x00\x00\x00\x008646G2601200\x00\x00\x00\x00',
],
},
CAR.PRIUS_TSS2: {
(Ecu.engine, 0x700, None): [
b'\x028966347C8000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00',
b'\x038966347C0000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710101\x00\x00\x00\x00',
b'\x038966347C1000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4710101\x00\x00\x00\x00',
b'\x038966347C5000\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4707101\x00\x00\x00\x00',
b'\x038966347C5100\x00\x00\x00\x008966A4703000\x00\x00\x00\x00897CF4707101\x00\x00\x00\x00',
],
(Ecu.esp, 0x7b0, None): [
b'F152647500\x00\x00\x00\x00\x00\x00',
b'F152647510\x00\x00\x00\x00\x00\x00',
b'F152647520\x00\x00\x00\x00\x00\x00',
b'F152647521\x00\x00\x00\x00\x00\x00',
],
(Ecu.eps, 0x7a1, None): [
b'8965B47070\x00\x00\x00\x00\x00\x00',
],
(Ecu.fwdRadar, 0x750, 0xf): [
b'\x018821F3301400\x00\x00\x00\x00',
],
(Ecu.fwdCamera, 0x750, 0x6d): [
b'\x028646F4707000\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',
b'\x028646F4710000\x00\x00\x00\x008646G2601500\x00\x00\x00\x00',
],
},
CAR.MIRAI: {
(Ecu.esp, 0x7D1, None): [b'\x01898A36203000\x00\x00\x00\x00',],
(Ecu.esp, 0x7B0, None): [b'\x01F15266203200\x00\x00\x00\x00',], # a second ESP ECU
(Ecu.eps, 0x7A1, None): [b'\x028965B6204100\x00\x00\x00\x008965B6203100\x00\x00\x00\x00',],
(Ecu.fwdRadar, 0x750, 0xf): [b'\x018821F6201200\x00\x00\x00\x00',],
(Ecu.fwdCamera, 0x750, 0x6d): [b'\x028646F6201400\x00\x00\x00\x008646G5301200\x00\x00\x00\x00',],
},
CAR.ALPHARD_TSS2: {
(Ecu.engine, 0x7e0, None): [b'\x0235883000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00',],
(Ecu.eps, 0x7a1, None): [b'8965B58040\x00\x00\x00\x00\x00\x00',],
(Ecu.fwdRadar, 0x750, 0xf): [b'\x018821F3301400\x00\x00\x00\x00',],
(Ecu.fwdCamera, 0x750, 0x6d): [b'\x028646F5803200\x00\x00\x00\x008646G2601400\x00\x00\x00\x00',],
},
}
STEER_THRESHOLD = 100  # driver steering-torque threshold — presumably raw EPS torque units used to detect driver override; confirm against CarState usage
# CAN database lookup: maps each supported platform to its DBC file pair via
# dbc_dict() (powertrain-bus definitions, radar/ADAS-bus definitions).
# TSS2 platforms share the generic 'toyota_nodsu*' powertrain DBCs paired with
# 'toyota_tss2_adas'; older TSS-P platforms use per-model DBCs with 'toyota_adas'.
DBC = {
  CAR.RAV4H: dbc_dict('toyota_rav4_hybrid_2017_pt_generated', 'toyota_adas'),
  CAR.RAV4: dbc_dict('toyota_rav4_2017_pt_generated', 'toyota_adas'),
  CAR.PRIUS: dbc_dict('toyota_prius_2017_pt_generated', 'toyota_adas'),
  CAR.COROLLA: dbc_dict('toyota_corolla_2017_pt_generated', 'toyota_adas'),
  CAR.LEXUS_RX: dbc_dict('lexus_rx_350_2016_pt_generated', 'toyota_adas'),
  CAR.LEXUS_RXH: dbc_dict('lexus_rx_hybrid_2017_pt_generated', 'toyota_adas'),
  CAR.LEXUS_RX_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
  CAR.LEXUS_RXH_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
  CAR.CHR: dbc_dict('toyota_nodsu_pt_generated', 'toyota_adas'),
  CAR.CHRH: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_adas'),
  CAR.CAMRY: dbc_dict('toyota_nodsu_pt_generated', 'toyota_adas'),
  CAR.CAMRYH: dbc_dict('toyota_camry_hybrid_2018_pt_generated', 'toyota_adas'),
  CAR.CAMRY_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
  CAR.CAMRYH_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
  CAR.HIGHLANDER: dbc_dict('toyota_highlander_2017_pt_generated', 'toyota_adas'),
  CAR.HIGHLANDER_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
  CAR.HIGHLANDERH: dbc_dict('toyota_highlander_hybrid_2018_pt_generated', 'toyota_adas'),
  CAR.HIGHLANDERH_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
  CAR.AVALON: dbc_dict('toyota_avalon_2017_pt_generated', 'toyota_adas'),
  CAR.AVALON_2019: dbc_dict('toyota_nodsu_pt_generated', 'toyota_adas'),
  CAR.AVALONH_2019: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_adas'),
  CAR.RAV4_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
  CAR.COROLLA_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
  CAR.COROLLAH_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
  CAR.LEXUS_ES_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
  CAR.LEXUS_ESH_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
  CAR.LEXUS_ESH: dbc_dict('lexus_ct200h_2018_pt_generated', 'toyota_adas'),
  CAR.SIENNA: dbc_dict('toyota_sienna_xle_2018_pt_generated', 'toyota_adas'),
  CAR.LEXUS_IS: dbc_dict('lexus_is_2018_pt_generated', 'toyota_adas'),
  CAR.LEXUS_CTH: dbc_dict('lexus_ct200h_2018_pt_generated', 'toyota_adas'),
  CAR.RAV4H_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
  CAR.LEXUS_NXH: dbc_dict('lexus_nx300h_2018_pt_generated', 'toyota_adas'),
  CAR.LEXUS_NX: dbc_dict('lexus_nx300_2018_pt_generated', 'toyota_adas'),
  CAR.LEXUS_NX_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
  CAR.PRIUS_TSS2: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
  CAR.MIRAI: dbc_dict('toyota_nodsu_hybrid_pt_generated', 'toyota_tss2_adas'),
  CAR.ALPHARD_TSS2: dbc_dict('toyota_nodsu_pt_generated', 'toyota_tss2_adas'),
  CAR.LEXUS_ISH: dbc_dict('lexus_is300h_2017_pt_generated', 'toyota_adas'),
}
# Toyota/Lexus Safety Sense 2.0 and 2.5 platforms
# (set literals instead of set([...]) — same membership, idiomatic form).
TSS2_CAR = {CAR.RAV4_TSS2, CAR.COROLLA_TSS2, CAR.COROLLAH_TSS2, CAR.LEXUS_ES_TSS2, CAR.LEXUS_ESH_TSS2, CAR.RAV4H_TSS2,
            CAR.LEXUS_RX_TSS2, CAR.LEXUS_RXH_TSS2, CAR.HIGHLANDER_TSS2, CAR.HIGHLANDERH_TSS2, CAR.PRIUS_TSS2, CAR.CAMRY_TSS2,
            CAR.CAMRYH_TSS2, CAR.MIRAI, CAR.LEXUS_NX_TSS2, CAR.ALPHARD_TSS2}

# Platforms with no standalone DSU: every TSS2 car plus these models.
NO_DSU_CAR = TSS2_CAR | {CAR.CHR, CAR.CHRH, CAR.CAMRY, CAR.CAMRYH}

# no resume button press required
NO_STOP_TIMER_CAR = TSS2_CAR | {CAR.RAV4H, CAR.HIGHLANDERH, CAR.HIGHLANDER, CAR.SIENNA, CAR.LEXUS_ESH}
| true | true |
f7f467cd151229f394535966299954701d485511 | 1,216 | py | Python | staffs/urls.py | Landgate/Staff-Calibration | d4a929526604e4de086e3d7f8783ea66a68b4ece | [
"Apache-2.0"
] | 1 | 2021-11-23T05:48:41.000Z | 2021-11-23T05:48:41.000Z | staffs/urls.py | Landgate/Staff-Calibration | d4a929526604e4de086e3d7f8783ea66a68b4ece | [
"Apache-2.0"
] | null | null | null | staffs/urls.py | Landgate/Staff-Calibration | d4a929526604e4de086e3d7f8783ea66a68b4ece | [
"Apache-2.0"
] | null | null | null | from django.urls import path
from . import views
# Application namespace for URL reversing, e.g. reverse('staffs:staff-list').
app_name = 'staffs'
# NOTE: Django resolves URLs top-down, first match wins. The literal
# 'stafftype/' and 'levels/' prefixes must therefore stay above the
# generic '<id>/...' staff routes at the bottom of this list.
urlpatterns = [
    # path('', views.index, name='home-page'),
    path('', views.staff_list, name = 'staff-list'),
    path('stafftype/', views.stafftype_list, name = 'stafftype-list'),
    path('levels/', views.level_list, name = 'level-list'),
    path('stafftype_create/', views.stafftype_create, name = 'stafftype-create'),
    path('staff_create/', views.staff_create, name = 'staff-create'),
    path('levels/create/', views.level_create, name = 'level-create'),
    path('stafftype/<id>/', views.stafftype_detail, name = 'stafftype-detail'),
    path('levels/<id>/delete', views.level_delete, name = 'level-delete'),
    path('levels/<id>/', views.level_detail, name = 'level-detail'),
    path('stafftype/<id>/delete', views.stafftype_delete, name = 'stafftype-delete'),
    path('stafftype/<id>/update', views.stafftype_update, name = 'stafftype-update'),
    path('levels/<id>/update', views.level_update, name = 'level-update'),
    # path('<id>/', views.staff_detail, name = 'staff-detail'),
    path('<id>/update', views.staff_update, name = 'staff-update'),
    path('<id>/delete', views.staff_delete, name = 'staff-delete'),
]
| 46.769231 | 85 | 0.667763 | from django.urls import path
from . import views
app_name = 'staffs'
urlpatterns = [
path('', views.staff_list, name = 'staff-list'),
path('stafftype/', views.stafftype_list, name = 'stafftype-list'),
path('levels/', views.level_list, name = 'level-list'),
path('stafftype_create/', views.stafftype_create, name = 'stafftype-create'),
path('staff_create/', views.staff_create, name = 'staff-create'),
path('levels/create/', views.level_create, name = 'level-create'),
path('stafftype/<id>/', views.stafftype_detail, name = 'stafftype-detail'),
path('levels/<id>/delete', views.level_delete, name = 'level-delete'),
path('levels/<id>/', views.level_detail, name = 'level-detail'),
path('stafftype/<id>/delete', views.stafftype_delete, name = 'stafftype-delete'),
path('stafftype/<id>/update', views.stafftype_update, name = 'stafftype-update'),
path('levels/<id>/update', views.level_update, name = 'level-update'),
path('<id>/update', views.staff_update, name = 'staff-update'),
path('<id>/delete', views.staff_delete, name = 'staff-delete'),
]
| true | true |
f7f4682bb1ff9d2905f7276a06d5ff73ee1451f3 | 618 | py | Python | HomeComponents/PathImage.py | Snake-GGJJWP/osr2mp4-app | cee93a29ed6b426b33e5d849b8679075cc60456b | [
"MIT"
] | null | null | null | HomeComponents/PathImage.py | Snake-GGJJWP/osr2mp4-app | cee93a29ed6b426b33e5d849b8679075cc60456b | [
"MIT"
] | null | null | null | HomeComponents/PathImage.py | Snake-GGJJWP/osr2mp4-app | cee93a29ed6b426b33e5d849b8679075cc60456b | [
"MIT"
] | null | null | null | from PyQt5.QtWidgets import QLabel
from Parents import Button, PathImage
class OsrPath(PathImage):
    """Path-selector image widget for the .osr replay file input."""

    def __init__(self, parent):
        super(OsrPath, self).__init__(parent)
        # Placement and scale of the widget on the home screen.
        self.default_x, self.default_y = 544, 165
        self.default_size = 4.5
        # Artwork for the control and its drop shadow.
        self.img = "res/OsrPath.png"
        self.img_shadow = "res/OsrPath_Shadow.png"
        super().setup()
class MapSetPath(PathImage):
    """Path-selector image widget for the beatmap-set folder input."""

    def __init__(self, parent):
        super(MapSetPath, self).__init__(parent)
        # Placement and scale of the widget on the home screen.
        self.default_x, self.default_y = 544, 200
        self.default_size = 4.5
        # Artwork for the control and its drop shadow.
        self.img = "res/MapsetPath.png"
        self.img_shadow = "res/MapsetPath_Shadow.png"
        super().setup()
| 19.3125 | 47 | 0.721683 | from PyQt5.QtWidgets import QLabel
from Parents import Button, PathImage
class OsrPath(PathImage):
def __init__(self, parent):
super(OsrPath, self).__init__(parent)
self.default_x = 544
self.default_y = 165
self.default_size = 4.5
self.img = "res/OsrPath.png"
self.img_shadow = "res/OsrPath_Shadow.png"
super().setup()
class MapSetPath(PathImage):
def __init__(self, parent):
super(MapSetPath, self).__init__(parent)
self.default_x = 544
self.default_y = 200
self.default_size = 4.5
self.img = "res/MapsetPath.png"
self.img_shadow = "res/MapsetPath_Shadow.png"
super().setup()
| true | true |
f7f468a38ca608590b664dbccab5f34a070f450e | 2,315 | py | Python | dependencies/amitools-0.1.0/amitools/vamos/lib/lexec/SemaphoreManager.py | limi/AGSImager | d3771800308e61a7a07df4a9b361e5bd5ba9e409 | [
"MIT"
] | null | null | null | dependencies/amitools-0.1.0/amitools/vamos/lib/lexec/SemaphoreManager.py | limi/AGSImager | d3771800308e61a7a07df4a9b361e5bd5ba9e409 | [
"MIT"
] | null | null | null | dependencies/amitools-0.1.0/amitools/vamos/lib/lexec/SemaphoreManager.py | limi/AGSImager | d3771800308e61a7a07df4a9b361e5bd5ba9e409 | [
"MIT"
] | null | null | null | from ExecStruct import SignalSemaphoreDef
from amitools.vamos.AccessStruct import AccessStruct
from amitools.vamos.Exceptions import *
class SemaphoreManager:
  """Bookkeeping for Exec SignalSemaphore structures in emulated memory.

  Semaphores are tracked by address in ``semaphores`` and, for semaphores
  registered via :meth:`AddSemaphore` only, by name in
  ``semaphores_by_name``.
  """

  # Amiga Exec node type value written into ss_Link.ln_Type.
  NT_SIGNALSEM = 15

  def __init__(self, alloc, mem):
    self.alloc = alloc
    self.mem = mem
    # address -> Semaphore for every semaphore seen so far
    self.semaphores = {}
    # name -> Semaphore, populated only by AddSemaphore()
    self.semaphores_by_name = {}

  def InitSemaphore(self,addr):
    """Initialize the SignalSemaphore struct at ``addr`` and register it."""
    semaphore = AccessStruct(self.mem,SignalSemaphoreDef,struct_addr=addr)
    semaphore.w_s("ss_Owner",0)
    semaphore.w_s("ss_NestCount",0)
    semaphore.w_s("ss_QueueCount",0xffff)
    semaphore.w_s("ss_Link.ln_Type",self.NT_SIGNALSEM)
    # Set up the wait queue as an empty Exec MinList: head points at the
    # tail field, tail is NULL, tail-pred points back at the head field.
    semaphore.w_s("ss_WaitQueue.mlh_Head",semaphore.s_get_addr("ss_WaitQueue.mlh_Tail"))
    semaphore.w_s("ss_WaitQueue.mlh_Tail",0)
    semaphore.w_s("ss_WaitQueue.mlh_TailPred",semaphore.s_get_addr("ss_WaitQueue.mlh_Head"))
    return self.register_semaphore(addr)

  def AddSemaphore(self,addr,name):
    """Initialize the semaphore at ``addr`` and make it findable by name."""
    semaphore = self.InitSemaphore(addr)
    semaphore.name = name
    self.semaphores_by_name[name] = semaphore
    return semaphore

  def RemSemaphore(self,addr):
    # NOTE(review): only drops the name-index entry; the semaphore stays in
    # self.semaphores. Also raises KeyError for semaphores that were never
    # registered by name (auto-named via register_semaphore) -- confirm
    # intent.
    if self.has_semaphore(addr):
      semaphore = self.semaphores[addr]
      del self.semaphores_by_name[semaphore.name]

  def FindSemaphore(self,name):
    """Return the semaphore registered under ``name``, or None."""
    if name in self.semaphores_by_name:
      semaphore = self.semaphores_by_name[name]
      return semaphore
    else:
      return None

  def has_semaphore(self, addr):
    # True if a semaphore is already registered at this address.
    return addr in self.semaphores

  def register_semaphore(self,addr):
    """Return the Semaphore at ``addr``, creating an auto-named one if new."""
    if not self.has_semaphore(addr):
      name = "Semaphore@%06x" % addr
      semaphore = Semaphore(name,self,addr = addr)
      self.semaphores[addr] = semaphore
      return semaphore
    else:
      return self.semaphores[addr]

  def unregister_semaphore(self, addr):
    """Drop the semaphore at ``addr`` from both indices.

    :raises VamosInternalError: if no semaphore is registered at ``addr``.
    """
    # NOTE(review): same KeyError hazard as RemSemaphore when the semaphore
    # was auto-named and never added to semaphores_by_name.
    if addr in self.semaphores:
      semaphore = self.semaphores[addr]
      del self.semaphores_by_name[semaphore.name]
      del self.semaphores[addr]
    else:
      raise VamosInternalError("Invalid Semaphore remove: %06x" % addr)
class Semaphore:
  """Handle describing one semaphore tracked by SemaphoreManager."""

  def __init__(self, name, semaphore_mgr, addr=None, mem=None):
    self.name = name
    self.semaphore_mgr = semaphore_mgr
    if mem is not None:
      # Backed by an allocated memory object: keep it and use its address.
      self.mem = mem
      self.addr = mem.addr
    else:
      self.addr = addr

  def __str__(self):
    return "<Semaphore:name=%s,addr=%06x>" % (self.name, self.addr)
| 30.460526 | 92 | 0.708423 | from ExecStruct import SignalSemaphoreDef
from amitools.vamos.AccessStruct import AccessStruct
from amitools.vamos.Exceptions import *
class SemaphoreManager:
NT_SIGNALSEM = 15
def __init__(self, alloc, mem):
self.alloc = alloc
self.mem = mem
self.semaphores = {}
self.semaphores_by_name = {}
def InitSemaphore(self,addr):
semaphore = AccessStruct(self.mem,SignalSemaphoreDef,struct_addr=addr)
semaphore.w_s("ss_Owner",0)
semaphore.w_s("ss_NestCount",0)
semaphore.w_s("ss_QueueCount",0xffff)
semaphore.w_s("ss_Link.ln_Type",self.NT_SIGNALSEM)
semaphore.w_s("ss_WaitQueue.mlh_Head",semaphore.s_get_addr("ss_WaitQueue.mlh_Tail"))
semaphore.w_s("ss_WaitQueue.mlh_Tail",0)
semaphore.w_s("ss_WaitQueue.mlh_TailPred",semaphore.s_get_addr("ss_WaitQueue.mlh_Head"))
return self.register_semaphore(addr)
def AddSemaphore(self,addr,name):
semaphore = self.InitSemaphore(addr)
semaphore.name = name
self.semaphores_by_name[name] = semaphore
return semaphore
def RemSemaphore(self,addr):
if self.has_semaphore(addr):
semaphore = self.semaphores[addr]
del self.semaphores_by_name[semaphore.name]
def FindSemaphore(self,name):
if name in self.semaphores_by_name:
semaphore = self.semaphores_by_name[name]
return semaphore
else:
return None
def has_semaphore(self, addr):
return addr in self.semaphores
def register_semaphore(self,addr):
if not self.has_semaphore(addr):
name = "Semaphore@%06x" % addr
semaphore = Semaphore(name,self,addr = addr)
self.semaphores[addr] = semaphore
return semaphore
else:
return self.semaphores[addr]
def unregister_semaphore(self, addr):
if addr in self.semaphores:
semaphore = self.semaphores[addr]
del self.semaphores_by_name[semaphore.name]
del self.semaphores[addr]
else:
raise VamosInternalError("Invalid Semaphore remove: %06x" % addr)
class Semaphore:
def __init__(self, name, semaphore_mgr, addr=None, mem=None):
self.name = name
self.semaphore_mgr = semaphore_mgr
if mem is None:
self.addr = addr
else:
self.addr = mem.addr
self.mem = mem
def __str__(self):
return "<Semaphore:name=%s,addr=%06x>" % (self.name, self.addr)
| true | true |
f7f46912996ab22bb0a1986cf67d8293a3079047 | 364 | py | Python | _bak/v0.2.0/Util/Message.py | hello-sea/DeepLearning_Wavelet-LSTM | 1606c16005a5338333b4943f782f57311c6b5e49 | [
"MIT"
] | 95 | 2018-04-13T03:34:51.000Z | 2022-03-30T10:10:28.000Z | _bak/v0.2.0/Util/Message.py | Dlaiven/DeepLearning_Wavelet-LSTM | 1606c16005a5338333b4943f782f57311c6b5e49 | [
"MIT"
] | 3 | 2019-07-18T11:19:53.000Z | 2020-12-28T05:45:19.000Z | _bak/v0.2.0/Util/Message.py | Dlaiven/DeepLearning_Wavelet-LSTM | 1606c16005a5338333b4943f782f57311c6b5e49 | [
"MIT"
] | 35 | 2018-07-27T09:21:18.000Z | 2021-11-30T02:13:01.000Z | # -*- coding: utf-8 -*-
class Message():
    """Registry of user-facing status and error message strings."""

    def __init__(self):
        # Keyed by the numeric status 0 or by an 'Error(NNNN)' code string.
        table = {0: '0'}
        # Application runtime failure.
        table['Error(0001)'] = 'Error(0001): 软件运行错误,请连续管理员!'
        # Seg data file problems.
        table['Error(1000)'] = 'Error(1000): Seg文件读取错误,请检测Seg数据!'
        table['Error(1001)'] = 'Error(1001): Seg文件数据为空,请检测Seg数据!'
        self.dict = table
| 22.75 | 62 | 0.442308 |
class Message():
def __init__(self):
self.dict = {
0 : '0',
'Error(0001)': 'Error(0001): 软件运行错误,请连续管理员!',
'Error(1000)': 'Error(1000): Seg文件读取错误,请检测Seg数据!',
'Error(1001)': 'Error(1001): Seg文件数据为空,请检测Seg数据!',
}
| true | true |
f7f469142409e4dc1d3fc30988bf4b7a4cbd4538 | 1,231 | py | Python | analyze_results.py | tawatts1/chess | cb2917ec689bb8db1dc2436ed2ef6463319876a7 | [
"MIT"
] | null | null | null | analyze_results.py | tawatts1/chess | cb2917ec689bb8db1dc2436ed2ef6463319876a7 | [
"MIT"
] | null | null | null | analyze_results.py | tawatts1/chess | cb2917ec689bb8db1dc2436ed2ef6463319876a7 | [
"MIT"
] | null | null | null | import os
import numpy as np
import matplotlib.pyplot as plt
def get_data(fname):
    """Parse a text file of comma-separated floats into a 2-D numpy array.

    Each line becomes one row; every comma-separated token must parse as
    a float.
    """
    with open(fname, 'r') as f:
        rows = [[float(token) for token in line.split(',')]
                for line in f]
    return np.array(rows)
# For every results file, plot a histogram of white's per-game performance
# and of game length, annotate the mean with a 95% confidence interval, and
# save the figure under images/.
files = os.listdir("game_results")
for file in files:
    # Skip hidden files such as .DS_Store.
    if file[0] != '.':
        print(file)
        data = get_data(f"game_results/{file}")
        fig, (ax1, ax2) = plt.subplots(1,2)
        fig.suptitle(file)
        # Column 0 is taken as white's per-game result, column 1 as the
        # game length -- presumably, per the axis titles below (TODO confirm).
        mean = np.mean(data[:,0])
        print(np.shape(data))
        # deviation for 95 pct confidence interval:
        dev = 1.96*np.std(data[:,0])/ np.sqrt( np.shape(data)[0] )
        c0,c1 = mean-dev, mean+dev
        ax1.hist(data[:,0])
        ax1.set_title("White performance")
        #ax1.figtext(.5,.01,f"{file} and such and such")
        ax2.hist(data[:,1])
        ax2.set_title("Game length")
        #plt.figtext(.5,.01,f"{file} and such and such")
        plt.figtext(.5,.03,f"The mean of white's performance is {mean:.3f}, with CI ({c0:.3f}, {c1:.3f}). ", wrap=True, ha="center")
        # NOTE(review): assumes an existing "images/" directory; savefig
        # does not create it.
        plt.savefig("images/" + file+".png", dpi = 300)
        #plt.show()
| 29.309524 | 133 | 0.538587 | import os
import numpy as np
import matplotlib.pyplot as plt
def get_data(fname):
out = []
with open(fname, 'r') as f:
for line in f:
datum = []
for entry in line.split(','):
datum.append(float(entry))
out.append(datum)
return np.array(out)
files = os.listdir("game_results")
for file in files:
if file[0] != '.':
print(file)
data = get_data(f"game_results/{file}")
fig, (ax1, ax2) = plt.subplots(1,2)
fig.suptitle(file)
mean = np.mean(data[:,0])
print(np.shape(data))
dev = 1.96*np.std(data[:,0])/ np.sqrt( np.shape(data)[0] )
c0,c1 = mean-dev, mean+dev
ax1.hist(data[:,0])
ax1.set_title("White performance")
ax2.hist(data[:,1])
ax2.set_title("Game length")
plt.figtext(.5,.03,f"The mean of white's performance is {mean:.3f}, with CI ({c0:.3f}, {c1:.3f}). ", wrap=True, ha="center")
plt.savefig("images/" + file+".png", dpi = 300)
#plt.show()
| true | true |
f7f469558fc3165e13feacf537a2a5e512eb5e76 | 81,670 | py | Python | ironic/tests/unit/common/test_pxe_utils.py | mpardhi23/ironic | 66b07398310db1b4c26e1ba9fda247328478ed67 | [
"Apache-2.0"
] | null | null | null | ironic/tests/unit/common/test_pxe_utils.py | mpardhi23/ironic | 66b07398310db1b4c26e1ba9fda247328478ed67 | [
"Apache-2.0"
] | null | null | null | ironic/tests/unit/common/test_pxe_utils.py | mpardhi23/ironic | 66b07398310db1b4c26e1ba9fda247328478ed67 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2014 Rackspace, Inc
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import tempfile
from ironic_lib import utils as ironic_utils
import mock
from oslo_config import cfg
from oslo_utils import fileutils
from oslo_utils import uuidutils
import six
from ironic.common import exception
from ironic.common.glance_service import image_service
from ironic.common import pxe_utils
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import ipxe
from ironic.drivers.modules import pxe
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as object_utils
# Global oslo.config handle used throughout the tests below.
CONF = cfg.CONF
# Canned PXE instance/driver info dictionaries shared by the test fixtures.
INST_INFO_DICT = db_utils.get_test_pxe_instance_info()
DRV_INFO_DICT = db_utils.get_test_pxe_driver_info()
DRV_INTERNAL_INFO_DICT = db_utils.get_test_pxe_driver_internal_info()
# Prevent /httpboot validation on creating the node
@mock.patch('ironic.drivers.modules.pxe.PXEBoot.__init__', lambda self: None)
class TestPXEUtils(db_base.DbTestCase):
    def setUp(self):
        """Build PXE/iPXE option fixtures and create a test node."""
        super(TestPXEUtils, self).setUp()
        # Baseline PXELinux (tftp path) options.
        self.pxe_options = {
            'deployment_aki_path': u'/tftpboot/1be26c0b-03f2-4d2e-ae87-'
                                   u'c02d7f33c123/deploy_kernel',
            'aki_path': u'/tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/'
                        u'kernel',
            'ari_path': u'/tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/'
                        u'ramdisk',
            'pxe_append_params': 'test_param',
            'deployment_ari_path': u'/tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7'
                                   u'f33c123/deploy_ramdisk',
            'ipa-api-url': 'http://192.168.122.184:6385',
            'ipxe_timeout': 0,
            'ramdisk_opts': 'ramdisk_param',
        }
        # iPXE variant: same options, but images are served over HTTP.
        self.ipxe_options = self.pxe_options.copy()
        self.ipxe_options.update({
            'deployment_aki_path': 'http://1.2.3.4:1234/deploy_kernel',
            'deployment_ari_path': 'http://1.2.3.4:1234/deploy_ramdisk',
            'aki_path': 'http://1.2.3.4:1234/kernel',
            'ari_path': 'http://1.2.3.4:1234/ramdisk',
            'initrd_filename': 'deploy_ramdisk',
        })
        self.ipxe_options_timeout = self.ipxe_options.copy()
        self.ipxe_options_timeout.update({
            'ipxe_timeout': 120
        })
        # Boot-from-volume variants (iSCSI), with and without extra volumes.
        self.ipxe_options_boot_from_volume_no_extra_volume = \
            self.ipxe_options.copy()
        self.ipxe_options_boot_from_volume_no_extra_volume.update({
            'boot_from_volume': True,
            'iscsi_boot_url': 'iscsi:fake_host::3260:0:fake_iqn',
            'iscsi_initiator_iqn': 'fake_iqn',
            'iscsi_volumes': [],
            'username': 'fake_username',
            'password': 'fake_password',
        })
        self.ipxe_options_boot_from_volume_extra_volume = \
            self.ipxe_options.copy()
        self.ipxe_options_boot_from_volume_extra_volume.update({
            'boot_from_volume': True,
            'iscsi_boot_url': 'iscsi:fake_host::3260:0:fake_iqn',
            'iscsi_initiator_iqn': 'fake_iqn',
            'iscsi_volumes': [{'url': 'iscsi:fake_host::3260:1:fake_iqn',
                               'username': 'fake_username_1',
                               'password': 'fake_password_1',
                               }],
            'username': 'fake_username',
            'password': 'fake_password',
        })
        # Boot-from-volume configs boot from the volume, not a ramdisk.
        self.ipxe_options_boot_from_volume_no_extra_volume.pop(
            'initrd_filename', None)
        self.ipxe_options_boot_from_volume_extra_volume.pop(
            'initrd_filename', None)
        self.node = object_utils.create_test_node(self.context)
    def test_default_pxe_config(self):
        """The default PXELinux template renders to the canned fixture."""
        rendered_template = utils.render_template(
            CONF.pxe.pxe_config_template,
            {'pxe_options': self.pxe_options,
             'ROOT': '{{ ROOT }}',
             'DISK_IDENTIFIER': '{{ DISK_IDENTIFIER }}'})
        with open('ironic/tests/unit/drivers/pxe_config.template') as f:
            expected_template = f.read().rstrip()
        self.assertEqual(six.text_type(expected_template), rendered_template)
    def test_default_ipxe_boot_script(self):
        """The iPXE boot script template renders to the canned fixture."""
        rendered_template = utils.render_template(
            CONF.pxe.ipxe_boot_script,
            {'ipxe_for_mac_uri': 'pxelinux.cfg/'})
        with open('ironic/tests/unit/drivers/boot.ipxe') as f:
            expected_template = f.read().rstrip()
        self.assertEqual(six.text_type(expected_template), rendered_template)
    def test_default_ipxe_config(self):
        """The iPXE config template renders to the canned fixture."""
        # NOTE(lucasagomes): iPXE is just an extension of the PXE driver,
        # it doesn't have it's own configuration option for template.
        # More info:
        # https://docs.openstack.org/ironic/latest/install/
        self.config(
            pxe_config_template='ironic/drivers/modules/ipxe_config.template',
            group='pxe'
        )
        self.config(http_url='http://1.2.3.4:1234', group='deploy')
        rendered_template = utils.render_template(
            CONF.pxe.pxe_config_template,
            {'pxe_options': self.ipxe_options,
             'ROOT': '{{ ROOT }}',
             'DISK_IDENTIFIER': '{{ DISK_IDENTIFIER }}'})
        templ_file = 'ironic/tests/unit/drivers/ipxe_config.template'
        with open(templ_file) as f:
            expected_template = f.read().rstrip()
        self.assertEqual(six.text_type(expected_template), rendered_template)
    def test_default_ipxe_timeout_config(self):
        """The iPXE template with a non-zero timeout renders the fixture."""
        # NOTE(lucasagomes): iPXE is just an extension of the PXE driver,
        # it doesn't have it's own configuration option for template.
        # More info:
        # https://docs.openstack.org/ironic/latest/install/
        self.config(
            pxe_config_template='ironic/drivers/modules/ipxe_config.template',
            group='pxe'
        )
        self.config(http_url='http://1.2.3.4:1234', group='deploy')
        rendered_template = utils.render_template(
            CONF.pxe.pxe_config_template,
            {'pxe_options': self.ipxe_options_timeout,
             'ROOT': '{{ ROOT }}',
             'DISK_IDENTIFIER': '{{ DISK_IDENTIFIER }}'})
        templ_file = 'ironic/tests/unit/drivers/ipxe_config_timeout.template'
        with open(templ_file) as f:
            expected_template = f.read().rstrip()
        self.assertEqual(six.text_type(expected_template), rendered_template)
    def test_default_ipxe_boot_from_volume_config(self):
        """iPXE boot-from-volume with an extra volume renders the fixture."""
        self.config(
            pxe_config_template='ironic/drivers/modules/ipxe_config.template',
            group='pxe'
        )
        self.config(http_url='http://1.2.3.4:1234', group='deploy')
        rendered_template = utils.render_template(
            CONF.pxe.pxe_config_template,
            {'pxe_options': self.ipxe_options_boot_from_volume_extra_volume,
             'ROOT': '{{ ROOT }}',
             'DISK_IDENTIFIER': '{{ DISK_IDENTIFIER }}'})
        templ_file = 'ironic/tests/unit/drivers/' \
                     'ipxe_config_boot_from_volume_extra_volume.template'
        with open(templ_file) as f:
            expected_template = f.read().rstrip()
        self.assertEqual(six.text_type(expected_template), rendered_template)
    def test_default_ipxe_boot_from_volume_config_no_extra_volumes(self):
        """iPXE boot-from-volume without extra volumes renders the fixture."""
        self.config(
            pxe_config_template='ironic/drivers/modules/ipxe_config.template',
            group='pxe'
        )
        self.config(http_url='http://1.2.3.4:1234', group='deploy')
        pxe_options = self.ipxe_options_boot_from_volume_no_extra_volume
        pxe_options['iscsi_volumes'] = []
        rendered_template = utils.render_template(
            CONF.pxe.pxe_config_template,
            {'pxe_options': pxe_options,
             'ROOT': '{{ ROOT }}',
             'DISK_IDENTIFIER': '{{ DISK_IDENTIFIER }}'})
        templ_file = 'ironic/tests/unit/drivers/' \
                     'ipxe_config_boot_from_volume_no_extra_volumes.template'
        with open(templ_file) as f:
            expected_template = f.read().rstrip()
        self.assertEqual(six.text_type(expected_template), rendered_template)
    def test_default_grub_config(self):
        """The UEFI GRUB template renders to the canned fixture."""
        pxe_opts = self.pxe_options
        pxe_opts['boot_mode'] = 'uefi'
        pxe_opts['tftp_server'] = '192.0.2.1'
        rendered_template = utils.render_template(
            CONF.pxe.uefi_pxe_config_template,
            {'pxe_options': pxe_opts,
             'ROOT': '(( ROOT ))',
             'DISK_IDENTIFIER': '(( DISK_IDENTIFIER ))'})
        templ_file = 'ironic/tests/unit/drivers/pxe_grub_config.template'
        with open(templ_file) as f:
            expected_template = f.read().rstrip()
        self.assertEqual(six.text_type(expected_template), rendered_template)
    @mock.patch('ironic.common.utils.create_link_without_raise', autospec=True)
    @mock.patch('ironic_lib.utils.unlink_without_raise', autospec=True)
    def test__write_mac_pxe_configs(self, unlink_mock, create_link_mock):
        """Per-MAC PXELinux links are recreated for every port of the node."""
        port_1 = object_utils.create_test_port(
            self.context, node_id=self.node.id,
            address='11:22:33:44:55:66', uuid=uuidutils.generate_uuid())
        port_2 = object_utils.create_test_port(
            self.context, node_id=self.node.id,
            address='11:22:33:44:55:67', uuid=uuidutils.generate_uuid())
        # PXELinux expects '01-' (ARP hardware type) plus dash-separated MAC.
        create_link_calls = [
            mock.call(u'../1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
                      '/tftpboot/pxelinux.cfg/01-11-22-33-44-55-66'),
            mock.call(u'1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
                      '/tftpboot/11:22:33:44:55:66.conf'),
            mock.call(u'../1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
                      '/tftpboot/pxelinux.cfg/01-11-22-33-44-55-67'),
            mock.call(u'1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
                      '/tftpboot/11:22:33:44:55:67.conf')
        ]
        unlink_calls = [
            mock.call('/tftpboot/pxelinux.cfg/01-11-22-33-44-55-66'),
            mock.call('/tftpboot/11:22:33:44:55:66.conf'),
            mock.call('/tftpboot/pxelinux.cfg/01-11-22-33-44-55-67'),
            mock.call('/tftpboot/11:22:33:44:55:67.conf')
        ]
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.ports = [port_1, port_2]
            pxe_utils._link_mac_pxe_configs(task)
        # Old links are removed before the new ones are created.
        unlink_mock.assert_has_calls(unlink_calls)
        create_link_mock.assert_has_calls(create_link_calls)
    @mock.patch('ironic.common.utils.create_link_without_raise', autospec=True)
    @mock.patch('ironic_lib.utils.unlink_without_raise', autospec=True)
    def test__write_infiniband_mac_pxe_configs(
            self, unlink_mock, create_link_mock):
        """InfiniBand ports (client-id set) get a '20-' link prefix."""
        client_id1 = (
            '20:00:55:04:01:fe:80:00:00:00:00:00:00:00:02:c9:02:00:23:13:92')
        port_1 = object_utils.create_test_port(
            self.context, node_id=self.node.id,
            address='11:22:33:44:55:66', uuid=uuidutils.generate_uuid(),
            extra={'client-id': client_id1})
        client_id2 = (
            '20:00:55:04:01:fe:80:00:00:00:00:00:00:00:02:c9:02:00:23:45:12')
        port_2 = object_utils.create_test_port(
            self.context, node_id=self.node.id,
            address='11:22:33:44:55:67', uuid=uuidutils.generate_uuid(),
            extra={'client-id': client_id2})
        create_link_calls = [
            mock.call(u'../1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
                      '/tftpboot/pxelinux.cfg/20-11-22-33-44-55-66'),
            mock.call(u'1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
                      '/tftpboot/11:22:33:44:55:66.conf'),
            mock.call(u'../1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
                      '/tftpboot/pxelinux.cfg/20-11-22-33-44-55-67'),
            mock.call(u'1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
                      '/tftpboot/11:22:33:44:55:67.conf')
        ]
        unlink_calls = [
            mock.call('/tftpboot/pxelinux.cfg/20-11-22-33-44-55-66'),
            mock.call('/tftpboot/11:22:33:44:55:66.conf'),
            mock.call('/tftpboot/pxelinux.cfg/20-11-22-33-44-55-67'),
            mock.call('/tftpboot/11:22:33:44:55:67.conf')
        ]
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.ports = [port_1, port_2]
            pxe_utils._link_mac_pxe_configs(task)
        unlink_mock.assert_has_calls(unlink_calls)
        create_link_mock.assert_has_calls(create_link_calls)
    @mock.patch('ironic.common.utils.create_link_without_raise', autospec=True)
    @mock.patch('ironic_lib.utils.unlink_without_raise', autospec=True)
    def test__write_mac_ipxe_configs(self, unlink_mock, create_link_mock):
        """With iPXE enabled, links land in httpboot without the '01-' prefix."""
        self.config(ipxe_enabled=True, group='pxe')
        port_1 = object_utils.create_test_port(
            self.context, node_id=self.node.id,
            address='11:22:33:44:55:66', uuid=uuidutils.generate_uuid())
        port_2 = object_utils.create_test_port(
            self.context, node_id=self.node.id,
            address='11:22:33:44:55:67', uuid=uuidutils.generate_uuid())
        create_link_calls = [
            mock.call(u'../1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
                      '/httpboot/pxelinux.cfg/11-22-33-44-55-66'),
            mock.call(u'1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
                      '/httpboot/11:22:33:44:55:66.conf'),
            mock.call(u'../1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
                      '/httpboot/pxelinux.cfg/11-22-33-44-55-67'),
            mock.call(u'1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
                      '/httpboot/11:22:33:44:55:67.conf')
        ]
        unlink_calls = [
            mock.call('/httpboot/pxelinux.cfg/11-22-33-44-55-66'),
            mock.call('/httpboot/11:22:33:44:55:66.conf'),
            mock.call('/httpboot/pxelinux.cfg/11-22-33-44-55-67'),
            mock.call('/httpboot/11:22:33:44:55:67.conf'),
        ]
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.ports = [port_1, port_2]
            pxe_utils._link_mac_pxe_configs(task, ipxe_enabled=True)
        unlink_mock.assert_has_calls(unlink_calls)
        create_link_mock.assert_has_calls(create_link_calls)
    @mock.patch('ironic.common.utils.create_link_without_raise', autospec=True)
    @mock.patch('ironic_lib.utils.unlink_without_raise', autospec=True)
    @mock.patch('ironic.common.dhcp_factory.DHCPFactory.provider',
                autospec=True)
    def test__link_ip_address_pxe_configs(self, provider_mock, unlink_mock,
                                          create_link_mock):
        """A per-IP config link is recreated using the DHCP provider's IP."""
        ip_address = '10.10.0.1'
        address = "aa:aa:aa:aa:aa:aa"
        object_utils.create_test_port(self.context, node_id=self.node.id,
                                      address=address)
        provider_mock.get_ip_addresses.return_value = [ip_address]
        create_link_calls = [
            mock.call(u'1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
                      u'/tftpboot/10.10.0.1.conf'),
        ]
        with task_manager.acquire(self.context, self.node.uuid) as task:
            pxe_utils._link_ip_address_pxe_configs(task, False)
        # The stale per-IP link is removed before the new one is created.
        unlink_mock.assert_called_once_with('/tftpboot/10.10.0.1.conf')
        create_link_mock.assert_has_calls(create_link_calls)
    @mock.patch.object(os, 'chmod', autospec=True)
    @mock.patch('ironic.common.utils.write_to_file', autospec=True)
    @mock.patch('ironic.common.utils.render_template', autospec=True)
    @mock.patch('oslo_utils.fileutils.ensure_tree', autospec=True)
    def test_create_pxe_config(self, ensure_tree_mock, render_mock,
                               write_mock, chmod_mock):
        """create_pxe_config renders and writes the node's PXE config."""
        with task_manager.acquire(self.context, self.node.uuid) as task:
            pxe_utils.create_pxe_config(task, self.pxe_options,
                                        CONF.pxe.pxe_config_template)
            render_mock.assert_called_with(
                CONF.pxe.pxe_config_template,
                {'pxe_options': self.pxe_options,
                 'ROOT': '{{ ROOT }}',
                 'DISK_IDENTIFIER': '{{ DISK_IDENTIFIER }}'}
            )
        node_dir = os.path.join(CONF.pxe.tftp_root, self.node.uuid)
        pxe_dir = os.path.join(CONF.pxe.tftp_root, 'pxelinux.cfg')
        ensure_calls = [
            mock.call(node_dir), mock.call(pxe_dir),
        ]
        ensure_tree_mock.assert_has_calls(ensure_calls)
        # No dir_permission configured, so no chmod should happen.
        chmod_mock.assert_not_called()
        pxe_cfg_file_path = pxe_utils.get_pxe_config_file_path(self.node.uuid)
        write_mock.assert_called_with(pxe_cfg_file_path,
                                      render_mock.return_value)
    @mock.patch.object(os, 'chmod', autospec=True)
    @mock.patch('ironic.common.utils.write_to_file', autospec=True)
    @mock.patch('ironic.common.utils.render_template', autospec=True)
    @mock.patch('oslo_utils.fileutils.ensure_tree', autospec=True)
    def test_create_pxe_config_set_dir_permission(self, ensure_tree_mock,
                                                  render_mock,
                                                  write_mock, chmod_mock):
        """With [pxe]dir_permission set, created dirs are chmod'ed to it."""
        self.config(dir_permission=0o755, group='pxe')
        with task_manager.acquire(self.context, self.node.uuid) as task:
            pxe_utils.create_pxe_config(task, self.pxe_options,
                                        CONF.pxe.pxe_config_template)
            render_mock.assert_called_with(
                CONF.pxe.pxe_config_template,
                {'pxe_options': self.pxe_options,
                 'ROOT': '{{ ROOT }}',
                 'DISK_IDENTIFIER': '{{ DISK_IDENTIFIER }}'}
            )
        node_dir = os.path.join(CONF.pxe.tftp_root, self.node.uuid)
        pxe_dir = os.path.join(CONF.pxe.tftp_root, 'pxelinux.cfg')
        ensure_calls = [
            mock.call(node_dir), mock.call(pxe_dir),
        ]
        ensure_tree_mock.assert_has_calls(ensure_calls)
        chmod_calls = [mock.call(node_dir, 0o755), mock.call(pxe_dir, 0o755)]
        chmod_mock.assert_has_calls(chmod_calls)
        pxe_cfg_file_path = pxe_utils.get_pxe_config_file_path(self.node.uuid)
        write_mock.assert_called_with(pxe_cfg_file_path,
                                      render_mock.return_value)
    @mock.patch.object(os.path, 'isdir', autospec=True)
    @mock.patch.object(os, 'chmod', autospec=True)
    @mock.patch('ironic.common.utils.write_to_file', autospec=True)
    @mock.patch('ironic.common.utils.render_template', autospec=True)
    @mock.patch('oslo_utils.fileutils.ensure_tree', autospec=True)
    def test_create_pxe_config_existing_dirs(self, ensure_tree_mock,
                                             render_mock,
                                             write_mock, chmod_mock,
                                             isdir_mock):
        """When the target dirs already exist, no chmod is performed."""
        self.config(dir_permission=0o755, group='pxe')
        with task_manager.acquire(self.context, self.node.uuid) as task:
            # Pretend every directory already exists.
            isdir_mock.return_value = True
            pxe_utils.create_pxe_config(task, self.pxe_options,
                                        CONF.pxe.pxe_config_template)
            render_mock.assert_called_with(
                CONF.pxe.pxe_config_template,
                {'pxe_options': self.pxe_options,
                 'ROOT': '{{ ROOT }}',
                 'DISK_IDENTIFIER': '{{ DISK_IDENTIFIER }}'}
            )
        ensure_tree_mock.assert_has_calls([])
        chmod_mock.assert_not_called()
        isdir_mock.assert_has_calls([])
        pxe_cfg_file_path = pxe_utils.get_pxe_config_file_path(self.node.uuid)
        write_mock.assert_called_with(pxe_cfg_file_path,
                                      render_mock.return_value)
    @mock.patch.object(os, 'chmod', autospec=True)
    @mock.patch('ironic.common.pxe_utils._link_ip_address_pxe_configs',
                autospec=True)
    @mock.patch('ironic.common.utils.write_to_file', autospec=True)
    @mock.patch('ironic.common.utils.render_template', autospec=True)
    @mock.patch('oslo_utils.fileutils.ensure_tree', autospec=True)
    def test_create_pxe_config_uefi_grub(self, ensure_tree_mock, render_mock,
                                         write_mock, link_ip_configs_mock,
                                         chmod_mock):
        """UEFI boot mode uses the GRUB template and per-IP config links."""
        grub_tmplte = "ironic/drivers/modules/pxe_grub_config.template"
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node.properties['capabilities'] = 'boot_mode:uefi'
            pxe_utils.create_pxe_config(task, self.pxe_options,
                                        grub_tmplte)
            ensure_calls = [
                mock.call(os.path.join(CONF.pxe.tftp_root, self.node.uuid)),
                mock.call(os.path.join(CONF.pxe.tftp_root, 'pxelinux.cfg')),
            ]
            ensure_tree_mock.assert_has_calls(ensure_calls)
            chmod_mock.assert_not_called()
            # GRUB templates use (( )) placeholders instead of {{ }}.
            render_mock.assert_called_with(
                grub_tmplte,
                {'pxe_options': self.pxe_options,
                 'ROOT': '(( ROOT ))',
                 'DISK_IDENTIFIER': '(( DISK_IDENTIFIER ))'})
            link_ip_configs_mock.assert_called_once_with(task, False)
        pxe_cfg_file_path = pxe_utils.get_pxe_config_file_path(self.node.uuid)
        write_mock.assert_called_with(pxe_cfg_file_path,
                                      render_mock.return_value)
    @mock.patch.object(os, 'chmod', autospec=True)
    @mock.patch('ironic.common.pxe_utils._link_mac_pxe_configs',
                autospec=True)
    @mock.patch('ironic.common.pxe_utils._link_ip_address_pxe_configs',
                autospec=True)
    @mock.patch('ironic.common.utils.write_to_file', autospec=True)
    @mock.patch('ironic.common.utils.render_template', autospec=True)
    @mock.patch('oslo_utils.fileutils.ensure_tree', autospec=True)
    def test_create_pxe_config_uefi_mac_address(
            self, ensure_tree_mock, render_mock,
            write_mock, link_ip_configs_mock,
            link_mac_pxe_configs_mock, chmod_mock):
        """UEFI falls back to per-MAC links when per-IP linking fails."""
        # TODO(TheJulia): We should... like... fix the template to
        # enable mac address usage.....
        grub_tmplte = "ironic/drivers/modules/pxe_grub_config.template"
        # NOTE(review): 'side_efect' is a typo for 'side_effect', so this
        # exception is never raised by the mock and the fallback path is
        # not actually exercised -- fixing it may change the test outcome,
        # so it is flagged here rather than changed.
        link_ip_configs_mock.side_efect = exception.FailedToGetIPAddressOnPort(
            port_id='blah')
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node.properties['capabilities'] = 'boot_mode:uefi'
            pxe_utils.create_pxe_config(task, self.pxe_options,
                                        grub_tmplte)
            ensure_calls = [
                mock.call(os.path.join(CONF.pxe.tftp_root, self.node.uuid)),
                mock.call(os.path.join(CONF.pxe.tftp_root, 'pxelinux.cfg')),
            ]
            ensure_tree_mock.assert_has_calls(ensure_calls)
            chmod_mock.assert_not_called()
            render_mock.assert_called_with(
                grub_tmplte,
                {'pxe_options': self.pxe_options,
                 'ROOT': '(( ROOT ))',
                 'DISK_IDENTIFIER': '(( DISK_IDENTIFIER ))'})
            link_mac_pxe_configs_mock.assert_called_once_with(
                task, ipxe_enabled=False)
            link_ip_configs_mock.assert_called_once_with(task, False)
        pxe_cfg_file_path = pxe_utils.get_pxe_config_file_path(self.node.uuid)
        write_mock.assert_called_with(pxe_cfg_file_path,
                                      render_mock.return_value)
    @mock.patch.object(os, 'chmod', autospec=True)
    @mock.patch('ironic.common.pxe_utils._link_mac_pxe_configs', autospec=True)
    @mock.patch('ironic.common.utils.write_to_file', autospec=True)
    @mock.patch('ironic.common.utils.render_template', autospec=True)
    @mock.patch('oslo_utils.fileutils.ensure_tree', autospec=True)
    def test_create_pxe_config_uefi_ipxe(self, ensure_tree_mock, render_mock,
                                         write_mock, link_mac_pxe_mock,
                                         chmod_mock):
        """UEFI + iPXE writes under http_root and links per MAC address."""
        self.config(ipxe_enabled=True, group='pxe')
        ipxe_template = "ironic/drivers/modules/ipxe_config.template"
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node.properties['capabilities'] = 'boot_mode:uefi'
            pxe_utils.create_pxe_config(task, self.ipxe_options,
                                        ipxe_template, ipxe_enabled=True)
            ensure_calls = [
                mock.call(os.path.join(CONF.deploy.http_root, self.node.uuid)),
                mock.call(os.path.join(CONF.deploy.http_root, 'pxelinux.cfg')),
            ]
            ensure_tree_mock.assert_has_calls(ensure_calls)
            chmod_mock.assert_not_called()
            render_mock.assert_called_with(
                ipxe_template,
                {'pxe_options': self.ipxe_options,
                 'ROOT': '{{ ROOT }}',
                 'DISK_IDENTIFIER': '{{ DISK_IDENTIFIER }}'})
            link_mac_pxe_mock.assert_called_once_with(task, ipxe_enabled=True)
        pxe_cfg_file_path = pxe_utils.get_pxe_config_file_path(
            self.node.uuid, ipxe_enabled=True)
        write_mock.assert_called_with(pxe_cfg_file_path,
                                      render_mock.return_value)
@mock.patch('ironic.common.utils.rmtree_without_raise', autospec=True)
@mock.patch('ironic_lib.utils.unlink_without_raise', autospec=True)
def test_clean_up_pxe_config(self, unlink_mock, rmtree_mock):
address = "aa:aa:aa:aa:aa:aa"
object_utils.create_test_port(self.context, node_id=self.node.id,
address=address)
with task_manager.acquire(self.context, self.node.uuid) as task:
pxe_utils.clean_up_pxe_config(task)
ensure_calls = [
mock.call("/tftpboot/pxelinux.cfg/01-%s"
% address.replace(':', '-')),
mock.call("/tftpboot/%s.conf" % address)
]
unlink_mock.assert_has_calls(ensure_calls)
rmtree_mock.assert_called_once_with(
os.path.join(CONF.pxe.tftp_root, self.node.uuid))
@mock.patch.object(os.path, 'isfile', lambda path: False)
@mock.patch('ironic.common.utils.file_has_content', autospec=True)
@mock.patch('ironic.common.utils.write_to_file', autospec=True)
@mock.patch('ironic.common.utils.render_template', autospec=True)
def test_create_ipxe_boot_script(self, render_mock, write_mock,
file_has_content_mock):
render_mock.return_value = 'foo'
pxe_utils.create_ipxe_boot_script()
self.assertFalse(file_has_content_mock.called)
write_mock.assert_called_once_with(
os.path.join(CONF.deploy.http_root,
os.path.basename(CONF.pxe.ipxe_boot_script)),
'foo')
render_mock.assert_called_once_with(
CONF.pxe.ipxe_boot_script,
{'ipxe_for_mac_uri': 'pxelinux.cfg/'})
@mock.patch.object(os.path, 'isfile', lambda path: True)
@mock.patch('ironic.common.utils.file_has_content', autospec=True)
@mock.patch('ironic.common.utils.write_to_file', autospec=True)
@mock.patch('ironic.common.utils.render_template', autospec=True)
def test_create_ipxe_boot_script_copy_file_different(
self, render_mock, write_mock, file_has_content_mock):
file_has_content_mock.return_value = False
render_mock.return_value = 'foo'
pxe_utils.create_ipxe_boot_script()
file_has_content_mock.assert_called_once_with(
os.path.join(CONF.deploy.http_root,
os.path.basename(CONF.pxe.ipxe_boot_script)),
'foo')
write_mock.assert_called_once_with(
os.path.join(CONF.deploy.http_root,
os.path.basename(CONF.pxe.ipxe_boot_script)),
'foo')
render_mock.assert_called_once_with(
CONF.pxe.ipxe_boot_script,
{'ipxe_for_mac_uri': 'pxelinux.cfg/'})
@mock.patch.object(os.path, 'isfile', lambda path: True)
@mock.patch('ironic.common.utils.file_has_content', autospec=True)
@mock.patch('ironic.common.utils.write_to_file', autospec=True)
@mock.patch('ironic.common.utils.render_template', autospec=True)
def test_create_ipxe_boot_script_already_exists(self, render_mock,
write_mock,
file_has_content_mock):
file_has_content_mock.return_value = True
pxe_utils.create_ipxe_boot_script()
self.assertFalse(write_mock.called)
def test__get_pxe_mac_path(self):
mac = '00:11:22:33:44:55:66'
self.assertEqual('/tftpboot/pxelinux.cfg/01-00-11-22-33-44-55-66',
pxe_utils._get_pxe_mac_path(mac))
def test__get_pxe_mac_path_ipxe(self):
self.config(http_root='/httpboot', group='deploy')
mac = '00:11:22:33:AA:BB:CC'
self.assertEqual('/httpboot/pxelinux.cfg/00-11-22-33-aa-bb-cc',
pxe_utils._get_pxe_mac_path(mac, ipxe_enabled=True))
def test__get_pxe_ip_address_path(self):
ipaddress = '10.10.0.1'
self.assertEqual('/tftpboot/10.10.0.1.conf',
pxe_utils._get_pxe_ip_address_path(ipaddress))
def test_get_root_dir(self):
expected_dir = '/tftproot'
self.config(ipxe_enabled=False, group='pxe')
self.config(tftp_root=expected_dir, group='pxe')
self.assertEqual(expected_dir, pxe_utils.get_root_dir())
def test_get_root_dir_ipxe(self):
expected_dir = '/httpboot'
self.config(ipxe_enabled=True, group='pxe')
self.config(http_root=expected_dir, group='deploy')
self.assertEqual(expected_dir, pxe_utils.get_root_dir())
def test_get_pxe_config_file_path(self):
self.assertEqual(os.path.join(CONF.pxe.tftp_root,
self.node.uuid,
'config'),
pxe_utils.get_pxe_config_file_path(self.node.uuid))
    def _dhcp_options_for_instance(self, ip_version=4):
        """Check dhcp_options_for_instance() output for one IP version.

        Configures pxe.tftp_server / bootfile / tftp_root for the given
        ip_version and asserts the exact DHCP option list returned.

        :param ip_version: 4 or 6; selects the expected option set.
        """
        self.config(ip_version=ip_version, group='pxe')
        if ip_version == 4:
            self.config(tftp_server='192.0.2.1', group='pxe')
        elif ip_version == 6:
            self.config(tftp_server='ff80::1', group='pxe')
        self.config(pxe_bootfile_name='fake-bootfile', group='pxe')
        self.config(tftp_root='/tftp-path/', group='pxe')
        if ip_version == 6:
            # NOTE(TheJulia): DHCPv6 RFCs seem to indicate that the prior
            # options are not imported, although they may be supported
            # by vendors. The apparent proper option is to return a
            # URL in the field https://tools.ietf.org/html/rfc5970#section-3
            expected_info = [{'opt_name': '59',
                              'opt_value': 'tftp://[ff80::1]/fake-bootfile',
                              'ip_version': ip_version}]
        elif ip_version == 4:
            # DHCPv4 options: 67 bootfile, 210 path prefix, 66/150 TFTP
            # server, plus the dnsmasq-style server-ip-address entry.
            expected_info = [{'opt_name': '67',
                              'opt_value': 'fake-bootfile',
                              'ip_version': ip_version},
                             {'opt_name': '210',
                              'opt_value': '/tftp-path/',
                              'ip_version': ip_version},
                             {'opt_name': '66',
                              'opt_value': '192.0.2.1',
                              'ip_version': ip_version},
                             {'opt_name': '150',
                              'opt_value': '192.0.2.1',
                              'ip_version': ip_version},
                             {'opt_name': 'server-ip-address',
                              'opt_value': '192.0.2.1',
                              'ip_version': ip_version}
                             ]
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertEqual(expected_info,
                             pxe_utils.dhcp_options_for_instance(task))
    def test_dhcp_options_for_instance(self):
        """DHCPv4 options are produced for the default IPv4 setup."""
        self._dhcp_options_for_instance(ip_version=4)
    def test_dhcp_options_for_instance_ipv6(self):
        """DHCPv6 options are produced when pxe.ip_version is 6."""
        # NOTE(review): _dhcp_options_for_instance() sets tftp_server to
        # the same value for ip_version=6, so this config call looks
        # redundant — confirm before removing.
        self.config(tftp_server='ff80::1', group='pxe')
        self._dhcp_options_for_instance(ip_version=6)
def _test_get_kernel_ramdisk_info(self, expected_dir, mode='deploy'):
node_uuid = 'fake-node'
driver_info = {
'%s_kernel' % mode: 'glance://%s-kernel' % mode,
'%s_ramdisk' % mode: 'glance://%s-ramdisk' % mode,
}
expected = {}
for k, v in driver_info.items():
expected[k] = (v, expected_dir + '/fake-node/%s' % k)
kr_info = pxe_utils.get_kernel_ramdisk_info(node_uuid,
driver_info,
mode=mode)
self.assertEqual(expected, kr_info)
def test_get_kernel_ramdisk_info(self):
expected_dir = '/tftp'
self.config(tftp_root=expected_dir, group='pxe')
self._test_get_kernel_ramdisk_info(expected_dir)
def test_get_kernel_ramdisk_info_ipxe(self):
expected_dir = '/http'
self.config(ipxe_enabled=True, group='pxe')
self.config(http_root=expected_dir, group='deploy')
self._test_get_kernel_ramdisk_info(expected_dir)
def test_get_kernel_ramdisk_info_bad_driver_info(self):
self.config(tftp_root='/tftp', group='pxe')
node_uuid = 'fake-node'
driver_info = {}
self.assertRaises(KeyError,
pxe_utils.get_kernel_ramdisk_info,
node_uuid,
driver_info)
def test_get_rescue_kr_info(self):
expected_dir = '/tftp'
self.config(tftp_root=expected_dir, group='pxe')
self._test_get_kernel_ramdisk_info(expected_dir, mode='rescue')
def test_get_rescue_kr_info_ipxe(self):
expected_dir = '/http'
self.config(ipxe_enabled=True, group='pxe')
self.config(http_root=expected_dir, group='deploy')
self._test_get_kernel_ramdisk_info(expected_dir, mode='rescue')
def _dhcp_options_for_instance_ipxe(self, task, boot_file, ip_version=4):
self.config(ipxe_enabled=True, group='pxe')
self.config(ipxe_boot_script='/test/boot.ipxe', group='pxe')
self.config(tftp_root='/tftp-path/', group='pxe')
if ip_version == 4:
self.config(tftp_server='192.0.2.1', group='pxe')
self.config(http_url='http://192.0.3.2:1234', group='deploy')
self.config(ipxe_boot_script='/test/boot.ipxe', group='pxe')
elif ip_version == 6:
self.config(tftp_server='ff80::1', group='pxe')
self.config(http_url='http://[ff80::1]:1234', group='deploy')
self.config(dhcp_provider='isc', group='dhcp')
if ip_version == 6:
# NOTE(TheJulia): DHCPv6 RFCs seem to indicate that the prior
# options are not imported, although they may be supported
# by vendors. The apparent proper option is to return a
# URL in the field https://tools.ietf.org/html/rfc5970#section-3
expected_boot_script_url = 'http://[ff80::1]:1234/boot.ipxe'
expected_info = [{'opt_name': '!175,59',
'opt_value': 'tftp://[ff80::1]/fake-bootfile',
'ip_version': ip_version},
{'opt_name': '59',
'opt_value': expected_boot_script_url,
'ip_version': ip_version}]
elif ip_version == 4:
expected_boot_script_url = 'http://192.0.3.2:1234/boot.ipxe'
expected_info = [{'opt_name': '!175,67',
'opt_value': boot_file,
'ip_version': ip_version},
{'opt_name': '66',
'opt_value': '192.0.2.1',
'ip_version': ip_version},
{'opt_name': '150',
'opt_value': '192.0.2.1',
'ip_version': ip_version},
{'opt_name': '67',
'opt_value': expected_boot_script_url,
'ip_version': ip_version},
{'opt_name': 'server-ip-address',
'opt_value': '192.0.2.1',
'ip_version': ip_version}]
self.assertItemsEqual(expected_info,
pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=True))
self.config(dhcp_provider='neutron', group='dhcp')
if ip_version == 6:
# Boot URL variable set from prior test of isc parameters.
expected_info = [{'opt_name': 'tag:!ipxe6,59',
'opt_value': 'tftp://[ff80::1]/fake-bootfile',
'ip_version': ip_version},
{'opt_name': 'tag:ipxe6,59',
'opt_value': expected_boot_script_url,
'ip_version': ip_version}]
elif ip_version == 4:
expected_info = [{'opt_name': 'tag:!ipxe,67',
'opt_value': boot_file,
'ip_version': ip_version},
{'opt_name': '66',
'opt_value': '192.0.2.1',
'ip_version': ip_version},
{'opt_name': '150',
'opt_value': '192.0.2.1',
'ip_version': ip_version},
{'opt_name': 'tag:ipxe,67',
'opt_value': expected_boot_script_url,
'ip_version': ip_version},
{'opt_name': 'server-ip-address',
'opt_value': '192.0.2.1',
'ip_version': ip_version}]
self.assertItemsEqual(expected_info,
pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=True))
    def test_dhcp_options_for_instance_ipxe_bios(self):
        """BIOS boot uses pxe_bootfile_name in the iPXE DHCP options."""
        self.config(ip_version=4, group='pxe')
        boot_file = 'fake-bootfile-bios'
        self.config(pxe_bootfile_name=boot_file, group='pxe')
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self._dhcp_options_for_instance_ipxe(task, boot_file)
    def test_dhcp_options_for_instance_ipxe_uefi(self):
        """UEFI boot uses uefi_pxe_bootfile_name in the DHCP options."""
        self.config(ip_version=4, group='pxe')
        boot_file = 'fake-bootfile-uefi'
        self.config(uefi_pxe_bootfile_name=boot_file, group='pxe')
        with task_manager.acquire(self.context, self.node.uuid) as task:
            # The uefi bootfile is selected via the node's boot_mode
            # capability.
            task.node.properties['capabilities'] = 'boot_mode:uefi'
            self._dhcp_options_for_instance_ipxe(task, boot_file)
    def test_dhcp_options_for_ipxe_ipv6(self):
        """iPXE DHCPv6 options are produced when pxe.ip_version is 6."""
        self.config(ip_version=6, group='pxe')
        boot_file = 'fake-bootfile'
        self.config(pxe_bootfile_name=boot_file, group='pxe')
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self._dhcp_options_for_instance_ipxe(task, boot_file, ip_version=6)
    @mock.patch('ironic.common.utils.rmtree_without_raise', autospec=True)
    @mock.patch('ironic_lib.utils.unlink_without_raise', autospec=True)
    @mock.patch('ironic.common.dhcp_factory.DHCPFactory.provider',
                autospec=True)
    def test_clean_up_pxe_config_uefi(self, provider_mock, unlink_mock,
                                      rmtree_mock):
        """UEFI cleanup removes IP-based, MAC-based and .conf files."""
        ip_address = '10.10.0.1'
        address = "aa:aa:aa:aa:aa:aa"
        properties = {'capabilities': 'boot_mode:uefi'}
        object_utils.create_test_port(self.context, node_id=self.node.id,
                                      address=address)
        # The DHCP provider supplies the port IP used for the IP-based
        # config file name.
        provider_mock.get_ip_addresses.return_value = [ip_address]
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node.properties = properties
            pxe_utils.clean_up_pxe_config(task)
            unlink_calls = [
                mock.call('/tftpboot/10.10.0.1.conf'),
                mock.call('/tftpboot/pxelinux.cfg/01-aa-aa-aa-aa-aa-aa'),
                mock.call('/tftpboot/' + address + '.conf')
            ]
            unlink_mock.assert_has_calls(unlink_calls)
        rmtree_mock.assert_called_once_with(
            os.path.join(CONF.pxe.tftp_root, self.node.uuid))
    @mock.patch('ironic.common.utils.rmtree_without_raise', autospec=True)
    @mock.patch('ironic_lib.utils.unlink_without_raise', autospec=True)
    @mock.patch('ironic.common.dhcp_factory.DHCPFactory.provider',
                autospec=True)
    def test_clean_up_pxe_config_uefi_mac_address(
            self, provider_mock, unlink_mock, rmtree_mock):
        """UEFI cleanup also covers the dash-separated MAC config link."""
        ip_address = '10.10.0.1'
        address = "aa:aa:aa:aa:aa:aa"
        properties = {'capabilities': 'boot_mode:uefi'}
        object_utils.create_test_port(self.context, node_id=self.node.id,
                                      address=address)
        provider_mock.get_ip_addresses.return_value = [ip_address]
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node.properties = properties
            pxe_utils.clean_up_pxe_config(task)
            unlink_calls = [
                mock.call('/tftpboot/10.10.0.1.conf'),
                mock.call('/tftpboot/pxelinux.cfg/01-%s' %
                          address.replace(':', '-')),
                mock.call('/tftpboot/' + address + '.conf')
            ]
            unlink_mock.assert_has_calls(unlink_calls)
        rmtree_mock.assert_called_once_with(
            os.path.join(CONF.pxe.tftp_root, self.node.uuid))
    @mock.patch('ironic.common.utils.rmtree_without_raise', autospec=True)
    @mock.patch('ironic_lib.utils.unlink_without_raise', autospec=True)
    @mock.patch('ironic.common.dhcp_factory.DHCPFactory.provider',
                autospec=True)
    def test_clean_up_pxe_config_uefi_instance_info(self,
                                                    provider_mock, unlink_mock,
                                                    rmtree_mock):
        """UEFI cleanup works when boot mode comes from instance_info."""
        ip_address = '10.10.0.1'
        address = "aa:aa:aa:aa:aa:aa"
        object_utils.create_test_port(self.context, node_id=self.node.id,
                                      address=address)
        provider_mock.get_ip_addresses.return_value = [ip_address]
        with task_manager.acquire(self.context, self.node.uuid) as task:
            # UEFI is signalled via instance_info here, not capabilities.
            task.node.instance_info['deploy_boot_mode'] = 'uefi'
            pxe_utils.clean_up_pxe_config(task)
            unlink_calls = [
                mock.call('/tftpboot/10.10.0.1.conf'),
                mock.call('/tftpboot/pxelinux.cfg/01-aa-aa-aa-aa-aa-aa'),
                mock.call('/tftpboot/' + address + ".conf")
            ]
            unlink_mock.assert_has_calls(unlink_calls)
        rmtree_mock.assert_called_once_with(
            os.path.join(CONF.pxe.tftp_root, self.node.uuid))
    @mock.patch('ironic.common.utils.rmtree_without_raise', autospec=True)
    @mock.patch('ironic_lib.utils.unlink_without_raise', autospec=True)
    def test_clean_up_ipxe_config_uefi(self, unlink_mock, rmtree_mock):
        """iPXE UEFI cleanup removes files under http_root instead."""
        self.config(ipxe_enabled=True, group='pxe')
        address = "aa:aa:aa:aa:aa:aa"
        properties = {'capabilities': 'boot_mode:uefi'}
        object_utils.create_test_port(self.context, node_id=self.node.id,
                                      address=address)
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node.properties = properties
            pxe_utils.clean_up_pxe_config(task, ipxe_enabled=True)
            # Note: iPXE MAC paths have no '01-' prefix (cf. the
            # non-iPXE cleanup tests above).
            ensure_calls = [
                mock.call("/httpboot/pxelinux.cfg/%s"
                          % address.replace(':', '-')),
                mock.call("/httpboot/%s.conf" % address)
            ]
            unlink_mock.assert_has_calls(ensure_calls)
        rmtree_mock.assert_called_once_with(
            os.path.join(CONF.deploy.http_root, self.node.uuid))
def test_get_tftp_path_prefix_with_trailing_slash(self):
self.config(tftp_root='/tftpboot-path/', group='pxe')
path_prefix = pxe_utils.get_tftp_path_prefix()
self.assertEqual(path_prefix, '/tftpboot-path/')
def test_get_tftp_path_prefix_without_trailing_slash(self):
self.config(tftp_root='/tftpboot-path', group='pxe')
path_prefix = pxe_utils.get_tftp_path_prefix()
self.assertEqual(path_prefix, '/tftpboot-path/')
def test_get_path_relative_to_tftp_root_with_trailing_slash(self):
self.config(tftp_root='/tftpboot-path/', group='pxe')
test_file_path = '/tftpboot-path/pxelinux.cfg/test'
relpath = pxe_utils.get_path_relative_to_tftp_root(test_file_path)
self.assertEqual(relpath, 'pxelinux.cfg/test')
def test_get_path_relative_to_tftp_root_without_trailing_slash(self):
self.config(tftp_root='/tftpboot-path', group='pxe')
test_file_path = '/tftpboot-path/pxelinux.cfg/test'
relpath = pxe_utils.get_path_relative_to_tftp_root(test_file_path)
self.assertEqual(relpath, 'pxelinux.cfg/test')
@mock.patch.object(ipxe.iPXEBoot, '__init__', lambda self: None)
@mock.patch.object(pxe.PXEBoot, '__init__', lambda self: None)
class PXEInterfacesTestCase(db_base.DbTestCase):
def setUp(self):
super(PXEInterfacesTestCase, self).setUp()
n = {
'driver': 'fake-hardware',
'boot_interface': 'pxe',
'instance_info': INST_INFO_DICT,
'driver_info': DRV_INFO_DICT,
'driver_internal_info': DRV_INTERNAL_INFO_DICT,
}
self.config_temp_dir('http_root', group='deploy')
self.node = object_utils.create_test_node(self.context, **n)
def _test_parse_driver_info_missing_kernel(self, mode='deploy'):
del self.node.driver_info['%s_kernel' % mode]
if mode == 'rescue':
self.node.provision_state = states.RESCUING
self.assertRaises(exception.MissingParameterValue,
pxe_utils.parse_driver_info, self.node, mode=mode)
    def test_parse_driver_info_missing_deploy_kernel(self):
        """A missing deploy_kernel raises MissingParameterValue."""
        self._test_parse_driver_info_missing_kernel()
    def test_parse_driver_info_missing_rescue_kernel(self):
        """A missing rescue_kernel raises MissingParameterValue."""
        self._test_parse_driver_info_missing_kernel(mode='rescue')
def _test_parse_driver_info_missing_ramdisk(self, mode='deploy'):
del self.node.driver_info['%s_ramdisk' % mode]
if mode == 'rescue':
self.node.provision_state = states.RESCUING
self.assertRaises(exception.MissingParameterValue,
pxe_utils.parse_driver_info, self.node, mode=mode)
    def test_parse_driver_info_missing_deploy_ramdisk(self):
        """A missing deploy_ramdisk raises MissingParameterValue."""
        self._test_parse_driver_info_missing_ramdisk()
    def test_parse_driver_info_missing_rescue_ramdisk(self):
        """A missing rescue_ramdisk raises MissingParameterValue."""
        self._test_parse_driver_info_missing_ramdisk(mode='rescue')
def _test_parse_driver_info(self, mode='deploy'):
exp_info = {'%s_ramdisk' % mode: 'glance://%s_ramdisk_uuid' % mode,
'%s_kernel' % mode: 'glance://%s_kernel_uuid' % mode}
image_info = pxe_utils.parse_driver_info(self.node, mode=mode)
self.assertEqual(exp_info, image_info)
    def test_parse_driver_info_deploy(self):
        """Deploy-mode driver info is parsed into kernel/ramdisk refs."""
        self._test_parse_driver_info()
    def test_parse_driver_info_rescue(self):
        """Rescue-mode driver info is parsed into kernel/ramdisk refs."""
        self._test_parse_driver_info(mode='rescue')
def test__get_deploy_image_info(self):
expected_info = {'deploy_ramdisk':
(DRV_INFO_DICT['deploy_ramdisk'],
os.path.join(CONF.pxe.tftp_root,
self.node.uuid,
'deploy_ramdisk')),
'deploy_kernel':
(DRV_INFO_DICT['deploy_kernel'],
os.path.join(CONF.pxe.tftp_root,
self.node.uuid,
'deploy_kernel'))}
image_info = pxe_utils.get_image_info(self.node)
self.assertEqual(expected_info, image_info)
def test__get_deploy_image_info_ipxe(self):
expected_info = {'deploy_ramdisk':
(DRV_INFO_DICT['deploy_ramdisk'],
os.path.join(CONF.deploy.http_root,
self.node.uuid,
'deploy_ramdisk')),
'deploy_kernel':
(DRV_INFO_DICT['deploy_kernel'],
os.path.join(CONF.deploy.http_root,
self.node.uuid,
'deploy_kernel'))}
image_info = pxe_utils.get_image_info(self.node, ipxe_enabled=True)
self.assertEqual(expected_info, image_info)
def test__get_deploy_image_info_missing_deploy_kernel(self):
del self.node.driver_info['deploy_kernel']
self.assertRaises(exception.MissingParameterValue,
pxe_utils.get_image_info, self.node)
def test__get_deploy_image_info_deploy_ramdisk(self):
del self.node.driver_info['deploy_ramdisk']
self.assertRaises(exception.MissingParameterValue,
pxe_utils.get_image_info, self.node)
@mock.patch.object(image_service.GlanceImageService, 'show', autospec=True)
def _test_get_instance_image_info(self, show_mock):
properties = {'properties': {u'kernel_id': u'instance_kernel_uuid',
u'ramdisk_id': u'instance_ramdisk_uuid'}}
expected_info = {'ramdisk':
('instance_ramdisk_uuid',
os.path.join(CONF.pxe.tftp_root,
self.node.uuid,
'ramdisk')),
'kernel':
('instance_kernel_uuid',
os.path.join(CONF.pxe.tftp_root,
self.node.uuid,
'kernel'))}
show_mock.return_value = properties
self.context.auth_token = 'fake'
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
image_info = pxe_utils.get_instance_image_info(task)
show_mock.assert_called_once_with(mock.ANY, 'glance://image_uuid')
self.assertEqual(expected_info, image_info)
# test with saved info
show_mock.reset_mock()
image_info = pxe_utils.get_instance_image_info(task)
self.assertEqual(expected_info, image_info)
self.assertFalse(show_mock.called)
self.assertEqual('instance_kernel_uuid',
task.node.instance_info['kernel'])
self.assertEqual('instance_ramdisk_uuid',
task.node.instance_info['ramdisk'])
def test_get_instance_image_info(self):
# Tests when 'is_whole_disk_image' exists in driver_internal_info
self._test_get_instance_image_info()
def test_get_instance_image_info_without_is_whole_disk_image(self):
# Tests when 'is_whole_disk_image' doesn't exists in
# driver_internal_info
del self.node.driver_internal_info['is_whole_disk_image']
self.node.save()
self._test_get_instance_image_info()
@mock.patch('ironic.drivers.modules.deploy_utils.get_boot_option',
return_value='local')
def test_get_instance_image_info_localboot(self, boot_opt_mock):
self.node.driver_internal_info['is_whole_disk_image'] = False
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
image_info = pxe_utils.get_instance_image_info(task)
self.assertEqual({}, image_info)
boot_opt_mock.assert_called_once_with(task.node)
@mock.patch.object(image_service.GlanceImageService, 'show', autospec=True)
def test_get_instance_image_info_whole_disk_image(self, show_mock):
properties = {'properties': None}
show_mock.return_value = properties
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node.driver_internal_info['is_whole_disk_image'] = True
image_info = pxe_utils.get_instance_image_info(task)
self.assertEqual({}, image_info)
@mock.patch('ironic.common.utils.render_template', autospec=True)
def _test_build_pxe_config_options_pxe(self, render_mock,
whle_dsk_img=False,
debug=False, mode='deploy'):
self.config(debug=debug)
self.config(pxe_append_params='test_param', group='pxe')
# NOTE: right '/' should be removed from url string
self.config(api_url='http://192.168.122.184:6385', group='conductor')
driver_internal_info = self.node.driver_internal_info
driver_internal_info['is_whole_disk_image'] = whle_dsk_img
self.node.driver_internal_info = driver_internal_info
self.node.save()
tftp_server = CONF.pxe.tftp_server
kernel_label = '%s_kernel' % mode
ramdisk_label = '%s_ramdisk' % mode
pxe_kernel = os.path.join(self.node.uuid, kernel_label)
pxe_ramdisk = os.path.join(self.node.uuid, ramdisk_label)
kernel = os.path.join(self.node.uuid, 'kernel')
ramdisk = os.path.join(self.node.uuid, 'ramdisk')
root_dir = CONF.pxe.tftp_root
image_info = {
kernel_label: (kernel_label,
os.path.join(root_dir,
self.node.uuid,
kernel_label)),
ramdisk_label: (ramdisk_label,
os.path.join(root_dir,
self.node.uuid,
ramdisk_label))
}
if (whle_dsk_img
or deploy_utils.get_boot_option(self.node) == 'local'):
ramdisk = 'no_ramdisk'
kernel = 'no_kernel'
else:
image_info.update({
'kernel': ('kernel_id',
os.path.join(root_dir,
self.node.uuid,
'kernel')),
'ramdisk': ('ramdisk_id',
os.path.join(root_dir,
self.node.uuid,
'ramdisk'))
})
expected_pxe_params = 'test_param'
if debug:
expected_pxe_params += ' ipa-debug=1'
expected_options = {
'deployment_ari_path': pxe_ramdisk,
'pxe_append_params': expected_pxe_params,
'deployment_aki_path': pxe_kernel,
'tftp_server': tftp_server,
'ipxe_timeout': 0,
'ari_path': ramdisk,
'aki_path': kernel,
}
if mode == 'rescue':
self.node.provision_state = states.RESCUING
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
options = pxe_utils.build_pxe_config_options(task, image_info)
self.assertEqual(expected_options, options)
def test_build_pxe_config_options_pxe(self):
self._test_build_pxe_config_options_pxe(whle_dsk_img=True)
def test_build_pxe_config_options_pxe_ipa_debug(self):
self._test_build_pxe_config_options_pxe(debug=True)
def test_build_pxe_config_options_pxe_rescue(self):
del self.node.driver_internal_info['is_whole_disk_image']
self._test_build_pxe_config_options_pxe(mode='rescue')
def test_build_pxe_config_options_ipa_debug_rescue(self):
del self.node.driver_internal_info['is_whole_disk_image']
self._test_build_pxe_config_options_pxe(debug=True, mode='rescue')
def test_build_pxe_config_options_pxe_local_boot(self):
del self.node.driver_internal_info['is_whole_disk_image']
i_info = self.node.instance_info
i_info.update({'capabilities': {'boot_option': 'local'}})
self.node.instance_info = i_info
self.node.save()
self._test_build_pxe_config_options_pxe(whle_dsk_img=False)
def test_build_pxe_config_options_pxe_without_is_whole_disk_image(self):
del self.node.driver_internal_info['is_whole_disk_image']
self.node.save()
self._test_build_pxe_config_options_pxe(whle_dsk_img=False)
def test_build_pxe_config_options_pxe_no_kernel_no_ramdisk(self):
del self.node.driver_internal_info['is_whole_disk_image']
self.node.save()
pxe_params = 'my-pxe-append-params ipa-debug=0'
self.config(group='pxe', tftp_server='my-tftp-server')
self.config(group='pxe', pxe_append_params=pxe_params)
self.config(group='pxe', tftp_root='/tftp-path/')
image_info = {
'deploy_kernel': ('deploy_kernel',
os.path.join(CONF.pxe.tftp_root,
'path-to-deploy_kernel')),
'deploy_ramdisk': ('deploy_ramdisk',
os.path.join(CONF.pxe.tftp_root,
'path-to-deploy_ramdisk'))}
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
options = pxe_utils.build_pxe_config_options(task, image_info)
expected_options = {
'aki_path': 'no_kernel',
'ari_path': 'no_ramdisk',
'deployment_aki_path': 'path-to-deploy_kernel',
'deployment_ari_path': 'path-to-deploy_ramdisk',
'pxe_append_params': pxe_params,
'tftp_server': 'my-tftp-server',
'ipxe_timeout': 0}
self.assertEqual(expected_options, options)
@mock.patch('ironic.common.image_service.GlanceImageService',
autospec=True)
@mock.patch('ironic.common.utils.render_template', autospec=True)
def _test_build_pxe_config_options_ipxe(self, render_mock, glance_mock,
whle_dsk_img=False,
ipxe_timeout=0,
ipxe_use_swift=False,
debug=False,
boot_from_volume=False,
mode='deploy'):
self.config(debug=debug)
self.config(pxe_append_params='test_param', group='pxe')
# NOTE: right '/' should be removed from url string
self.config(api_url='http://192.168.122.184:6385', group='conductor')
self.config(ipxe_timeout=ipxe_timeout, group='pxe')
root_dir = CONF.deploy.http_root
driver_internal_info = self.node.driver_internal_info
driver_internal_info['is_whole_disk_image'] = whle_dsk_img
self.node.driver_internal_info = driver_internal_info
self.node.save()
tftp_server = CONF.pxe.tftp_server
http_url = 'http://192.1.2.3:1234'
self.config(ipxe_enabled=True, group='pxe')
self.config(http_url=http_url, group='deploy')
kernel_label = '%s_kernel' % mode
ramdisk_label = '%s_ramdisk' % mode
if ipxe_use_swift:
self.config(ipxe_use_swift=True, group='pxe')
glance = mock.Mock()
glance_mock.return_value = glance
glance.swift_temp_url.side_effect = [
pxe_kernel, pxe_ramdisk] = [
'swift_kernel', 'swift_ramdisk']
image_info = {
kernel_label: (uuidutils.generate_uuid(),
os.path.join(root_dir,
self.node.uuid,
kernel_label)),
ramdisk_label: (uuidutils.generate_uuid(),
os.path.join(root_dir,
self.node.uuid,
ramdisk_label))
}
else:
pxe_kernel = os.path.join(http_url, self.node.uuid,
kernel_label)
pxe_ramdisk = os.path.join(http_url, self.node.uuid,
ramdisk_label)
image_info = {
kernel_label: (kernel_label,
os.path.join(root_dir,
self.node.uuid,
kernel_label)),
ramdisk_label: (ramdisk_label,
os.path.join(root_dir,
self.node.uuid,
ramdisk_label))
}
kernel = os.path.join(http_url, self.node.uuid, 'kernel')
ramdisk = os.path.join(http_url, self.node.uuid, 'ramdisk')
if (whle_dsk_img
or deploy_utils.get_boot_option(self.node) == 'local'):
ramdisk = 'no_ramdisk'
kernel = 'no_kernel'
else:
image_info.update({
'kernel': ('kernel_id',
os.path.join(root_dir,
self.node.uuid,
'kernel')),
'ramdisk': ('ramdisk_id',
os.path.join(root_dir,
self.node.uuid,
'ramdisk'))
})
ipxe_timeout_in_ms = ipxe_timeout * 1000
expected_pxe_params = 'test_param'
if debug:
expected_pxe_params += ' ipa-debug=1'
expected_options = {
'deployment_ari_path': pxe_ramdisk,
'pxe_append_params': expected_pxe_params,
'deployment_aki_path': pxe_kernel,
'tftp_server': tftp_server,
'ipxe_timeout': ipxe_timeout_in_ms,
'ari_path': ramdisk,
'aki_path': kernel,
'initrd_filename': ramdisk_label,
}
if mode == 'rescue':
self.node.provision_state = states.RESCUING
self.node.save()
if boot_from_volume:
expected_options.update({
'boot_from_volume': True,
'iscsi_boot_url': 'iscsi:fake_host::3260:0:fake_iqn',
'iscsi_initiator_iqn': 'fake_iqn_initiator',
'iscsi_volumes': [{'url': 'iscsi:fake_host::3260:1:fake_iqn',
'username': 'fake_username_1',
'password': 'fake_password_1'
}],
'username': 'fake_username',
'password': 'fake_password'
})
expected_options.pop('deployment_aki_path')
expected_options.pop('deployment_ari_path')
expected_options.pop('initrd_filename')
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
options = pxe_utils.build_pxe_config_options(task,
image_info,
ipxe_enabled=True)
self.assertEqual(expected_options, options)
def test_build_pxe_config_options_ipxe(self):
self._test_build_pxe_config_options_ipxe(whle_dsk_img=True)
def test_build_pxe_config_options_ipxe_ipa_debug(self):
self._test_build_pxe_config_options_ipxe(debug=True)
def test_build_pxe_config_options_ipxe_local_boot(self):
del self.node.driver_internal_info['is_whole_disk_image']
i_info = self.node.instance_info
i_info.update({'capabilities': {'boot_option': 'local'}})
self.node.instance_info = i_info
self.node.save()
self._test_build_pxe_config_options_ipxe(whle_dsk_img=False)
def test_build_pxe_config_options_ipxe_swift_wdi(self):
self._test_build_pxe_config_options_ipxe(whle_dsk_img=True,
ipxe_use_swift=True)
def test_build_pxe_config_options_ipxe_swift_partition(self):
self._test_build_pxe_config_options_ipxe(whle_dsk_img=False,
ipxe_use_swift=True)
def test_build_pxe_config_options_ipxe_and_ipxe_timeout(self):
self._test_build_pxe_config_options_ipxe(whle_dsk_img=True,
ipxe_timeout=120)
def test_build_pxe_config_options_ipxe_and_iscsi_boot(self):
vol_id = uuidutils.generate_uuid()
vol_id2 = uuidutils.generate_uuid()
object_utils.create_test_volume_connector(
self.context,
uuid=uuidutils.generate_uuid(),
type='iqn',
node_id=self.node.id,
connector_id='fake_iqn_initiator')
object_utils.create_test_volume_target(
self.context, node_id=self.node.id, volume_type='iscsi',
boot_index=0, volume_id='1234', uuid=vol_id,
properties={'target_lun': 0,
'target_portal': 'fake_host:3260',
'target_iqn': 'fake_iqn',
'auth_username': 'fake_username',
'auth_password': 'fake_password'})
object_utils.create_test_volume_target(
self.context, node_id=self.node.id, volume_type='iscsi',
boot_index=1, volume_id='1235', uuid=vol_id2,
properties={'target_lun': 1,
'target_portal': 'fake_host:3260',
'target_iqn': 'fake_iqn',
'auth_username': 'fake_username_1',
'auth_password': 'fake_password_1'})
self.node.driver_internal_info.update({'boot_from_volume': vol_id})
self._test_build_pxe_config_options_ipxe(boot_from_volume=True)
def test_build_pxe_config_options_ipxe_and_iscsi_boot_from_lists(self):
vol_id = uuidutils.generate_uuid()
vol_id2 = uuidutils.generate_uuid()
object_utils.create_test_volume_connector(
self.context,
uuid=uuidutils.generate_uuid(),
type='iqn',
node_id=self.node.id,
connector_id='fake_iqn_initiator')
object_utils.create_test_volume_target(
self.context, node_id=self.node.id, volume_type='iscsi',
boot_index=0, volume_id='1234', uuid=vol_id,
properties={'target_luns': [0, 2],
'target_portals': ['fake_host:3260',
'faker_host:3261'],
'target_iqns': ['fake_iqn', 'faker_iqn'],
'auth_username': 'fake_username',
'auth_password': 'fake_password'})
object_utils.create_test_volume_target(
self.context, node_id=self.node.id, volume_type='iscsi',
boot_index=1, volume_id='1235', uuid=vol_id2,
properties={'target_lun': [1, 3],
'target_portal': ['fake_host:3260', 'faker_host:3261'],
'target_iqn': ['fake_iqn', 'faker_iqn'],
'auth_username': 'fake_username_1',
'auth_password': 'fake_password_1'})
self.node.driver_internal_info.update({'boot_from_volume': vol_id})
self._test_build_pxe_config_options_ipxe(boot_from_volume=True)
def test_get_volume_pxe_options(self):
vol_id = uuidutils.generate_uuid()
vol_id2 = uuidutils.generate_uuid()
object_utils.create_test_volume_connector(
self.context,
uuid=uuidutils.generate_uuid(),
type='iqn',
node_id=self.node.id,
connector_id='fake_iqn_initiator')
object_utils.create_test_volume_target(
self.context, node_id=self.node.id, volume_type='iscsi',
boot_index=0, volume_id='1234', uuid=vol_id,
properties={'target_lun': [0, 1, 3],
'target_portal': 'fake_host:3260',
'target_iqns': 'fake_iqn',
'auth_username': 'fake_username',
'auth_password': 'fake_password'})
object_utils.create_test_volume_target(
self.context, node_id=self.node.id, volume_type='iscsi',
boot_index=1, volume_id='1235', uuid=vol_id2,
properties={'target_lun': 1,
'target_portal': 'fake_host:3260',
'target_iqn': 'fake_iqn',
'auth_username': 'fake_username_1',
'auth_password': 'fake_password_1'})
self.node.driver_internal_info.update({'boot_from_volume': vol_id})
driver_internal_info = self.node.driver_internal_info
driver_internal_info['boot_from_volume'] = vol_id
self.node.driver_internal_info = driver_internal_info
self.node.save()
expected = {'boot_from_volume': True,
'username': 'fake_username', 'password': 'fake_password',
'iscsi_boot_url': 'iscsi:fake_host::3260:0:fake_iqn',
'iscsi_initiator_iqn': 'fake_iqn_initiator',
'iscsi_volumes': [{
'url': 'iscsi:fake_host::3260:1:fake_iqn',
'username': 'fake_username_1',
'password': 'fake_password_1'
}]
}
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
options = pxe_utils.get_volume_pxe_options(task)
self.assertEqual(expected, options)
def test_get_volume_pxe_options_unsupported_volume_type(self):
vol_id = uuidutils.generate_uuid()
object_utils.create_test_volume_target(
self.context, node_id=self.node.id, volume_type='fake_type',
boot_index=0, volume_id='1234', uuid=vol_id,
properties={'foo': 'bar'})
driver_internal_info = self.node.driver_internal_info
driver_internal_info['boot_from_volume'] = vol_id
self.node.driver_internal_info = driver_internal_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
options = pxe_utils.get_volume_pxe_options(task)
self.assertEqual({}, options)
    def test_get_volume_pxe_options_unsupported_additional_volume_type(self):
        """Non-iSCSI *secondary* volumes are skipped: 'iscsi_volumes' is
        empty while the iSCSI boot volume still yields options.
        """
        vol_id = uuidutils.generate_uuid()
        vol_id2 = uuidutils.generate_uuid()
        object_utils.create_test_volume_target(
            self.context, node_id=self.node.id, volume_type='iscsi',
            boot_index=0, volume_id='1234', uuid=vol_id,
            properties={'target_lun': 0,
                        'target_portal': 'fake_host:3260',
                        'target_iqn': 'fake_iqn',
                        'auth_username': 'fake_username',
                        'auth_password': 'fake_password'})
        # Secondary target with an unknown volume_type.
        object_utils.create_test_volume_target(
            self.context, node_id=self.node.id, volume_type='fake_type',
            boot_index=1, volume_id='1234', uuid=vol_id2,
            properties={'foo': 'bar'})
        # Reassign so the field is marked dirty and persisted by save().
        driver_internal_info = self.node.driver_internal_info
        driver_internal_info['boot_from_volume'] = vol_id
        self.node.driver_internal_info = driver_internal_info
        self.node.save()
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            options = pxe_utils.get_volume_pxe_options(task)
        self.assertEqual([], options['iscsi_volumes'])
    def test_build_pxe_config_options_ipxe_rescue(self):
        """iPXE option building in rescue mode."""
        self._test_build_pxe_config_options_ipxe(mode='rescue')
    def test_build_pxe_config_options_ipxe_rescue_swift(self):
        """iPXE option building in rescue mode with Swift-hosted images."""
        self._test_build_pxe_config_options_ipxe(mode='rescue',
                                                 ipxe_use_swift=True)
    def test_build_pxe_config_options_ipxe_rescue_timeout(self):
        """iPXE option building in rescue mode with a non-zero timeout."""
        self._test_build_pxe_config_options_ipxe(mode='rescue',
                                                 ipxe_timeout=120)
    @mock.patch.object(deploy_utils, 'fetch_images', autospec=True)
    def test__cache_tftp_images_master_path(self, mock_fetch_image):
        """With a tftp_master_path configured, cache_ramdisk_kernel
        fetches images into per-node paths under the TFTP root.
        """
        temp_dir = tempfile.mkdtemp()
        self.config(tftp_root=temp_dir, group='pxe')
        self.config(tftp_master_path=os.path.join(temp_dir,
                                                  'tftp_master_path'),
                    group='pxe')
        image_path = os.path.join(temp_dir, self.node.uuid,
                                  'deploy_kernel')
        image_info = {'deploy_kernel': ('deploy_kernel', image_path)}
        fileutils.ensure_tree(CONF.pxe.tftp_master_path)
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            pxe_utils.cache_ramdisk_kernel(task, image_info)
        # Final True argument: force_raw conversion -- TODO confirm.
        mock_fetch_image.assert_called_once_with(self.context,
                                                 mock.ANY,
                                                 [('deploy_kernel',
                                                   image_path)],
                                                 True)
    @mock.patch.object(pxe_utils, 'TFTPImageCache', lambda: None)
    @mock.patch.object(fileutils, 'ensure_tree', autospec=True)
    @mock.patch.object(deploy_utils, 'fetch_images', autospec=True)
    def test_cache_ramdisk_kernel(self, mock_fetch_image, mock_ensure_tree):
        """With iPXE disabled, images are cached under the TFTP root."""
        self.config(ipxe_enabled=False, group='pxe')
        fake_pxe_info = {'foo': 'bar'}
        expected_path = os.path.join(CONF.pxe.tftp_root, self.node.uuid)
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            pxe_utils.cache_ramdisk_kernel(task, fake_pxe_info)
        mock_ensure_tree.assert_called_with(expected_path)
        mock_fetch_image.assert_called_once_with(
            self.context, mock.ANY, list(fake_pxe_info.values()), True)
    @mock.patch.object(pxe_utils, 'TFTPImageCache', lambda: None)
    @mock.patch.object(fileutils, 'ensure_tree', autospec=True)
    @mock.patch.object(deploy_utils, 'fetch_images', autospec=True)
    def test_cache_ramdisk_kernel_ipxe(self, mock_fetch_image,
                                       mock_ensure_tree):
        """With iPXE enabled, images are cached under the HTTP root
        instead of the TFTP root.
        """
        self.config(ipxe_enabled=True, group='pxe')
        fake_pxe_info = {'foo': 'bar'}
        expected_path = os.path.join(CONF.deploy.http_root,
                                     self.node.uuid)
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            pxe_utils.cache_ramdisk_kernel(task, fake_pxe_info)
        mock_ensure_tree.assert_called_with(expected_path)
        mock_fetch_image.assert_called_once_with(self.context, mock.ANY,
                                                 list(fake_pxe_info.values()),
                                                 True)
    @mock.patch.object(pxe_utils.LOG, 'error', autospec=True)
    def test_validate_boot_parameters_for_trusted_boot_one(self, mock_log):
        """Trusted boot is rejected (with an error log) for UEFI nodes."""
        properties = {'capabilities': 'boot_mode:uefi'}
        instance_info = {"boot_option": "netboot"}
        self.node.properties = properties
        # NOTE(review): a dict is stored under the 'capabilities' key of
        # instance_info -- looks intentional for these tests, but verify
        # against validate_boot_parameters_for_trusted_boot.
        self.node.instance_info['capabilities'] = instance_info
        self.node.driver_internal_info['is_whole_disk_image'] = False
        self.assertRaises(exception.InvalidParameterValue,
                          pxe.validate_boot_parameters_for_trusted_boot,
                          self.node)
        self.assertTrue(mock_log.called)
    @mock.patch.object(pxe_utils.LOG, 'error', autospec=True)
    def test_validate_boot_parameters_for_trusted_boot_two(self, mock_log):
        """Trusted boot is rejected when boot_option is 'local'."""
        properties = {'capabilities': 'boot_mode:bios'}
        instance_info = {"boot_option": "local"}
        self.node.properties = properties
        self.node.instance_info['capabilities'] = instance_info
        self.node.driver_internal_info['is_whole_disk_image'] = False
        self.assertRaises(exception.InvalidParameterValue,
                          pxe.validate_boot_parameters_for_trusted_boot,
                          self.node)
        self.assertTrue(mock_log.called)
    @mock.patch.object(pxe_utils.LOG, 'error', autospec=True)
    def test_validate_boot_parameters_for_trusted_boot_three(self, mock_log):
        """Trusted boot is rejected for whole-disk images."""
        properties = {'capabilities': 'boot_mode:bios'}
        instance_info = {"boot_option": "netboot"}
        self.node.properties = properties
        self.node.instance_info['capabilities'] = instance_info
        self.node.driver_internal_info['is_whole_disk_image'] = True
        self.assertRaises(exception.InvalidParameterValue,
                          pxe.validate_boot_parameters_for_trusted_boot,
                          self.node)
        self.assertTrue(mock_log.called)
    @mock.patch.object(pxe_utils.LOG, 'error', autospec=True)
    def test_validate_boot_parameters_for_trusted_boot_pass(self, mock_log):
        """BIOS + netboot + partition image: validation passes silently."""
        properties = {'capabilities': 'boot_mode:bios'}
        instance_info = {"boot_option": "netboot"}
        self.node.properties = properties
        self.node.instance_info['capabilities'] = instance_info
        self.node.driver_internal_info['is_whole_disk_image'] = False
        pxe.validate_boot_parameters_for_trusted_boot(self.node)
        self.assertFalse(mock_log.called)
@mock.patch.object(ironic_utils, 'unlink_without_raise', autospec=True)
@mock.patch.object(pxe_utils, 'clean_up_pxe_config', autospec=True)
@mock.patch.object(pxe_utils, 'TFTPImageCache', autospec=True)
class CleanUpPxeEnvTestCase(db_base.DbTestCase):
    """Tests for pxe_utils.clean_up_pxe_env."""

    def setUp(self):
        super(CleanUpPxeEnvTestCase, self).setUp()
        # Copy the shared module-level fixture before adding keys.
        # Mutating INST_INFO_DICT in place would leak 'deploy_key' into
        # every other test case that reads the same dict.
        instance_info = dict(INST_INFO_DICT)
        instance_info['deploy_key'] = 'fake-56789'
        self.node = object_utils.create_test_node(
            self.context, boot_interface='pxe',
            instance_info=instance_info,
            driver_info=DRV_INFO_DICT,
            driver_internal_info=DRV_INTERNAL_INFO_DICT,
        )

    def test__clean_up_pxe_env(self, mock_cache, mock_pxe_clean,
                               mock_unlink):
        """clean_up_pxe_env removes the PXE config, unlinks each image
        and purges the TFTP image cache.
        """
        image_info = {'label': ['', 'deploy_kernel']}
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            pxe_utils.clean_up_pxe_env(task, image_info)
            mock_pxe_clean.assert_called_once_with(task, ipxe_enabled=False)
            mock_unlink.assert_any_call('deploy_kernel')
        mock_cache.return_value.clean_up.assert_called_once_with()
class TFTPImageCacheTestCase(db_base.DbTestCase):
    """Tests for TFTPImageCache configuration handling."""

    # autospec=True added for consistency with every other
    # mock.patch usage in this file.
    @mock.patch.object(fileutils, 'ensure_tree', autospec=True)
    def test_with_master_path(self, mock_ensure_tree):
        """A configured master path is created; size/TTL are converted
        from MiB/minutes to bytes/seconds.
        """
        self.config(tftp_master_path='/fake/path', group='pxe')
        self.config(image_cache_size=500, group='pxe')
        self.config(image_cache_ttl=30, group='pxe')
        cache = pxe_utils.TFTPImageCache()
        mock_ensure_tree.assert_called_once_with('/fake/path')
        self.assertEqual(500 * 1024 * 1024, cache._cache_size)
        self.assertEqual(30 * 60, cache._cache_ttl)

    @mock.patch.object(fileutils, 'ensure_tree', autospec=True)
    def test_without_master_path(self, mock_ensure_tree):
        """With no master path configured no directory is created."""
        self.config(tftp_master_path='', group='pxe')
        self.config(image_cache_size=500, group='pxe')
        self.config(image_cache_ttl=30, group='pxe')
        cache = pxe_utils.TFTPImageCache()
        mock_ensure_tree.assert_not_called()
        self.assertEqual(500 * 1024 * 1024, cache._cache_size)
        self.assertEqual(30 * 60, cache._cache_ttl)
| 47.399884 | 79 | 0.598335 |
import os
import tempfile
from ironic_lib import utils as ironic_utils
import mock
from oslo_config import cfg
from oslo_utils import fileutils
from oslo_utils import uuidutils
import six
from ironic.common import exception
from ironic.common.glance_service import image_service
from ironic.common import pxe_utils
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import ipxe
from ironic.drivers.modules import pxe
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as object_utils
CONF = cfg.CONF
# Canned PXE fixture dicts shared by the test cases below; tests should
# copy these before mutating them.
INST_INFO_DICT = db_utils.get_test_pxe_instance_info()
DRV_INFO_DICT = db_utils.get_test_pxe_driver_info()
DRV_INTERNAL_INFO_DICT = db_utils.get_test_pxe_driver_internal_info()
@mock.patch('ironic.drivers.modules.pxe.PXEBoot.__init__', lambda self: None)
class TestPXEUtils(db_base.DbTestCase):
    def setUp(self):
        """Build the canonical PXE/iPXE option dicts used by the
        template-rendering tests below.
        """
        super(TestPXEUtils, self).setUp()
        # Plain PXE: kernel/ramdisk paths live under the TFTP root.
        self.pxe_options = {
            'deployment_aki_path': u'/tftpboot/1be26c0b-03f2-4d2e-ae87-'
                                   u'c02d7f33c123/deploy_kernel',
            'aki_path': u'/tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/'
                        u'kernel',
            'ari_path': u'/tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7f33c123/'
                        u'ramdisk',
            'pxe_append_params': 'test_param',
            'deployment_ari_path': u'/tftpboot/1be26c0b-03f2-4d2e-ae87-c02d7'
                                   u'f33c123/deploy_ramdisk',
            'ipa-api-url': 'http://192.168.122.184:6385',
            'ipxe_timeout': 0,
            'ramdisk_opts': 'ramdisk_param',
        }
        # iPXE serves the same images over HTTP instead of TFTP.
        self.ipxe_options = self.pxe_options.copy()
        self.ipxe_options.update({
            'deployment_aki_path': 'http://1.2.3.4:1234/deploy_kernel',
            'deployment_ari_path': 'http://1.2.3.4:1234/deploy_ramdisk',
            'aki_path': 'http://1.2.3.4:1234/kernel',
            'ari_path': 'http://1.2.3.4:1234/ramdisk',
            'initrd_filename': 'deploy_ramdisk',
        })
        self.ipxe_options_timeout = self.ipxe_options.copy()
        self.ipxe_options_timeout.update({
            'ipxe_timeout': 120
        })
        # Boot-from-volume variants: iSCSI URL and credentials for the
        # root volume, plus an optional list of extra iSCSI volumes.
        self.ipxe_options_boot_from_volume_no_extra_volume = \
            self.ipxe_options.copy()
        self.ipxe_options_boot_from_volume_no_extra_volume.update({
            'boot_from_volume': True,
            'iscsi_boot_url': 'iscsi:fake_host::3260:0:fake_iqn',
            'iscsi_initiator_iqn': 'fake_iqn',
            'iscsi_volumes': [],
            'username': 'fake_username',
            'password': 'fake_password',
        })
        self.ipxe_options_boot_from_volume_extra_volume = \
            self.ipxe_options.copy()
        self.ipxe_options_boot_from_volume_extra_volume.update({
            'boot_from_volume': True,
            'iscsi_boot_url': 'iscsi:fake_host::3260:0:fake_iqn',
            'iscsi_initiator_iqn': 'fake_iqn',
            'iscsi_volumes': [{'url': 'iscsi:fake_host::3260:1:fake_iqn',
                               'username': 'fake_username_1',
                               'password': 'fake_password_1',
                               }],
            'username': 'fake_username',
            'password': 'fake_password',
        })
        # Boot-from-volume configs do not reference a ramdisk.
        self.ipxe_options_boot_from_volume_no_extra_volume.pop(
            'initrd_filename', None)
        self.ipxe_options_boot_from_volume_extra_volume.pop(
            'initrd_filename', None)
        self.node = object_utils.create_test_node(self.context)
    def test_default_pxe_config(self):
        """The default PXE template renders to the golden file."""
        rendered_template = utils.render_template(
            CONF.pxe.pxe_config_template,
            {'pxe_options': self.pxe_options,
             'ROOT': '{{ ROOT }}',
             'DISK_IDENTIFIER': '{{ DISK_IDENTIFIER }}'})
        with open('ironic/tests/unit/drivers/pxe_config.template') as f:
            expected_template = f.read().rstrip()
        self.assertEqual(six.text_type(expected_template), rendered_template)
    def test_default_ipxe_boot_script(self):
        """The iPXE boot script template renders to the golden file."""
        rendered_template = utils.render_template(
            CONF.pxe.ipxe_boot_script,
            {'ipxe_for_mac_uri': 'pxelinux.cfg/'})
        with open('ironic/tests/unit/drivers/boot.ipxe') as f:
            expected_template = f.read().rstrip()
        self.assertEqual(six.text_type(expected_template), rendered_template)
    def test_default_ipxe_config(self):
        """The iPXE config template renders to the golden file."""
        self.config(
            pxe_config_template='ironic/drivers/modules/ipxe_config.template',
            group='pxe'
        )
        self.config(http_url='http://1.2.3.4:1234', group='deploy')
        rendered_template = utils.render_template(
            CONF.pxe.pxe_config_template,
            {'pxe_options': self.ipxe_options,
             'ROOT': '{{ ROOT }}',
             'DISK_IDENTIFIER': '{{ DISK_IDENTIFIER }}'})
        templ_file = 'ironic/tests/unit/drivers/ipxe_config.template'
        with open(templ_file) as f:
            expected_template = f.read().rstrip()
        self.assertEqual(six.text_type(expected_template), rendered_template)
    def test_default_ipxe_timeout_config(self):
        """The iPXE template with a non-zero timeout renders to the
        timeout golden file.
        """
        self.config(
            pxe_config_template='ironic/drivers/modules/ipxe_config.template',
            group='pxe'
        )
        self.config(http_url='http://1.2.3.4:1234', group='deploy')
        rendered_template = utils.render_template(
            CONF.pxe.pxe_config_template,
            {'pxe_options': self.ipxe_options_timeout,
             'ROOT': '{{ ROOT }}',
             'DISK_IDENTIFIER': '{{ DISK_IDENTIFIER }}'})
        templ_file = 'ironic/tests/unit/drivers/ipxe_config_timeout.template'
        with open(templ_file) as f:
            expected_template = f.read().rstrip()
        self.assertEqual(six.text_type(expected_template), rendered_template)
    def test_default_ipxe_boot_from_volume_config(self):
        """Boot-from-volume iPXE template (with extra volumes) renders to
        the golden file.
        """
        self.config(
            pxe_config_template='ironic/drivers/modules/ipxe_config.template',
            group='pxe'
        )
        self.config(http_url='http://1.2.3.4:1234', group='deploy')
        rendered_template = utils.render_template(
            CONF.pxe.pxe_config_template,
            {'pxe_options': self.ipxe_options_boot_from_volume_extra_volume,
             'ROOT': '{{ ROOT }}',
             'DISK_IDENTIFIER': '{{ DISK_IDENTIFIER }}'})
        templ_file = 'ironic/tests/unit/drivers/' \
                     'ipxe_config_boot_from_volume_extra_volume.template'
        with open(templ_file) as f:
            expected_template = f.read().rstrip()
        self.assertEqual(six.text_type(expected_template), rendered_template)
    def test_default_ipxe_boot_from_volume_config_no_extra_volumes(self):
        """Boot-from-volume iPXE template (no extra volumes) renders to
        the golden file.
        """
        self.config(
            pxe_config_template='ironic/drivers/modules/ipxe_config.template',
            group='pxe'
        )
        self.config(http_url='http://1.2.3.4:1234', group='deploy')
        pxe_options = self.ipxe_options_boot_from_volume_no_extra_volume
        # Defensive: the fixture already sets this to []; re-set here so
        # the test does not depend on fixture ordering.
        pxe_options['iscsi_volumes'] = []
        rendered_template = utils.render_template(
            CONF.pxe.pxe_config_template,
            {'pxe_options': pxe_options,
             'ROOT': '{{ ROOT }}',
             'DISK_IDENTIFIER': '{{ DISK_IDENTIFIER }}'})
        templ_file = 'ironic/tests/unit/drivers/' \
                     'ipxe_config_boot_from_volume_no_extra_volumes.template'
        with open(templ_file) as f:
            expected_template = f.read().rstrip()
        self.assertEqual(six.text_type(expected_template), rendered_template)
    def test_default_grub_config(self):
        """The UEFI grub template renders to the golden file.

        Grub uses (( )) placeholders instead of jinja-style {{ }}.
        """
        pxe_opts = self.pxe_options
        pxe_opts['boot_mode'] = 'uefi'
        pxe_opts['tftp_server'] = '192.0.2.1'
        rendered_template = utils.render_template(
            CONF.pxe.uefi_pxe_config_template,
            {'pxe_options': pxe_opts,
             'ROOT': '(( ROOT ))',
             'DISK_IDENTIFIER': '(( DISK_IDENTIFIER ))'})
        templ_file = 'ironic/tests/unit/drivers/pxe_grub_config.template'
        with open(templ_file) as f:
            expected_template = f.read().rstrip()
        self.assertEqual(six.text_type(expected_template), rendered_template)
    @mock.patch('ironic.common.utils.create_link_without_raise', autospec=True)
    @mock.patch('ironic_lib.utils.unlink_without_raise', autospec=True)
    def test__write_mac_pxe_configs(self, unlink_mock, create_link_mock):
        """_link_mac_pxe_configs replaces the per-MAC symlinks
        (pxelinux '01-' style and '<mac>.conf') for every port.
        """
        port_1 = object_utils.create_test_port(
            self.context, node_id=self.node.id,
            address='11:22:33:44:55:66', uuid=uuidutils.generate_uuid())
        port_2 = object_utils.create_test_port(
            self.context, node_id=self.node.id,
            address='11:22:33:44:55:67', uuid=uuidutils.generate_uuid())
        create_link_calls = [
            mock.call(u'../1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
                      '/tftpboot/pxelinux.cfg/01-11-22-33-44-55-66'),
            mock.call(u'1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
                      '/tftpboot/11:22:33:44:55:66.conf'),
            mock.call(u'../1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
                      '/tftpboot/pxelinux.cfg/01-11-22-33-44-55-67'),
            mock.call(u'1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
                      '/tftpboot/11:22:33:44:55:67.conf')
        ]
        unlink_calls = [
            mock.call('/tftpboot/pxelinux.cfg/01-11-22-33-44-55-66'),
            mock.call('/tftpboot/11:22:33:44:55:66.conf'),
            mock.call('/tftpboot/pxelinux.cfg/01-11-22-33-44-55-67'),
            mock.call('/tftpboot/11:22:33:44:55:67.conf')
        ]
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.ports = [port_1, port_2]
            pxe_utils._link_mac_pxe_configs(task)
            unlink_mock.assert_has_calls(unlink_calls)
            create_link_mock.assert_has_calls(create_link_calls)
    @mock.patch('ironic.common.utils.create_link_without_raise', autospec=True)
    @mock.patch('ironic_lib.utils.unlink_without_raise', autospec=True)
    def test__write_infiniband_mac_pxe_configs(
            self, unlink_mock, create_link_mock):
        """InfiniBand ports (client-id in extra) get a '20-' symlink
        prefix instead of the Ethernet '01-' prefix.
        """
        client_id1 = (
            '20:00:55:04:01:fe:80:00:00:00:00:00:00:00:02:c9:02:00:23:13:92')
        port_1 = object_utils.create_test_port(
            self.context, node_id=self.node.id,
            address='11:22:33:44:55:66', uuid=uuidutils.generate_uuid(),
            extra={'client-id': client_id1})
        client_id2 = (
            '20:00:55:04:01:fe:80:00:00:00:00:00:00:00:02:c9:02:00:23:45:12')
        port_2 = object_utils.create_test_port(
            self.context, node_id=self.node.id,
            address='11:22:33:44:55:67', uuid=uuidutils.generate_uuid(),
            extra={'client-id': client_id2})
        create_link_calls = [
            mock.call(u'../1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
                      '/tftpboot/pxelinux.cfg/20-11-22-33-44-55-66'),
            mock.call(u'1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
                      '/tftpboot/11:22:33:44:55:66.conf'),
            mock.call(u'../1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
                      '/tftpboot/pxelinux.cfg/20-11-22-33-44-55-67'),
            mock.call(u'1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
                      '/tftpboot/11:22:33:44:55:67.conf')
        ]
        unlink_calls = [
            mock.call('/tftpboot/pxelinux.cfg/20-11-22-33-44-55-66'),
            mock.call('/tftpboot/11:22:33:44:55:66.conf'),
            mock.call('/tftpboot/pxelinux.cfg/20-11-22-33-44-55-67'),
            mock.call('/tftpboot/11:22:33:44:55:67.conf')
        ]
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.ports = [port_1, port_2]
            pxe_utils._link_mac_pxe_configs(task)
            unlink_mock.assert_has_calls(unlink_calls)
            create_link_mock.assert_has_calls(create_link_calls)
    @mock.patch('ironic.common.utils.create_link_without_raise', autospec=True)
    @mock.patch('ironic_lib.utils.unlink_without_raise', autospec=True)
    def test__write_mac_ipxe_configs(self, unlink_mock, create_link_mock):
        """With iPXE enabled, per-MAC links go under the HTTP root and
        use the bare MAC (no '01-' prefix).
        """
        self.config(ipxe_enabled=True, group='pxe')
        port_1 = object_utils.create_test_port(
            self.context, node_id=self.node.id,
            address='11:22:33:44:55:66', uuid=uuidutils.generate_uuid())
        port_2 = object_utils.create_test_port(
            self.context, node_id=self.node.id,
            address='11:22:33:44:55:67', uuid=uuidutils.generate_uuid())
        create_link_calls = [
            mock.call(u'../1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
                      '/httpboot/pxelinux.cfg/11-22-33-44-55-66'),
            mock.call(u'1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
                      '/httpboot/11:22:33:44:55:66.conf'),
            mock.call(u'../1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
                      '/httpboot/pxelinux.cfg/11-22-33-44-55-67'),
            mock.call(u'1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
                      '/httpboot/11:22:33:44:55:67.conf')
        ]
        unlink_calls = [
            mock.call('/httpboot/pxelinux.cfg/11-22-33-44-55-66'),
            mock.call('/httpboot/11:22:33:44:55:66.conf'),
            mock.call('/httpboot/pxelinux.cfg/11-22-33-44-55-67'),
            mock.call('/httpboot/11:22:33:44:55:67.conf'),
        ]
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.ports = [port_1, port_2]
            pxe_utils._link_mac_pxe_configs(task, ipxe_enabled=True)
            unlink_mock.assert_has_calls(unlink_calls)
            create_link_mock.assert_has_calls(create_link_calls)
    @mock.patch('ironic.common.utils.create_link_without_raise', autospec=True)
    @mock.patch('ironic_lib.utils.unlink_without_raise', autospec=True)
    @mock.patch('ironic.common.dhcp_factory.DHCPFactory.provider',
                autospec=True)
    def test__link_ip_address_pxe_configs(self, provider_mock, unlink_mock,
                                          create_link_mock):
        """_link_ip_address_pxe_configs replaces the per-IP '<ip>.conf'
        symlink using addresses from the DHCP provider.
        """
        ip_address = '10.10.0.1'
        address = "aa:aa:aa:aa:aa:aa"
        object_utils.create_test_port(self.context, node_id=self.node.id,
                                      address=address)
        provider_mock.get_ip_addresses.return_value = [ip_address]
        create_link_calls = [
            mock.call(u'1be26c0b-03f2-4d2e-ae87-c02d7f33c123/config',
                      u'/tftpboot/10.10.0.1.conf'),
        ]
        with task_manager.acquire(self.context, self.node.uuid) as task:
            pxe_utils._link_ip_address_pxe_configs(task, False)
        unlink_mock.assert_called_once_with('/tftpboot/10.10.0.1.conf')
        create_link_mock.assert_has_calls(create_link_calls)
    @mock.patch.object(os, 'chmod', autospec=True)
    @mock.patch('ironic.common.utils.write_to_file', autospec=True)
    @mock.patch('ironic.common.utils.render_template', autospec=True)
    @mock.patch('oslo_utils.fileutils.ensure_tree', autospec=True)
    def test_create_pxe_config(self, ensure_tree_mock, render_mock,
                               write_mock, chmod_mock):
        """create_pxe_config renders the template, creates the node and
        pxelinux.cfg directories, and writes the config file.  With no
        dir_permission configured, chmod is never called.
        """
        with task_manager.acquire(self.context, self.node.uuid) as task:
            pxe_utils.create_pxe_config(task, self.pxe_options,
                                        CONF.pxe.pxe_config_template)
            render_mock.assert_called_with(
                CONF.pxe.pxe_config_template,
                {'pxe_options': self.pxe_options,
                 'ROOT': '{{ ROOT }}',
                 'DISK_IDENTIFIER': '{{ DISK_IDENTIFIER }}'}
            )
        node_dir = os.path.join(CONF.pxe.tftp_root, self.node.uuid)
        pxe_dir = os.path.join(CONF.pxe.tftp_root, 'pxelinux.cfg')
        ensure_calls = [
            mock.call(node_dir), mock.call(pxe_dir),
        ]
        ensure_tree_mock.assert_has_calls(ensure_calls)
        chmod_mock.assert_not_called()
        pxe_cfg_file_path = pxe_utils.get_pxe_config_file_path(self.node.uuid)
        write_mock.assert_called_with(pxe_cfg_file_path,
                                      render_mock.return_value)
    @mock.patch.object(os, 'chmod', autospec=True)
    @mock.patch('ironic.common.utils.write_to_file', autospec=True)
    @mock.patch('ironic.common.utils.render_template', autospec=True)
    @mock.patch('oslo_utils.fileutils.ensure_tree', autospec=True)
    def test_create_pxe_config_set_dir_permission(self, ensure_tree_mock,
                                                  render_mock,
                                                  write_mock, chmod_mock):
        """When [pxe]dir_permission is set, both created directories are
        chmod'ed to that mode.
        """
        self.config(dir_permission=0o755, group='pxe')
        with task_manager.acquire(self.context, self.node.uuid) as task:
            pxe_utils.create_pxe_config(task, self.pxe_options,
                                        CONF.pxe.pxe_config_template)
            render_mock.assert_called_with(
                CONF.pxe.pxe_config_template,
                {'pxe_options': self.pxe_options,
                 'ROOT': '{{ ROOT }}',
                 'DISK_IDENTIFIER': '{{ DISK_IDENTIFIER }}'}
            )
        node_dir = os.path.join(CONF.pxe.tftp_root, self.node.uuid)
        pxe_dir = os.path.join(CONF.pxe.tftp_root, 'pxelinux.cfg')
        ensure_calls = [
            mock.call(node_dir), mock.call(pxe_dir),
        ]
        ensure_tree_mock.assert_has_calls(ensure_calls)
        chmod_calls = [mock.call(node_dir, 0o755), mock.call(pxe_dir, 0o755)]
        chmod_mock.assert_has_calls(chmod_calls)
        pxe_cfg_file_path = pxe_utils.get_pxe_config_file_path(self.node.uuid)
        write_mock.assert_called_with(pxe_cfg_file_path,
                                      render_mock.return_value)
    @mock.patch.object(os.path, 'isdir', autospec=True)
    @mock.patch.object(os, 'chmod', autospec=True)
    @mock.patch('ironic.common.utils.write_to_file', autospec=True)
    @mock.patch('ironic.common.utils.render_template', autospec=True)
    @mock.patch('oslo_utils.fileutils.ensure_tree', autospec=True)
    def test_create_pxe_config_existing_dirs(self, ensure_tree_mock,
                                             render_mock,
                                             write_mock, chmod_mock,
                                             isdir_mock):
        """When the directories already exist (isdir -> True), no chmod
        is performed even with dir_permission configured.
        """
        self.config(dir_permission=0o755, group='pxe')
        with task_manager.acquire(self.context, self.node.uuid) as task:
            isdir_mock.return_value = True
            pxe_utils.create_pxe_config(task, self.pxe_options,
                                        CONF.pxe.pxe_config_template)
            render_mock.assert_called_with(
                CONF.pxe.pxe_config_template,
                {'pxe_options': self.pxe_options,
                 'ROOT': '{{ ROOT }}',
                 'DISK_IDENTIFIER': '{{ DISK_IDENTIFIER }}'}
            )
        # NOTE(review): assert_has_calls([]) is vacuous -- it passes for
        # any call history.  These two asserts verify nothing; consider
        # replacing with explicit expectations.
        ensure_tree_mock.assert_has_calls([])
        chmod_mock.assert_not_called()
        isdir_mock.assert_has_calls([])
        pxe_cfg_file_path = pxe_utils.get_pxe_config_file_path(self.node.uuid)
        write_mock.assert_called_with(pxe_cfg_file_path,
                                      render_mock.return_value)
    @mock.patch.object(os, 'chmod', autospec=True)
    @mock.patch('ironic.common.pxe_utils._link_ip_address_pxe_configs',
                autospec=True)
    @mock.patch('ironic.common.utils.write_to_file', autospec=True)
    @mock.patch('ironic.common.utils.render_template', autospec=True)
    @mock.patch('oslo_utils.fileutils.ensure_tree', autospec=True)
    def test_create_pxe_config_uefi_grub(self, ensure_tree_mock, render_mock,
                                         write_mock, link_ip_configs_mock,
                                         chmod_mock):
        """UEFI + grub template: (( )) placeholders are used and the
        per-IP-address config links are created.
        """
        grub_tmplte = "ironic/drivers/modules/pxe_grub_config.template"
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node.properties['capabilities'] = 'boot_mode:uefi'
            pxe_utils.create_pxe_config(task, self.pxe_options,
                                        grub_tmplte)
            ensure_calls = [
                mock.call(os.path.join(CONF.pxe.tftp_root, self.node.uuid)),
                mock.call(os.path.join(CONF.pxe.tftp_root, 'pxelinux.cfg')),
            ]
            ensure_tree_mock.assert_has_calls(ensure_calls)
            chmod_mock.assert_not_called()
            render_mock.assert_called_with(
                grub_tmplte,
                {'pxe_options': self.pxe_options,
                 'ROOT': '(( ROOT ))',
                 'DISK_IDENTIFIER': '(( DISK_IDENTIFIER ))'})
            link_ip_configs_mock.assert_called_once_with(task, False)
        pxe_cfg_file_path = pxe_utils.get_pxe_config_file_path(self.node.uuid)
        write_mock.assert_called_with(pxe_cfg_file_path,
                                      render_mock.return_value)
@mock.patch.object(os, 'chmod', autospec=True)
@mock.patch('ironic.common.pxe_utils._link_mac_pxe_configs',
autospec=True)
@mock.patch('ironic.common.pxe_utils._link_ip_address_pxe_configs',
autospec=True)
@mock.patch('ironic.common.utils.write_to_file', autospec=True)
@mock.patch('ironic.common.utils.render_template', autospec=True)
@mock.patch('oslo_utils.fileutils.ensure_tree', autospec=True)
def test_create_pxe_config_uefi_mac_address(
self, ensure_tree_mock, render_mock,
write_mock, link_ip_configs_mock,
link_mac_pxe_configs_mock, chmod_mock):
grub_tmplte = "ironic/drivers/modules/pxe_grub_config.template"
link_ip_configs_mock.side_efect = exception.FailedToGetIPAddressOnPort(
port_id='blah')
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.properties['capabilities'] = 'boot_mode:uefi'
pxe_utils.create_pxe_config(task, self.pxe_options,
grub_tmplte)
ensure_calls = [
mock.call(os.path.join(CONF.pxe.tftp_root, self.node.uuid)),
mock.call(os.path.join(CONF.pxe.tftp_root, 'pxelinux.cfg')),
]
ensure_tree_mock.assert_has_calls(ensure_calls)
chmod_mock.assert_not_called()
render_mock.assert_called_with(
grub_tmplte,
{'pxe_options': self.pxe_options,
'ROOT': '(( ROOT ))',
'DISK_IDENTIFIER': '(( DISK_IDENTIFIER ))'})
link_mac_pxe_configs_mock.assert_called_once_with(
task, ipxe_enabled=False)
link_ip_configs_mock.assert_called_once_with(task, False)
pxe_cfg_file_path = pxe_utils.get_pxe_config_file_path(self.node.uuid)
write_mock.assert_called_with(pxe_cfg_file_path,
render_mock.return_value)
    @mock.patch.object(os, 'chmod', autospec=True)
    @mock.patch('ironic.common.pxe_utils._link_mac_pxe_configs', autospec=True)
    @mock.patch('ironic.common.utils.write_to_file', autospec=True)
    @mock.patch('ironic.common.utils.render_template', autospec=True)
    @mock.patch('oslo_utils.fileutils.ensure_tree', autospec=True)
    def test_create_pxe_config_uefi_ipxe(self, ensure_tree_mock, render_mock,
                                         write_mock, link_mac_pxe_mock,
                                         chmod_mock):
        """UEFI with iPXE enabled: config goes under the HTTP root and
        MAC-based (not IP-based) links are created.
        """
        self.config(ipxe_enabled=True, group='pxe')
        ipxe_template = "ironic/drivers/modules/ipxe_config.template"
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node.properties['capabilities'] = 'boot_mode:uefi'
            pxe_utils.create_pxe_config(task, self.ipxe_options,
                                        ipxe_template, ipxe_enabled=True)
            ensure_calls = [
                mock.call(os.path.join(CONF.deploy.http_root, self.node.uuid)),
                mock.call(os.path.join(CONF.deploy.http_root, 'pxelinux.cfg')),
            ]
            ensure_tree_mock.assert_has_calls(ensure_calls)
            chmod_mock.assert_not_called()
            render_mock.assert_called_with(
                ipxe_template,
                {'pxe_options': self.ipxe_options,
                 'ROOT': '{{ ROOT }}',
                 'DISK_IDENTIFIER': '{{ DISK_IDENTIFIER }}'})
            link_mac_pxe_mock.assert_called_once_with(task, ipxe_enabled=True)
        pxe_cfg_file_path = pxe_utils.get_pxe_config_file_path(
            self.node.uuid, ipxe_enabled=True)
        write_mock.assert_called_with(pxe_cfg_file_path,
                                      render_mock.return_value)
    @mock.patch('ironic.common.utils.rmtree_without_raise', autospec=True)
    @mock.patch('ironic_lib.utils.unlink_without_raise', autospec=True)
    def test_clean_up_pxe_config(self, unlink_mock, rmtree_mock):
        """clean_up_pxe_config removes the per-MAC links and the node's
        TFTP directory.
        """
        address = "aa:aa:aa:aa:aa:aa"
        object_utils.create_test_port(self.context, node_id=self.node.id,
                                      address=address)
        with task_manager.acquire(self.context, self.node.uuid) as task:
            pxe_utils.clean_up_pxe_config(task)
        ensure_calls = [
            mock.call("/tftpboot/pxelinux.cfg/01-%s"
                      % address.replace(':', '-')),
            mock.call("/tftpboot/%s.conf" % address)
        ]
        unlink_mock.assert_has_calls(ensure_calls)
        rmtree_mock.assert_called_once_with(
            os.path.join(CONF.pxe.tftp_root, self.node.uuid))
    @mock.patch.object(os.path, 'isfile', lambda path: False)
    @mock.patch('ironic.common.utils.file_has_content', autospec=True)
    @mock.patch('ironic.common.utils.write_to_file', autospec=True)
    @mock.patch('ironic.common.utils.render_template', autospec=True)
    def test_create_ipxe_boot_script(self, render_mock, write_mock,
                                     file_has_content_mock):
        """Script does not exist yet: it is written without comparing
        contents.
        """
        render_mock.return_value = 'foo'
        pxe_utils.create_ipxe_boot_script()
        self.assertFalse(file_has_content_mock.called)
        write_mock.assert_called_once_with(
            os.path.join(CONF.deploy.http_root,
                         os.path.basename(CONF.pxe.ipxe_boot_script)),
            'foo')
        render_mock.assert_called_once_with(
            CONF.pxe.ipxe_boot_script,
            {'ipxe_for_mac_uri': 'pxelinux.cfg/'})
    @mock.patch.object(os.path, 'isfile', lambda path: True)
    @mock.patch('ironic.common.utils.file_has_content', autospec=True)
    @mock.patch('ironic.common.utils.write_to_file', autospec=True)
    @mock.patch('ironic.common.utils.render_template', autospec=True)
    def test_create_ipxe_boot_script_copy_file_different(
            self, render_mock, write_mock, file_has_content_mock):
        """Script exists but content differs: it is rewritten."""
        file_has_content_mock.return_value = False
        render_mock.return_value = 'foo'
        pxe_utils.create_ipxe_boot_script()
        file_has_content_mock.assert_called_once_with(
            os.path.join(CONF.deploy.http_root,
                         os.path.basename(CONF.pxe.ipxe_boot_script)),
            'foo')
        write_mock.assert_called_once_with(
            os.path.join(CONF.deploy.http_root,
                         os.path.basename(CONF.pxe.ipxe_boot_script)),
            'foo')
        render_mock.assert_called_once_with(
            CONF.pxe.ipxe_boot_script,
            {'ipxe_for_mac_uri': 'pxelinux.cfg/'})
    @mock.patch.object(os.path, 'isfile', lambda path: True)
    @mock.patch('ironic.common.utils.file_has_content', autospec=True)
    @mock.patch('ironic.common.utils.write_to_file', autospec=True)
    @mock.patch('ironic.common.utils.render_template', autospec=True)
    def test_create_ipxe_boot_script_already_exists(self, render_mock,
                                                    write_mock,
                                                    file_has_content_mock):
        """Script exists with identical content: nothing is written."""
        file_has_content_mock.return_value = True
        pxe_utils.create_ipxe_boot_script()
        self.assertFalse(write_mock.called)
def test__get_pxe_mac_path(self):
mac = '00:11:22:33:44:55:66'
self.assertEqual('/tftpboot/pxelinux.cfg/01-00-11-22-33-44-55-66',
pxe_utils._get_pxe_mac_path(mac))
def test__get_pxe_mac_path_ipxe(self):
self.config(http_root='/httpboot', group='deploy')
mac = '00:11:22:33:AA:BB:CC'
self.assertEqual('/httpboot/pxelinux.cfg/00-11-22-33-aa-bb-cc',
pxe_utils._get_pxe_mac_path(mac, ipxe_enabled=True))
def test__get_pxe_ip_address_path(self):
ipaddress = '10.10.0.1'
self.assertEqual('/tftpboot/10.10.0.1.conf',
pxe_utils._get_pxe_ip_address_path(ipaddress))
def test_get_root_dir(self):
expected_dir = '/tftproot'
self.config(ipxe_enabled=False, group='pxe')
self.config(tftp_root=expected_dir, group='pxe')
self.assertEqual(expected_dir, pxe_utils.get_root_dir())
def test_get_root_dir_ipxe(self):
expected_dir = '/httpboot'
self.config(ipxe_enabled=True, group='pxe')
self.config(http_root=expected_dir, group='deploy')
self.assertEqual(expected_dir, pxe_utils.get_root_dir())
def test_get_pxe_config_file_path(self):
self.assertEqual(os.path.join(CONF.pxe.tftp_root,
self.node.uuid,
'config'),
pxe_utils.get_pxe_config_file_path(self.node.uuid))
    def _dhcp_options_for_instance(self, ip_version=4):
        """Verify dhcp_options_for_instance() output for plain (non-iPXE) PXE.

        For IPv6 a single option '59' with a bracketed tftp:// URL is
        expected; for IPv4 the bootfile (67), path prefix (210) and TFTP
        server options (66/150/server-ip-address) are expected.
        """
        self.config(ip_version=ip_version, group='pxe')
        if ip_version == 4:
            self.config(tftp_server='192.0.2.1', group='pxe')
        elif ip_version == 6:
            self.config(tftp_server='ff80::1', group='pxe')
        self.config(pxe_bootfile_name='fake-bootfile', group='pxe')
        self.config(tftp_root='/tftp-path/', group='pxe')
        if ip_version == 6:
            # IPv6 addresses are bracketed inside the bootfile URL.
            expected_info = [{'opt_name': '59',
                              'opt_value': 'tftp://[ff80::1]/fake-bootfile',
                              'ip_version': ip_version}]
        elif ip_version == 4:
            expected_info = [{'opt_name': '67',
                              'opt_value': 'fake-bootfile',
                              'ip_version': ip_version},
                             {'opt_name': '210',
                              'opt_value': '/tftp-path/',
                              'ip_version': ip_version},
                             {'opt_name': '66',
                              'opt_value': '192.0.2.1',
                              'ip_version': ip_version},
                             {'opt_name': '150',
                              'opt_value': '192.0.2.1',
                              'ip_version': ip_version},
                             {'opt_name': 'server-ip-address',
                              'opt_value': '192.0.2.1',
                              'ip_version': ip_version}
                             ]
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertEqual(expected_info,
                             pxe_utils.dhcp_options_for_instance(task))
    def test_dhcp_options_for_instance(self):
        """DHCP options are built correctly for the IPv4 TFTP setup."""
        self._dhcp_options_for_instance(ip_version=4)
    def test_dhcp_options_for_instance_ipv6(self):
        """DHCP options are built correctly for an IPv6 TFTP server."""
        self.config(tftp_server='ff80::1', group='pxe')
        self._dhcp_options_for_instance(ip_version=6)
def _test_get_kernel_ramdisk_info(self, expected_dir, mode='deploy'):
node_uuid = 'fake-node'
driver_info = {
'%s_kernel' % mode: 'glance://%s-kernel' % mode,
'%s_ramdisk' % mode: 'glance://%s-ramdisk' % mode,
}
expected = {}
for k, v in driver_info.items():
expected[k] = (v, expected_dir + '/fake-node/%s' % k)
kr_info = pxe_utils.get_kernel_ramdisk_info(node_uuid,
driver_info,
mode=mode)
self.assertEqual(expected, kr_info)
    def test_get_kernel_ramdisk_info(self):
        """Deploy kernel/ramdisk local paths are rooted at tftp_root."""
        expected_dir = '/tftp'
        self.config(tftp_root=expected_dir, group='pxe')
        self._test_get_kernel_ramdisk_info(expected_dir)
    def test_get_kernel_ramdisk_info_ipxe(self):
        """With iPXE enabled, local paths are rooted at http_root."""
        expected_dir = '/http'
        self.config(ipxe_enabled=True, group='pxe')
        self.config(http_root=expected_dir, group='deploy')
        self._test_get_kernel_ramdisk_info(expected_dir)
    def test_get_kernel_ramdisk_info_bad_driver_info(self):
        """Empty driver_info makes get_kernel_ramdisk_info raise KeyError."""
        self.config(tftp_root='/tftp', group='pxe')
        node_uuid = 'fake-node'
        driver_info = {}
        self.assertRaises(KeyError,
                          pxe_utils.get_kernel_ramdisk_info,
                          node_uuid,
                          driver_info)
    def test_get_rescue_kr_info(self):
        """Rescue kernel/ramdisk local paths are rooted at tftp_root."""
        expected_dir = '/tftp'
        self.config(tftp_root=expected_dir, group='pxe')
        self._test_get_kernel_ramdisk_info(expected_dir, mode='rescue')
    def test_get_rescue_kr_info_ipxe(self):
        """Rescue kernel/ramdisk paths use http_root when iPXE is on."""
        expected_dir = '/http'
        self.config(ipxe_enabled=True, group='pxe')
        self.config(http_root=expected_dir, group='deploy')
        self._test_get_kernel_ramdisk_info(expected_dir, mode='rescue')
def _dhcp_options_for_instance_ipxe(self, task, boot_file, ip_version=4):
self.config(ipxe_enabled=True, group='pxe')
self.config(ipxe_boot_script='/test/boot.ipxe', group='pxe')
self.config(tftp_root='/tftp-path/', group='pxe')
if ip_version == 4:
self.config(tftp_server='192.0.2.1', group='pxe')
self.config(http_url='http://192.0.3.2:1234', group='deploy')
self.config(ipxe_boot_script='/test/boot.ipxe', group='pxe')
elif ip_version == 6:
self.config(tftp_server='ff80::1', group='pxe')
self.config(http_url='http://[ff80::1]:1234', group='deploy')
self.config(dhcp_provider='isc', group='dhcp')
if ip_version == 6:
expected_boot_script_url = 'http://[ff80::1]:1234/boot.ipxe'
expected_info = [{'opt_name': '!175,59',
'opt_value': 'tftp://[ff80::1]/fake-bootfile',
'ip_version': ip_version},
{'opt_name': '59',
'opt_value': expected_boot_script_url,
'ip_version': ip_version}]
elif ip_version == 4:
expected_boot_script_url = 'http://192.0.3.2:1234/boot.ipxe'
expected_info = [{'opt_name': '!175,67',
'opt_value': boot_file,
'ip_version': ip_version},
{'opt_name': '66',
'opt_value': '192.0.2.1',
'ip_version': ip_version},
{'opt_name': '150',
'opt_value': '192.0.2.1',
'ip_version': ip_version},
{'opt_name': '67',
'opt_value': expected_boot_script_url,
'ip_version': ip_version},
{'opt_name': 'server-ip-address',
'opt_value': '192.0.2.1',
'ip_version': ip_version}]
self.assertItemsEqual(expected_info,
pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=True))
self.config(dhcp_provider='neutron', group='dhcp')
if ip_version == 6:
expected_info = [{'opt_name': 'tag:!ipxe6,59',
'opt_value': 'tftp://[ff80::1]/fake-bootfile',
'ip_version': ip_version},
{'opt_name': 'tag:ipxe6,59',
'opt_value': expected_boot_script_url,
'ip_version': ip_version}]
elif ip_version == 4:
expected_info = [{'opt_name': 'tag:!ipxe,67',
'opt_value': boot_file,
'ip_version': ip_version},
{'opt_name': '66',
'opt_value': '192.0.2.1',
'ip_version': ip_version},
{'opt_name': '150',
'opt_value': '192.0.2.1',
'ip_version': ip_version},
{'opt_name': 'tag:ipxe,67',
'opt_value': expected_boot_script_url,
'ip_version': ip_version},
{'opt_name': 'server-ip-address',
'opt_value': '192.0.2.1',
'ip_version': ip_version}]
self.assertItemsEqual(expected_info,
pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=True))
    def test_dhcp_options_for_instance_ipxe_bios(self):
        """iPXE DHCP options use the BIOS bootfile for a BIOS node."""
        self.config(ip_version=4, group='pxe')
        boot_file = 'fake-bootfile-bios'
        self.config(pxe_bootfile_name=boot_file, group='pxe')
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self._dhcp_options_for_instance_ipxe(task, boot_file)
    def test_dhcp_options_for_instance_ipxe_uefi(self):
        """iPXE DHCP options use the UEFI bootfile when boot_mode is uefi."""
        self.config(ip_version=4, group='pxe')
        boot_file = 'fake-bootfile-uefi'
        self.config(uefi_pxe_bootfile_name=boot_file, group='pxe')
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node.properties['capabilities'] = 'boot_mode:uefi'
            self._dhcp_options_for_instance_ipxe(task, boot_file)
    def test_dhcp_options_for_ipxe_ipv6(self):
        """iPXE DHCP options are built for an IPv6 TFTP/HTTP setup."""
        self.config(ip_version=6, group='pxe')
        boot_file = 'fake-bootfile'
        self.config(pxe_bootfile_name=boot_file, group='pxe')
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self._dhcp_options_for_instance_ipxe(task, boot_file, ip_version=6)
@mock.patch('ironic.common.utils.rmtree_without_raise', autospec=True)
@mock.patch('ironic_lib.utils.unlink_without_raise', autospec=True)
@mock.patch('ironic.common.dhcp_factory.DHCPFactory.provider',
autospec=True)
def test_clean_up_pxe_config_uefi(self, provider_mock, unlink_mock,
rmtree_mock):
ip_address = '10.10.0.1'
address = "aa:aa:aa:aa:aa:aa"
properties = {'capabilities': 'boot_mode:uefi'}
object_utils.create_test_port(self.context, node_id=self.node.id,
address=address)
provider_mock.get_ip_addresses.return_value = [ip_address]
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.properties = properties
pxe_utils.clean_up_pxe_config(task)
unlink_calls = [
mock.call('/tftpboot/10.10.0.1.conf'),
mock.call('/tftpboot/pxelinux.cfg/01-aa-aa-aa-aa-aa-aa'),
mock.call('/tftpboot/' + address + '.conf')
]
unlink_mock.assert_has_calls(unlink_calls)
rmtree_mock.assert_called_once_with(
os.path.join(CONF.pxe.tftp_root, self.node.uuid))
    @mock.patch('ironic.common.utils.rmtree_without_raise', autospec=True)
    @mock.patch('ironic_lib.utils.unlink_without_raise', autospec=True)
    @mock.patch('ironic.common.dhcp_factory.DHCPFactory.provider',
                autospec=True)
    def test_clean_up_pxe_config_uefi_mac_address(
            self, provider_mock, unlink_mock, rmtree_mock):
        """UEFI cleanup unlinks the MAC-based config path as well.

        NOTE(review): this appears to cover the same paths as
        test_clean_up_pxe_config_uefi — confirm whether both are needed.
        """
        ip_address = '10.10.0.1'
        address = "aa:aa:aa:aa:aa:aa"
        properties = {'capabilities': 'boot_mode:uefi'}
        object_utils.create_test_port(self.context, node_id=self.node.id,
                                      address=address)
        provider_mock.get_ip_addresses.return_value = [ip_address]
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node.properties = properties
            pxe_utils.clean_up_pxe_config(task)
            unlink_calls = [
                mock.call('/tftpboot/10.10.0.1.conf'),
                mock.call('/tftpboot/pxelinux.cfg/01-%s' %
                          address.replace(':', '-')),
                mock.call('/tftpboot/' + address + '.conf')
            ]
            unlink_mock.assert_has_calls(unlink_calls)
            rmtree_mock.assert_called_once_with(
                os.path.join(CONF.pxe.tftp_root, self.node.uuid))
    @mock.patch('ironic.common.utils.rmtree_without_raise', autospec=True)
    @mock.patch('ironic_lib.utils.unlink_without_raise', autospec=True)
    @mock.patch('ironic.common.dhcp_factory.DHCPFactory.provider',
                autospec=True)
    def test_clean_up_pxe_config_uefi_instance_info(self,
                                                    provider_mock, unlink_mock,
                                                    rmtree_mock):
        """UEFI cleanup also works when boot mode comes from instance_info."""
        ip_address = '10.10.0.1'
        address = "aa:aa:aa:aa:aa:aa"
        object_utils.create_test_port(self.context, node_id=self.node.id,
                                      address=address)
        provider_mock.get_ip_addresses.return_value = [ip_address]
        with task_manager.acquire(self.context, self.node.uuid) as task:
            # boot mode set via instance_info rather than node capabilities
            task.node.instance_info['deploy_boot_mode'] = 'uefi'
            pxe_utils.clean_up_pxe_config(task)
            unlink_calls = [
                mock.call('/tftpboot/10.10.0.1.conf'),
                mock.call('/tftpboot/pxelinux.cfg/01-aa-aa-aa-aa-aa-aa'),
                mock.call('/tftpboot/' + address + ".conf")
            ]
            unlink_mock.assert_has_calls(unlink_calls)
            rmtree_mock.assert_called_once_with(
                os.path.join(CONF.pxe.tftp_root, self.node.uuid))
@mock.patch('ironic.common.utils.rmtree_without_raise', autospec=True)
@mock.patch('ironic_lib.utils.unlink_without_raise', autospec=True)
def test_clean_up_ipxe_config_uefi(self, unlink_mock, rmtree_mock):
self.config(ipxe_enabled=True, group='pxe')
address = "aa:aa:aa:aa:aa:aa"
properties = {'capabilities': 'boot_mode:uefi'}
object_utils.create_test_port(self.context, node_id=self.node.id,
address=address)
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.properties = properties
pxe_utils.clean_up_pxe_config(task, ipxe_enabled=True)
ensure_calls = [
mock.call("/httpboot/pxelinux.cfg/%s"
% address.replace(':', '-')),
mock.call("/httpboot/%s.conf" % address)
]
unlink_mock.assert_has_calls(ensure_calls)
rmtree_mock.assert_called_once_with(
os.path.join(CONF.deploy.http_root, self.node.uuid))
def test_get_tftp_path_prefix_with_trailing_slash(self):
self.config(tftp_root='/tftpboot-path/', group='pxe')
path_prefix = pxe_utils.get_tftp_path_prefix()
self.assertEqual(path_prefix, '/tftpboot-path/')
def test_get_tftp_path_prefix_without_trailing_slash(self):
self.config(tftp_root='/tftpboot-path', group='pxe')
path_prefix = pxe_utils.get_tftp_path_prefix()
self.assertEqual(path_prefix, '/tftpboot-path/')
def test_get_path_relative_to_tftp_root_with_trailing_slash(self):
self.config(tftp_root='/tftpboot-path/', group='pxe')
test_file_path = '/tftpboot-path/pxelinux.cfg/test'
relpath = pxe_utils.get_path_relative_to_tftp_root(test_file_path)
self.assertEqual(relpath, 'pxelinux.cfg/test')
def test_get_path_relative_to_tftp_root_without_trailing_slash(self):
self.config(tftp_root='/tftpboot-path', group='pxe')
test_file_path = '/tftpboot-path/pxelinux.cfg/test'
relpath = pxe_utils.get_path_relative_to_tftp_root(test_file_path)
self.assertEqual(relpath, 'pxelinux.cfg/test')
@mock.patch.object(ipxe.iPXEBoot, '__init__', lambda self: None)
@mock.patch.object(pxe.PXEBoot, '__init__', lambda self: None)
class PXEInterfacesTestCase(db_base.DbTestCase):
def setUp(self):
super(PXEInterfacesTestCase, self).setUp()
n = {
'driver': 'fake-hardware',
'boot_interface': 'pxe',
'instance_info': INST_INFO_DICT,
'driver_info': DRV_INFO_DICT,
'driver_internal_info': DRV_INTERNAL_INFO_DICT,
}
self.config_temp_dir('http_root', group='deploy')
self.node = object_utils.create_test_node(self.context, **n)
    def _test_parse_driver_info_missing_kernel(self, mode='deploy'):
        """Missing <mode>_kernel in driver_info raises MissingParameterValue."""
        del self.node.driver_info['%s_kernel' % mode]
        if mode == 'rescue':
            # rescue images are only validated while rescuing
            self.node.provision_state = states.RESCUING
        self.assertRaises(exception.MissingParameterValue,
                          pxe_utils.parse_driver_info, self.node, mode=mode)
    def test_parse_driver_info_missing_deploy_kernel(self):
        """A missing deploy kernel is reported as a missing parameter."""
        self._test_parse_driver_info_missing_kernel()
    def test_parse_driver_info_missing_rescue_kernel(self):
        """A missing rescue kernel is reported as a missing parameter."""
        self._test_parse_driver_info_missing_kernel(mode='rescue')
    def _test_parse_driver_info_missing_ramdisk(self, mode='deploy'):
        """Missing <mode>_ramdisk in driver_info raises MissingParameterValue."""
        del self.node.driver_info['%s_ramdisk' % mode]
        if mode == 'rescue':
            # rescue images are only validated while rescuing
            self.node.provision_state = states.RESCUING
        self.assertRaises(exception.MissingParameterValue,
                          pxe_utils.parse_driver_info, self.node, mode=mode)
    def test_parse_driver_info_missing_deploy_ramdisk(self):
        """A missing deploy ramdisk is reported as a missing parameter."""
        self._test_parse_driver_info_missing_ramdisk()
    def test_parse_driver_info_missing_rescue_ramdisk(self):
        """A missing rescue ramdisk is reported as a missing parameter."""
        self._test_parse_driver_info_missing_ramdisk(mode='rescue')
def _test_parse_driver_info(self, mode='deploy'):
exp_info = {'%s_ramdisk' % mode: 'glance://%s_ramdisk_uuid' % mode,
'%s_kernel' % mode: 'glance://%s_kernel_uuid' % mode}
image_info = pxe_utils.parse_driver_info(self.node, mode=mode)
self.assertEqual(exp_info, image_info)
    def test_parse_driver_info_deploy(self):
        """Driver info parsing succeeds in the default 'deploy' mode."""
        self._test_parse_driver_info()
    def test_parse_driver_info_rescue(self):
        """Driver info parsing succeeds in 'rescue' mode."""
        self._test_parse_driver_info(mode='rescue')
def test__get_deploy_image_info(self):
expected_info = {'deploy_ramdisk':
(DRV_INFO_DICT['deploy_ramdisk'],
os.path.join(CONF.pxe.tftp_root,
self.node.uuid,
'deploy_ramdisk')),
'deploy_kernel':
(DRV_INFO_DICT['deploy_kernel'],
os.path.join(CONF.pxe.tftp_root,
self.node.uuid,
'deploy_kernel'))}
image_info = pxe_utils.get_image_info(self.node)
self.assertEqual(expected_info, image_info)
def test__get_deploy_image_info_ipxe(self):
expected_info = {'deploy_ramdisk':
(DRV_INFO_DICT['deploy_ramdisk'],
os.path.join(CONF.deploy.http_root,
self.node.uuid,
'deploy_ramdisk')),
'deploy_kernel':
(DRV_INFO_DICT['deploy_kernel'],
os.path.join(CONF.deploy.http_root,
self.node.uuid,
'deploy_kernel'))}
image_info = pxe_utils.get_image_info(self.node, ipxe_enabled=True)
self.assertEqual(expected_info, image_info)
    def test__get_deploy_image_info_missing_deploy_kernel(self):
        """Absent deploy_kernel makes get_image_info raise."""
        del self.node.driver_info['deploy_kernel']
        self.assertRaises(exception.MissingParameterValue,
                          pxe_utils.get_image_info, self.node)
    def test__get_deploy_image_info_deploy_ramdisk(self):
        """Absent deploy_ramdisk makes get_image_info raise."""
        del self.node.driver_info['deploy_ramdisk']
        self.assertRaises(exception.MissingParameterValue,
                          pxe_utils.get_image_info, self.node)
    @mock.patch.object(image_service.GlanceImageService, 'show', autospec=True)
    def _test_get_instance_image_info(self, show_mock):
        """Instance kernel/ramdisk info comes from Glance and is cached.

        The first call consults Glance; the second must be served from the
        kernel/ramdisk ids stashed in the node's instance_info.
        """
        properties = {'properties': {u'kernel_id': u'instance_kernel_uuid',
                                     u'ramdisk_id': u'instance_ramdisk_uuid'}}
        expected_info = {'ramdisk':
                         ('instance_ramdisk_uuid',
                          os.path.join(CONF.pxe.tftp_root,
                                       self.node.uuid,
                                       'ramdisk')),
                         'kernel':
                         ('instance_kernel_uuid',
                          os.path.join(CONF.pxe.tftp_root,
                                       self.node.uuid,
                                       'kernel'))}
        show_mock.return_value = properties
        self.context.auth_token = 'fake'
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            image_info = pxe_utils.get_instance_image_info(task)
            show_mock.assert_called_once_with(mock.ANY, 'glance://image_uuid')
            self.assertEqual(expected_info, image_info)
            # Second call: Glance must not be consulted again.
            show_mock.reset_mock()
            image_info = pxe_utils.get_instance_image_info(task)
            self.assertEqual(expected_info, image_info)
            self.assertFalse(show_mock.called)
            self.assertEqual('instance_kernel_uuid',
                             task.node.instance_info['kernel'])
            self.assertEqual('instance_ramdisk_uuid',
                             task.node.instance_info['ramdisk'])
    def test_get_instance_image_info(self):
        """Instance image info is fetched from Glance and cached."""
        self._test_get_instance_image_info()
    def test_get_instance_image_info_without_is_whole_disk_image(self):
        """Lookup still works when 'is_whole_disk_image' is absent."""
        # remove the flag from driver_internal_info entirely
        del self.node.driver_internal_info['is_whole_disk_image']
        self.node.save()
        self._test_get_instance_image_info()
@mock.patch('ironic.drivers.modules.deploy_utils.get_boot_option',
return_value='local')
def test_get_instance_image_info_localboot(self, boot_opt_mock):
self.node.driver_internal_info['is_whole_disk_image'] = False
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
image_info = pxe_utils.get_instance_image_info(task)
self.assertEqual({}, image_info)
boot_opt_mock.assert_called_once_with(task.node)
    @mock.patch.object(image_service.GlanceImageService, 'show', autospec=True)
    def test_get_instance_image_info_whole_disk_image(self, show_mock):
        """Whole-disk images need no kernel/ramdisk: info is empty."""
        # Glance reports no image properties at all for this image.
        properties = {'properties': None}
        show_mock.return_value = properties
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            task.node.driver_internal_info['is_whole_disk_image'] = True
            image_info = pxe_utils.get_instance_image_info(task)
        self.assertEqual({}, image_info)
    @mock.patch('ironic.common.utils.render_template', autospec=True)
    def _test_build_pxe_config_options_pxe(self, render_mock,
                                           whle_dsk_img=False,
                                           debug=False, mode='deploy'):
        """Verify build_pxe_config_options() output for plain PXE.

        Whole-disk images and local boot get placeholder 'no_kernel' /
        'no_ramdisk' instance paths; otherwise instance kernel/ramdisk
        paths are included alongside the deployment ones.
        """
        self.config(debug=debug)
        self.config(pxe_append_params='test_param', group='pxe')
        # NOTE: right '/' should be removed from url string
        self.config(api_url='http://192.168.122.184:6385', group='conductor')

        driver_internal_info = self.node.driver_internal_info
        driver_internal_info['is_whole_disk_image'] = whle_dsk_img
        self.node.driver_internal_info = driver_internal_info
        self.node.save()

        tftp_server = CONF.pxe.tftp_server

        kernel_label = '%s_kernel' % mode
        ramdisk_label = '%s_ramdisk' % mode
        pxe_kernel = os.path.join(self.node.uuid, kernel_label)
        pxe_ramdisk = os.path.join(self.node.uuid, ramdisk_label)
        kernel = os.path.join(self.node.uuid, 'kernel')
        ramdisk = os.path.join(self.node.uuid, 'ramdisk')
        root_dir = CONF.pxe.tftp_root

        image_info = {
            kernel_label: (kernel_label,
                           os.path.join(root_dir,
                                        self.node.uuid,
                                        kernel_label)),
            ramdisk_label: (ramdisk_label,
                            os.path.join(root_dir,
                                         self.node.uuid,
                                         ramdisk_label))
        }

        if (whle_dsk_img
                or deploy_utils.get_boot_option(self.node) == 'local'):
            # No instance kernel/ramdisk needed; placeholders expected.
            ramdisk = 'no_ramdisk'
            kernel = 'no_kernel'
        else:
            image_info.update({
                'kernel': ('kernel_id',
                           os.path.join(root_dir,
                                        self.node.uuid,
                                        'kernel')),
                'ramdisk': ('ramdisk_id',
                            os.path.join(root_dir,
                                         self.node.uuid,
                                         'ramdisk'))
            })

        expected_pxe_params = 'test_param'
        if debug:
            expected_pxe_params += ' ipa-debug=1'

        expected_options = {
            'deployment_ari_path': pxe_ramdisk,
            'pxe_append_params': expected_pxe_params,
            'deployment_aki_path': pxe_kernel,
            'tftp_server': tftp_server,
            'ipxe_timeout': 0,
            'ari_path': ramdisk,
            'aki_path': kernel,
        }

        if mode == 'rescue':
            self.node.provision_state = states.RESCUING
            self.node.save()

        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            options = pxe_utils.build_pxe_config_options(task, image_info)
        self.assertEqual(expected_options, options)
    def test_build_pxe_config_options_pxe(self):
        """PXE options for a whole-disk image (placeholder instance paths)."""
        self._test_build_pxe_config_options_pxe(whle_dsk_img=True)
    def test_build_pxe_config_options_pxe_ipa_debug(self):
        """debug=True appends ipa-debug=1 to the kernel append params."""
        self._test_build_pxe_config_options_pxe(debug=True)
    def test_build_pxe_config_options_pxe_rescue(self):
        """PXE options in rescue mode use the rescue kernel/ramdisk."""
        del self.node.driver_internal_info['is_whole_disk_image']
        self._test_build_pxe_config_options_pxe(mode='rescue')
    def test_build_pxe_config_options_ipa_debug_rescue(self):
        """Rescue mode and debug params can be combined."""
        del self.node.driver_internal_info['is_whole_disk_image']
        self._test_build_pxe_config_options_pxe(debug=True, mode='rescue')
def test_build_pxe_config_options_pxe_local_boot(self):
del self.node.driver_internal_info['is_whole_disk_image']
i_info = self.node.instance_info
i_info.update({'capabilities': {'boot_option': 'local'}})
self.node.instance_info = i_info
self.node.save()
self._test_build_pxe_config_options_pxe(whle_dsk_img=False)
    def test_build_pxe_config_options_pxe_without_is_whole_disk_image(self):
        """Options are built when 'is_whole_disk_image' is absent."""
        del self.node.driver_internal_info['is_whole_disk_image']
        self.node.save()
        self._test_build_pxe_config_options_pxe(whle_dsk_img=False)
    def test_build_pxe_config_options_pxe_no_kernel_no_ramdisk(self):
        """Image info without instance kernel/ramdisk yields placeholders."""
        del self.node.driver_internal_info['is_whole_disk_image']
        self.node.save()
        pxe_params = 'my-pxe-append-params ipa-debug=0'
        self.config(group='pxe', tftp_server='my-tftp-server')
        self.config(group='pxe', pxe_append_params=pxe_params)
        self.config(group='pxe', tftp_root='/tftp-path/')
        # Only deployment images provided; no 'kernel'/'ramdisk' entries.
        image_info = {
            'deploy_kernel': ('deploy_kernel',
                              os.path.join(CONF.pxe.tftp_root,
                                           'path-to-deploy_kernel')),
            'deploy_ramdisk': ('deploy_ramdisk',
                               os.path.join(CONF.pxe.tftp_root,
                                            'path-to-deploy_ramdisk'))}

        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            options = pxe_utils.build_pxe_config_options(task, image_info)

            expected_options = {
                'aki_path': 'no_kernel',
                'ari_path': 'no_ramdisk',
                'deployment_aki_path': 'path-to-deploy_kernel',
                'deployment_ari_path': 'path-to-deploy_ramdisk',
                'pxe_append_params': pxe_params,
                'tftp_server': 'my-tftp-server',
                'ipxe_timeout': 0}
            self.assertEqual(expected_options, options)
    @mock.patch('ironic.common.image_service.GlanceImageService',
                autospec=True)
    @mock.patch('ironic.common.utils.render_template', autospec=True)
    def _test_build_pxe_config_options_ipxe(self, render_mock, glance_mock,
                                            whle_dsk_img=False,
                                            ipxe_timeout=0,
                                            ipxe_use_swift=False,
                                            debug=False,
                                            boot_from_volume=False,
                                            mode='deploy'):
        """Verify build_pxe_config_options() output with iPXE enabled.

        Covers deployment images served over HTTP or via Swift temp URLs,
        optional iPXE timeout, and boot-from-volume (which replaces the
        deployment image options with iSCSI target info).
        """
        self.config(debug=debug)
        self.config(pxe_append_params='test_param', group='pxe')
        # NOTE: right '/' should be removed from url string
        self.config(api_url='http://192.168.122.184:6385', group='conductor')
        self.config(ipxe_timeout=ipxe_timeout, group='pxe')
        root_dir = CONF.deploy.http_root

        driver_internal_info = self.node.driver_internal_info
        driver_internal_info['is_whole_disk_image'] = whle_dsk_img
        self.node.driver_internal_info = driver_internal_info
        self.node.save()

        tftp_server = CONF.pxe.tftp_server

        http_url = 'http://192.1.2.3:1234'
        self.config(ipxe_enabled=True, group='pxe')
        self.config(http_url=http_url, group='deploy')

        kernel_label = '%s_kernel' % mode
        ramdisk_label = '%s_ramdisk' % mode

        if ipxe_use_swift:
            self.config(ipxe_use_swift=True, group='pxe')
            glance = mock.Mock()
            glance_mock.return_value = glance
            # Chained assignment: the expected kernel/ramdisk URLs are the
            # same Swift temp URLs the mocked client will hand out.
            glance.swift_temp_url.side_effect = [
                pxe_kernel, pxe_ramdisk] = [
                'swift_kernel', 'swift_ramdisk']
            image_info = {
                kernel_label: (uuidutils.generate_uuid(),
                               os.path.join(root_dir,
                                            self.node.uuid,
                                            kernel_label)),
                ramdisk_label: (uuidutils.generate_uuid(),
                                os.path.join(root_dir,
                                             self.node.uuid,
                                             ramdisk_label))
            }
        else:
            pxe_kernel = os.path.join(http_url, self.node.uuid,
                                      kernel_label)
            pxe_ramdisk = os.path.join(http_url, self.node.uuid,
                                       ramdisk_label)
            image_info = {
                kernel_label: (kernel_label,
                               os.path.join(root_dir,
                                            self.node.uuid,
                                            kernel_label)),
                ramdisk_label: (ramdisk_label,
                                os.path.join(root_dir,
                                             self.node.uuid,
                                             ramdisk_label))
            }

        kernel = os.path.join(http_url, self.node.uuid, 'kernel')
        ramdisk = os.path.join(http_url, self.node.uuid, 'ramdisk')
        if (whle_dsk_img
                or deploy_utils.get_boot_option(self.node) == 'local'):
            # No instance kernel/ramdisk needed; placeholders expected.
            ramdisk = 'no_ramdisk'
            kernel = 'no_kernel'
        else:
            image_info.update({
                'kernel': ('kernel_id',
                           os.path.join(root_dir,
                                        self.node.uuid,
                                        'kernel')),
                'ramdisk': ('ramdisk_id',
                            os.path.join(root_dir,
                                         self.node.uuid,
                                         'ramdisk'))
            })

        # The template receives the timeout in milliseconds.
        ipxe_timeout_in_ms = ipxe_timeout * 1000

        expected_pxe_params = 'test_param'
        if debug:
            expected_pxe_params += ' ipa-debug=1'

        expected_options = {
            'deployment_ari_path': pxe_ramdisk,
            'pxe_append_params': expected_pxe_params,
            'deployment_aki_path': pxe_kernel,
            'tftp_server': tftp_server,
            'ipxe_timeout': ipxe_timeout_in_ms,
            'ari_path': ramdisk,
            'aki_path': kernel,
            'initrd_filename': ramdisk_label,
        }

        if mode == 'rescue':
            self.node.provision_state = states.RESCUING
            self.node.save()

        if boot_from_volume:
            expected_options.update({
                'boot_from_volume': True,
                'iscsi_boot_url': 'iscsi:fake_host::3260:0:fake_iqn',
                'iscsi_initiator_iqn': 'fake_iqn_initiator',
                'iscsi_volumes': [{'url': 'iscsi:fake_host::3260:1:fake_iqn',
                                   'username': 'fake_username_1',
                                   'password': 'fake_password_1'
                                   }],
                'username': 'fake_username',
                'password': 'fake_password'
            })
            # Booting from a volume drops the deployment image options.
            expected_options.pop('deployment_aki_path')
            expected_options.pop('deployment_ari_path')
            expected_options.pop('initrd_filename')

        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            options = pxe_utils.build_pxe_config_options(task,
                                                         image_info,
                                                         ipxe_enabled=True)
        self.assertEqual(expected_options, options)
    def test_build_pxe_config_options_ipxe(self):
        """iPXE options for a whole-disk image."""
        self._test_build_pxe_config_options_ipxe(whle_dsk_img=True)
    def test_build_pxe_config_options_ipxe_ipa_debug(self):
        """debug=True appends ipa-debug=1 to the iPXE append params."""
        self._test_build_pxe_config_options_ipxe(debug=True)
def test_build_pxe_config_options_ipxe_local_boot(self):
del self.node.driver_internal_info['is_whole_disk_image']
i_info = self.node.instance_info
i_info.update({'capabilities': {'boot_option': 'local'}})
self.node.instance_info = i_info
self.node.save()
self._test_build_pxe_config_options_ipxe(whle_dsk_img=False)
    def test_build_pxe_config_options_ipxe_swift_wdi(self):
        """Whole-disk image with deploy images served via Swift temp URLs."""
        self._test_build_pxe_config_options_ipxe(whle_dsk_img=True,
                                                 ipxe_use_swift=True)
    def test_build_pxe_config_options_ipxe_swift_partition(self):
        """Partition image with deploy images served via Swift temp URLs."""
        self._test_build_pxe_config_options_ipxe(whle_dsk_img=False,
                                                 ipxe_use_swift=True)
    def test_build_pxe_config_options_ipxe_and_ipxe_timeout(self):
        """A non-zero ipxe_timeout is propagated (in milliseconds)."""
        self._test_build_pxe_config_options_ipxe(whle_dsk_img=True,
                                                 ipxe_timeout=120)
    def test_build_pxe_config_options_ipxe_and_iscsi_boot(self):
        """iPXE options include iSCSI data when booting from a volume."""
        vol_id = uuidutils.generate_uuid()
        vol_id2 = uuidutils.generate_uuid()
        object_utils.create_test_volume_connector(
            self.context,
            uuid=uuidutils.generate_uuid(),
            type='iqn',
            node_id=self.node.id,
            connector_id='fake_iqn_initiator')
        # boot_index 0 becomes the primary iscsi_boot_url
        object_utils.create_test_volume_target(
            self.context, node_id=self.node.id, volume_type='iscsi',
            boot_index=0, volume_id='1234', uuid=vol_id,
            properties={'target_lun': 0,
                        'target_portal': 'fake_host:3260',
                        'target_iqn': 'fake_iqn',
                        'auth_username': 'fake_username',
                        'auth_password': 'fake_password'})
        # higher boot indexes land in iscsi_volumes
        object_utils.create_test_volume_target(
            self.context, node_id=self.node.id, volume_type='iscsi',
            boot_index=1, volume_id='1235', uuid=vol_id2,
            properties={'target_lun': 1,
                        'target_portal': 'fake_host:3260',
                        'target_iqn': 'fake_iqn',
                        'auth_username': 'fake_username_1',
                        'auth_password': 'fake_password_1'})
        self.node.driver_internal_info.update({'boot_from_volume': vol_id})
        self._test_build_pxe_config_options_ipxe(boot_from_volume=True)
    def test_build_pxe_config_options_ipxe_and_iscsi_boot_from_lists(self):
        """iSCSI boot also works when target properties are given as lists.

        Uses the plural 'target_luns'/'target_portals'/'target_iqns' keys
        (and list values for the singular keys on the second target).
        """
        vol_id = uuidutils.generate_uuid()
        vol_id2 = uuidutils.generate_uuid()
        object_utils.create_test_volume_connector(
            self.context,
            uuid=uuidutils.generate_uuid(),
            type='iqn',
            node_id=self.node.id,
            connector_id='fake_iqn_initiator')
        object_utils.create_test_volume_target(
            self.context, node_id=self.node.id, volume_type='iscsi',
            boot_index=0, volume_id='1234', uuid=vol_id,
            properties={'target_luns': [0, 2],
                        'target_portals': ['fake_host:3260',
                                           'faker_host:3261'],
                        'target_iqns': ['fake_iqn', 'faker_iqn'],
                        'auth_username': 'fake_username',
                        'auth_password': 'fake_password'})
        object_utils.create_test_volume_target(
            self.context, node_id=self.node.id, volume_type='iscsi',
            boot_index=1, volume_id='1235', uuid=vol_id2,
            properties={'target_lun': [1, 3],
                        'target_portal': ['fake_host:3260', 'faker_host:3261'],
                        'target_iqn': ['fake_iqn', 'faker_iqn'],
                        'auth_username': 'fake_username_1',
                        'auth_password': 'fake_password_1'})
        self.node.driver_internal_info.update({'boot_from_volume': vol_id})
        self._test_build_pxe_config_options_ipxe(boot_from_volume=True)
def test_get_volume_pxe_options(self):
vol_id = uuidutils.generate_uuid()
vol_id2 = uuidutils.generate_uuid()
object_utils.create_test_volume_connector(
self.context,
uuid=uuidutils.generate_uuid(),
type='iqn',
node_id=self.node.id,
connector_id='fake_iqn_initiator')
object_utils.create_test_volume_target(
self.context, node_id=self.node.id, volume_type='iscsi',
boot_index=0, volume_id='1234', uuid=vol_id,
properties={'target_lun': [0, 1, 3],
'target_portal': 'fake_host:3260',
'target_iqns': 'fake_iqn',
'auth_username': 'fake_username',
'auth_password': 'fake_password'})
object_utils.create_test_volume_target(
self.context, node_id=self.node.id, volume_type='iscsi',
boot_index=1, volume_id='1235', uuid=vol_id2,
properties={'target_lun': 1,
'target_portal': 'fake_host:3260',
'target_iqn': 'fake_iqn',
'auth_username': 'fake_username_1',
'auth_password': 'fake_password_1'})
self.node.driver_internal_info.update({'boot_from_volume': vol_id})
driver_internal_info = self.node.driver_internal_info
driver_internal_info['boot_from_volume'] = vol_id
self.node.driver_internal_info = driver_internal_info
self.node.save()
expected = {'boot_from_volume': True,
'username': 'fake_username', 'password': 'fake_password',
'iscsi_boot_url': 'iscsi:fake_host::3260:0:fake_iqn',
'iscsi_initiator_iqn': 'fake_iqn_initiator',
'iscsi_volumes': [{
'url': 'iscsi:fake_host::3260:1:fake_iqn',
'username': 'fake_username_1',
'password': 'fake_password_1'
}]
}
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
options = pxe_utils.get_volume_pxe_options(task)
self.assertEqual(expected, options)
def test_get_volume_pxe_options_unsupported_volume_type(self):
vol_id = uuidutils.generate_uuid()
object_utils.create_test_volume_target(
self.context, node_id=self.node.id, volume_type='fake_type',
boot_index=0, volume_id='1234', uuid=vol_id,
properties={'foo': 'bar'})
driver_internal_info = self.node.driver_internal_info
driver_internal_info['boot_from_volume'] = vol_id
self.node.driver_internal_info = driver_internal_info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
options = pxe_utils.get_volume_pxe_options(task)
self.assertEqual({}, options)
    def test_get_volume_pxe_options_unsupported_additional_volume_type(self):
        """Unsupported extra volumes are dropped from iscsi_volumes."""
        vol_id = uuidutils.generate_uuid()
        vol_id2 = uuidutils.generate_uuid()
        object_utils.create_test_volume_target(
            self.context, node_id=self.node.id, volume_type='iscsi',
            boot_index=0, volume_id='1234', uuid=vol_id,
            properties={'target_lun': 0,
                        'target_portal': 'fake_host:3260',
                        'target_iqn': 'fake_iqn',
                        'auth_username': 'fake_username',
                        'auth_password': 'fake_password'})
        # second target has an unsupported type and must be ignored
        object_utils.create_test_volume_target(
            self.context, node_id=self.node.id, volume_type='fake_type',
            boot_index=1, volume_id='1234', uuid=vol_id2,
            properties={'foo': 'bar'})
        driver_internal_info = self.node.driver_internal_info
        driver_internal_info['boot_from_volume'] = vol_id
        self.node.driver_internal_info = driver_internal_info
        self.node.save()
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            options = pxe_utils.get_volume_pxe_options(task)
        self.assertEqual([], options['iscsi_volumes'])
    def test_build_pxe_config_options_ipxe_rescue(self):
        """iPXE options in rescue mode use the rescue kernel/ramdisk."""
        self._test_build_pxe_config_options_ipxe(mode='rescue')
    def test_build_pxe_config_options_ipxe_rescue_swift(self):
        """Rescue mode works with Swift-served deploy images."""
        self._test_build_pxe_config_options_ipxe(mode='rescue',
                                                 ipxe_use_swift=True)
    def test_build_pxe_config_options_ipxe_rescue_timeout(self):
        """Rescue mode works with a non-zero iPXE timeout."""
        self._test_build_pxe_config_options_ipxe(mode='rescue',
                                                 ipxe_timeout=120)
    @mock.patch.object(deploy_utils, 'fetch_images', autospec=True)
    def test__cache_tftp_images_master_path(self, mock_fetch_image):
        """Images are fetched into the per-node dir under a real tftp_root."""
        temp_dir = tempfile.mkdtemp()
        self.config(tftp_root=temp_dir, group='pxe')
        self.config(tftp_master_path=os.path.join(temp_dir,
                                                  'tftp_master_path'),
                    group='pxe')
        image_path = os.path.join(temp_dir, self.node.uuid,
                                  'deploy_kernel')
        image_info = {'deploy_kernel': ('deploy_kernel', image_path)}
        # the master path must exist before caching
        fileutils.ensure_tree(CONF.pxe.tftp_master_path)

        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            pxe_utils.cache_ramdisk_kernel(task, image_info)
        mock_fetch_image.assert_called_once_with(self.context,
                                                 mock.ANY,
                                                 [('deploy_kernel',
                                                   image_path)],
                                                 True)
    @mock.patch.object(pxe_utils, 'TFTPImageCache', lambda: None)
    @mock.patch.object(fileutils, 'ensure_tree', autospec=True)
    @mock.patch.object(deploy_utils, 'fetch_images', autospec=True)
    def test_cache_ramdisk_kernel(self, mock_fetch_image, mock_ensure_tree):
        """Caching creates the node dir under tftp_root and fetches images."""
        self.config(ipxe_enabled=False, group='pxe')
        fake_pxe_info = {'foo': 'bar'}
        expected_path = os.path.join(CONF.pxe.tftp_root, self.node.uuid)

        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            pxe_utils.cache_ramdisk_kernel(task, fake_pxe_info)
        mock_ensure_tree.assert_called_with(expected_path)
        mock_fetch_image.assert_called_once_with(
            self.context, mock.ANY, list(fake_pxe_info.values()), True)
    @mock.patch.object(pxe_utils, 'TFTPImageCache', lambda: None)
    @mock.patch.object(fileutils, 'ensure_tree', autospec=True)
    @mock.patch.object(deploy_utils, 'fetch_images', autospec=True)
    def test_cache_ramdisk_kernel_ipxe(self, mock_fetch_image,
                                       mock_ensure_tree):
        """With iPXE enabled the cache target lives under the HTTP root."""
        self.config(ipxe_enabled=True, group='pxe')
        fake_pxe_info = {'foo': 'bar'}
        expected_path = os.path.join(CONF.deploy.http_root,
                                     self.node.uuid)
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            pxe_utils.cache_ramdisk_kernel(task, fake_pxe_info)
        mock_ensure_tree.assert_called_with(expected_path)
        mock_fetch_image.assert_called_once_with(self.context, mock.ANY,
                                                 list(fake_pxe_info.values()),
                                                 True)
    @mock.patch.object(pxe_utils.LOG, 'error', autospec=True)
    def test_validate_boot_parameters_for_trusted_boot_one(self, mock_log):
        """Trusted boot must be rejected when boot mode is UEFI."""
        properties = {'capabilities': 'boot_mode:uefi'}
        instance_info = {"boot_option": "netboot"}
        self.node.properties = properties
        # NOTE(review): a dict is stored under instance_info['capabilities']
        # here — presumably the shape validate_* expects; confirm.
        self.node.instance_info['capabilities'] = instance_info
        self.node.driver_internal_info['is_whole_disk_image'] = False
        self.assertRaises(exception.InvalidParameterValue,
                          pxe.validate_boot_parameters_for_trusted_boot,
                          self.node)
        self.assertTrue(mock_log.called)
    @mock.patch.object(pxe_utils.LOG, 'error', autospec=True)
    def test_validate_boot_parameters_for_trusted_boot_two(self, mock_log):
        """Trusted boot must be rejected for local-disk boot."""
        properties = {'capabilities': 'boot_mode:bios'}
        instance_info = {"boot_option": "local"}
        self.node.properties = properties
        self.node.instance_info['capabilities'] = instance_info
        self.node.driver_internal_info['is_whole_disk_image'] = False
        self.assertRaises(exception.InvalidParameterValue,
                          pxe.validate_boot_parameters_for_trusted_boot,
                          self.node)
        self.assertTrue(mock_log.called)
    @mock.patch.object(pxe_utils.LOG, 'error', autospec=True)
    def test_validate_boot_parameters_for_trusted_boot_three(self, mock_log):
        """Trusted boot must be rejected for whole-disk images."""
        properties = {'capabilities': 'boot_mode:bios'}
        instance_info = {"boot_option": "netboot"}
        self.node.properties = properties
        self.node.instance_info['capabilities'] = instance_info
        self.node.driver_internal_info['is_whole_disk_image'] = True
        self.assertRaises(exception.InvalidParameterValue,
                          pxe.validate_boot_parameters_for_trusted_boot,
                          self.node)
        self.assertTrue(mock_log.called)
    @mock.patch.object(pxe_utils.LOG, 'error', autospec=True)
    def test_validate_boot_parameters_for_trusted_boot_pass(self, mock_log):
        """BIOS + netboot + partition image is the valid combination."""
        properties = {'capabilities': 'boot_mode:bios'}
        instance_info = {"boot_option": "netboot"}
        self.node.properties = properties
        self.node.instance_info['capabilities'] = instance_info
        self.node.driver_internal_info['is_whole_disk_image'] = False
        pxe.validate_boot_parameters_for_trusted_boot(self.node)
        # No error should have been logged on the success path.
        self.assertFalse(mock_log.called)
@mock.patch.object(ironic_utils, 'unlink_without_raise', autospec=True)
@mock.patch.object(pxe_utils, 'clean_up_pxe_config', autospec=True)
@mock.patch.object(pxe_utils, 'TFTPImageCache', autospec=True)
class CleanUpPxeEnvTestCase(db_base.DbTestCase):
    """Tests for pxe_utils.clean_up_pxe_env."""

    def setUp(self):
        super(CleanUpPxeEnvTestCase, self).setUp()
        # NOTE(review): this mutates the shared module-level INST_INFO_DICT
        # in place — confirm the cross-test side effect is intended.
        instance_info = INST_INFO_DICT
        instance_info['deploy_key'] = 'fake-56789'
        self.node = object_utils.create_test_node(
            self.context, boot_interface='pxe',
            instance_info=instance_info,
            driver_info=DRV_INFO_DICT,
            driver_internal_info=DRV_INTERNAL_INFO_DICT,
        )

    def test__clean_up_pxe_env(self, mock_cache, mock_pxe_clean,
                               mock_unlink):
        """Cleanup should remove config, unlink images and purge the cache."""
        image_info = {'label': ['', 'deploy_kernel']}
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            pxe_utils.clean_up_pxe_env(task, image_info)
            mock_pxe_clean.assert_called_once_with(task, ipxe_enabled=False)
            mock_unlink.assert_any_call('deploy_kernel')
        mock_cache.return_value.clean_up.assert_called_once_with()
class TFTPImageCacheTestCase(db_base.DbTestCase):
    """Tests for TFTPImageCache configuration handling."""

    @mock.patch.object(fileutils, 'ensure_tree')
    def test_with_master_path(self, mock_ensure_tree):
        # image_cache_size is configured in MiB, image_cache_ttl in minutes.
        self.config(tftp_master_path='/fake/path', group='pxe')
        self.config(image_cache_size=500, group='pxe')
        self.config(image_cache_ttl=30, group='pxe')
        cache = pxe_utils.TFTPImageCache()
        mock_ensure_tree.assert_called_once_with('/fake/path')
        self.assertEqual(500 * 1024 * 1024, cache._cache_size)
        self.assertEqual(30 * 60, cache._cache_ttl)

    @mock.patch.object(fileutils, 'ensure_tree')
    def test_without_master_path(self, mock_ensure_tree):
        # An empty master path must not create any directory.
        self.config(tftp_master_path='', group='pxe')
        self.config(image_cache_size=500, group='pxe')
        self.config(image_cache_ttl=30, group='pxe')
        cache = pxe_utils.TFTPImageCache()
        mock_ensure_tree.assert_not_called()
        self.assertEqual(500 * 1024 * 1024, cache._cache_size)
        self.assertEqual(30 * 60, cache._cache_ttl)
| true | true |
f7f46a8e28a98db46b887ffca14b7e6f5f918a8d | 177 | py | Python | pretrain/contrastive_scene_contexts/model/modules/__init__.py | ut-amrl/ContrastiveSceneContexts | 622b9cd32ea2dcf8307d25eb2e7ee1c09d220134 | [
"MIT"
] | 135 | 2021-05-25T00:07:04.000Z | 2022-03-24T02:52:22.000Z | pretrain/contrastive_scene_contexts/model/modules/__init__.py | ut-amrl/ContrastiveSceneContexts | 622b9cd32ea2dcf8307d25eb2e7ee1c09d220134 | [
"MIT"
] | 25 | 2021-06-17T03:45:44.000Z | 2022-03-04T16:04:42.000Z | pretrain/contrastive_scene_contexts/model/modules/__init__.py | ut-amrl/ContrastiveSceneContexts | 622b9cd32ea2dcf8307d25eb2e7ee1c09d220134 | [
"MIT"
] | 21 | 2021-05-25T07:52:36.000Z | 2022-02-18T06:10:45.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree. | 44.25 | 65 | 0.757062 | true | true | |
f7f46adee8d9df42744c92d77b1d2dee52160b78 | 21,286 | py | Python | Opioid_app_backend/medication.py | drwitt/Mayo_Clinic_Opioid_Taper_Project | d816f01d7ad592ee2ff7ee54df960a536f38dcd1 | [
"Apache-2.0"
] | null | null | null | Opioid_app_backend/medication.py | drwitt/Mayo_Clinic_Opioid_Taper_Project | d816f01d7ad592ee2ff7ee54df960a536f38dcd1 | [
"Apache-2.0"
] | null | null | null | Opioid_app_backend/medication.py | drwitt/Mayo_Clinic_Opioid_Taper_Project | d816f01d7ad592ee2ff7ee54df960a536f38dcd1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 6 11:05:22 2019
@author: dannywitt
"""
class Medication:
    """Base record describing an opioid medication and its dosing options.

    Attributes mirror the constructor arguments; in addition,
    ``minimum_dose_available`` caches the smallest opioid unit dose and
    ``combined_drug_name`` caches the non-opioid component's name (or the
    string ``'Not Combined'``).
    """

    def __init__(self,
                 med_name,
                 combination_state,
                 combination_drug_unit_doses,
                 delivery_rate,
                 drug_delivery_form,
                 available_opioid_doses,
                 available_combination_doses,
                 MME_conversion_factor):
        # Descriptive fields, stored verbatim.
        self.med_name = med_name
        self.combination_state = combination_state
        self.combination_drug_unit_doses = combination_drug_unit_doses
        self.delivery_rate = delivery_rate
        self.drug_delivery_form = drug_delivery_form
        self.available_opioid_unit_doses = available_opioid_doses
        # Smallest unit dose, used when stepping a taper down.
        self.minimum_dose_available = min(available_opioid_doses)
        # is_combined_drug() both returns and caches the component name.
        self.combined_drug_name = self.is_combined_drug()
        self.available_combination_doses = available_combination_doses
        self.MME_conversion_factor = MME_conversion_factor

    def full_drug_name(self):
        """Return a display name including release rate and delivery form."""
        return '{0} ({1}, {2} form)'.format(self.med_name,
                                            self.delivery_rate,
                                            self.drug_delivery_form)

    def is_combined_drug(self):
        """Derive (and cache) the combined-drug component name.

        For a combined drug the last word of ``med_name`` names the
        non-opioid component; otherwise the sentinel 'Not Combined'.
        """
        if self.combination_state != 'Combined':
            self.combined_drug_name = 'Not Combined'
        else:
            self.combined_drug_name = self.med_name.split()[-1]
        return self.combined_drug_name
#Classes for each specific drug (inheriting from Medication class):
#Updated medications included in study, as of April 20, 2019:
class Hydrocodone_Acetaminophen(Medication):
    """Hydrocodone/acetaminophen combination, immediate-release capsules.

    Opioid MME conversion factor 1.0; dose list includes whole and
    half-tablet doses.
    """

    def __init__(self,
                 med_name='Hydrocodone Acetaminophen',
                 combination_state='Combined',
                 combination_drug_unit_doses=325,
                 delivery_rate='Immediate Release',
                 drug_delivery_form='Capsule',
                 available_opioid_doses=None,
                 available_combination_doses=None,
                 MME_conversion_factor=1.0):
        # None sentinels replace mutable list defaults, which would be
        # shared between every instance of the class.
        if available_opioid_doses is None:
            available_opioid_doses = [2.5, 5, 3.75, 7.5, 10]
        if available_combination_doses is None:
            # (opioid mg, acetaminophen mg) pairs, whole and half doses.
            available_combination_doses = [(2.5, 162.5),
                                           (3.75, 162.5),
                                           (5, 162.5),
                                           (5, 325),
                                           (7.5, 325),
                                           (10, 325)]
        super().__init__(med_name,
                         combination_state,
                         combination_drug_unit_doses,
                         delivery_rate,
                         drug_delivery_form,
                         available_opioid_doses,
                         available_combination_doses,
                         MME_conversion_factor)
class Hydromorphone_Immediate_Release(Medication):
    """Hydromorphone, immediate-release tablets (MME factor 4.0)."""

    def __init__(self,
                 med_name='Hydromorphone',
                 combination_state='Not Combined',
                 combination_drug_unit_doses='NA',
                 delivery_rate='Immediate Release',
                 drug_delivery_form='Tablet',
                 available_opioid_doses=None,
                 available_combination_doses='Not Combined',
                 MME_conversion_factor=4.0):
        # None sentinel avoids a shared mutable list default.
        if available_opioid_doses is None:
            available_opioid_doses = [1, 2]
        super().__init__(med_name, combination_state,
                         combination_drug_unit_doses, delivery_rate,
                         drug_delivery_form, available_opioid_doses,
                         available_combination_doses, MME_conversion_factor)
class Morphine_Extended_Release(Medication):
    """Morphine, extended-release capsules (MME factor 1.0)."""

    def __init__(self,
                 med_name='Morphine',
                 combination_state='Not Combined',
                 combination_drug_unit_doses='NA',
                 delivery_rate='Extended Release',
                 drug_delivery_form='Capsule',
                 available_opioid_doses=None,
                 available_combination_doses='Not Combined',
                 MME_conversion_factor=1.0):
        # None sentinel avoids a shared mutable list default.
        if available_opioid_doses is None:
            available_opioid_doses = [7.5, 15, 30]
        super().__init__(med_name, combination_state,
                         combination_drug_unit_doses, delivery_rate,
                         drug_delivery_form, available_opioid_doses,
                         available_combination_doses, MME_conversion_factor)
class Morphine_Immediate_Release(Medication):
    """Morphine, immediate-release capsules (MME factor 1.0)."""

    def __init__(self,
                 med_name='Morphine',
                 combination_state='Not Combined',
                 combination_drug_unit_doses='NA',
                 delivery_rate='Immediate Release',
                 drug_delivery_form='Capsule',
                 available_opioid_doses=None,
                 available_combination_doses='Not Combined',
                 MME_conversion_factor=1.0):
        # None sentinel avoids a shared mutable list default.
        if available_opioid_doses is None:
            available_opioid_doses = [7.5, 15, 30]
        super().__init__(med_name, combination_state,
                         combination_drug_unit_doses, delivery_rate,
                         drug_delivery_form, available_opioid_doses,
                         available_combination_doses, MME_conversion_factor)
class Oxycodone_Immediate_Release(Medication):
    """Oxycodone, immediate-release capsules (MME factor 1.5)."""

    def __init__(self,
                 med_name='Oxycodone',
                 combination_state='Not Combined',
                 combination_drug_unit_doses='NA',
                 delivery_rate='Immediate Release',
                 drug_delivery_form='Capsule',
                 available_opioid_doses=None,
                 available_combination_doses='Not Combined',
                 MME_conversion_factor=1.5):
        # None sentinel avoids a shared mutable list default.
        if available_opioid_doses is None:
            available_opioid_doses = [2.5, 5, 10, 20]
        super().__init__(med_name, combination_state,
                         combination_drug_unit_doses, delivery_rate,
                         drug_delivery_form, available_opioid_doses,
                         available_combination_doses, MME_conversion_factor)
class Oxycodone_Extended_Release(Medication):
    """Oxycodone, extended-release capsules (MME factor 1.5)."""

    def __init__(self,
                 med_name='Oxycodone',
                 combination_state='Not Combined',
                 combination_drug_unit_doses='NA',
                 delivery_rate='Extended Release',
                 drug_delivery_form='Capsule',
                 available_opioid_doses=None,
                 available_combination_doses='Not Combined',
                 MME_conversion_factor=1.5):
        # None sentinel avoids a shared mutable list default.
        if available_opioid_doses is None:
            available_opioid_doses = [2.5, 5, 10]
        super().__init__(med_name, combination_state,
                         combination_drug_unit_doses, delivery_rate,
                         drug_delivery_form, available_opioid_doses,
                         available_combination_doses, MME_conversion_factor)
class Oxycodone_Acetaminophen(Medication):
    """Oxycodone/acetaminophen combination, immediate-release tablets.

    NOTE(review): combination_drug_unit_doses is the string '325mg' here
    while Hydrocodone_Acetaminophen uses the int 325 — value preserved,
    but confirm which representation downstream code expects.
    """

    def __init__(self,
                 med_name='Oxycodone Acetaminophen',
                 combination_state='Combined',
                 combination_drug_unit_doses='325mg',
                 delivery_rate='Immediate Release',
                 drug_delivery_form='Tablet',
                 available_opioid_doses=None,
                 available_combination_doses=None,
                 MME_conversion_factor=1.5):
        # None sentinels replace mutable list defaults, which would be
        # shared between every instance of the class.
        if available_opioid_doses is None:
            available_opioid_doses = [2.5, 5, 10]
        if available_combination_doses is None:
            # (opioid mg, acetaminophen mg) pairs, whole and half doses.
            available_combination_doses = [(2.5, 162.5),
                                           (5, 325),
                                           (5, 162.5),
                                           (10, 325)]
        super().__init__(med_name,
                         combination_state,
                         combination_drug_unit_doses,
                         delivery_rate,
                         drug_delivery_form,
                         available_opioid_doses,
                         available_combination_doses,
                         MME_conversion_factor)
class Tramadol_Immediate_Release(Medication):
    """Tramadol, immediate-release capsules (MME factor 0.1)."""

    def __init__(self,
                 med_name='Tramadol',
                 combination_state='Not Combined',
                 combination_drug_unit_doses='NA',
                 delivery_rate='Immediate Release',
                 drug_delivery_form='Capsule',
                 available_opioid_doses=None,
                 available_combination_doses='Not Combined',
                 MME_conversion_factor=0.1):
        # None sentinel avoids a shared mutable list default.
        if available_opioid_doses is None:
            available_opioid_doses = [25, 50]
        super().__init__(med_name, combination_state,
                         combination_drug_unit_doses, delivery_rate,
                         drug_delivery_form, available_opioid_doses,
                         available_combination_doses, MME_conversion_factor)
##############################################################################
#Additional Medications (not used in current study implementation):
#class Hydromorphone_Extended_Release(Medication):
#
# def __init__(self,
# med_name = 'Hydromorphone',
# combination_state = 'Not Combined',
# combination_drug_unit_doses = 'NA',
# delivery_rate = 'Extended Release',
# drug_delivery_form = 'Tablet',
# available_opioid_doses = [8, 12, 16, 32],
# available_combination_doses = 'Not Combined',
# MME_conversion_factor = 4.0):
# super().__init__(med_name,
# combination_state,
# combination_drug_unit_doses,
# delivery_rate,
# drug_delivery_form,
# available_opioid_doses,
# available_combination_doses,
# MME_conversion_factor)
# return
#
#class Codeine_Acetaminophen(Medication):
#
# def __init__(self,
# med_name = 'Codeine Acetaminophen',
# combination_state = 'Combined',
# combination_drug_unit_doses = '300mg',
# delivery_rate = 'Immediate Release',
# drug_delivery_form = 'Tablet',
# available_opioid_doses = [15, 30, 60],
# available_combination_doses = [(15, 300),(30, 300), (60, 300)],
# MME_conversion_factor = 0.15):
# super().__init__(med_name,
# combination_state,
# combination_drug_unit_doses,
# delivery_rate,
# drug_delivery_form,
# available_opioid_doses,
# available_combination_doses,
# MME_conversion_factor)
# return
#
#class Codeine_Sulfate(Medication):
#
# def __init__(self,
# med_name = 'Codeine Sulfate',
# combination_state = 'Not Combined',
# combination_drug_unit_doses = 'NA',
# delivery_rate = 'Immediate Release',
# drug_delivery_form = 'Tablet',
# available_opioid_doses = [15, 30, 60],
# available_combination_doses = 'Not Combined',
# MME_conversion_factor = 0.15):
# super().__init__(med_name,
# combination_state,
# combination_drug_unit_doses,
# delivery_rate,
# drug_delivery_form,
# available_opioid_doses,
# available_combination_doses,
# MME_conversion_factor)
# return
#
#class Hydrocodone_Ibuprofen(Medication):
#
# def __init__(self,
# med_name = 'Hydrocodone Ibuprofen',
# combination_state = 'Combined',
# combination_drug_unit_doses = '300mg',
# delivery_rate = 'Immediate Release',
# drug_delivery_form = 'Tablet',
# available_opioid_doses = [2.5, 5, 7.5, 10],
# available_combination_doses = [(2.5, 200),
# (5, 200),
# (7.5, 200),
# (10, 200)],
# MME_conversion_factor = 1.0):
# super().__init__(med_name,
# combination_state,
# combination_drug_unit_doses,
# delivery_rate,
# drug_delivery_form,
# available_opioid_doses,
# available_combination_doses,
# MME_conversion_factor)
# return
#
#class Meperidine_Hydrochloride(Medication):
#
# def __init__(self,
# med_name = 'Meperidine Hydrochloride',
# combination_state = 'Not Combined',
# combination_drug_unit_doses = 'NA',
# delivery_rate = 'Immediate Release',
# drug_delivery_form = 'Tablet',
# available_opioid_doses = [50, 100],
# available_combination_doses = 'Not Combined',
# MME_conversion_factor = 0.1):
# super().__init__(med_name,
# combination_state,
# combination_drug_unit_doses,
# delivery_rate,
# drug_delivery_form,
# available_opioid_doses,
# available_combination_doses,
# MME_conversion_factor)
# return
#
#class Morphine_Extended_Release_Tablet(Medication):
#
# def __init__(self,
# med_name = 'Morphine',
# combination_state = 'Not Combined',
# combination_drug_unit_doses = 'NA',
# delivery_rate = 'Extended Release',
# drug_delivery_form = 'Tablet',
# available_opioid_doses = [15, 30, 45, 60, 100, 200],
# available_combination_doses = 'Not Combined',
# MME_conversion_factor = 1.0):
# super().__init__(med_name,
# combination_state,
# combination_drug_unit_doses,
# delivery_rate,
# drug_delivery_form,
# available_opioid_doses,
# available_combination_doses,
# MME_conversion_factor)
# return
#
#class Oxycodone_Immediate_Release_Tablet(Medication):
#
# def __init__(self,
# med_name = 'Oxycodone',
# combination_state = 'Not Combined',
# combination_drug_unit_doses = 'NA',
# delivery_rate = 'Immediate Release',
# drug_delivery_form = 'Tablet',
# available_opioid_doses = [5, 10, 15, 20, 30],
# available_combination_doses = 'Not Combined',
# MME_conversion_factor = 1.5):
# super().__init__(med_name,
# combination_state,
# combination_drug_unit_doses,
# delivery_rate,
# drug_delivery_form,
# available_opioid_doses,
# available_combination_doses,
# MME_conversion_factor)
# return
#
#class Oxycodone_Aspirin(Medication):
#
# def __init__(self,
# med_name = 'Oxycodone Aspirin',
# combination_state = 'Combined',
# combination_drug_unit_doses = '325mg',
# delivery_rate = 'Immediate Release',
# drug_delivery_form = 'Tablet',
# available_opioid_doses = [4.8355],
# available_combination_doses = [(4.8355, 325)],
# MME_conversion_factor = 1.5):
# super().__init__(med_name,
# combination_state,
# combination_drug_unit_doses,
# delivery_rate,
# drug_delivery_form,
# available_opioid_doses,
# available_combination_doses,
# MME_conversion_factor)
# return
#
#class Oxycodone_Ibuprofen(Medication):
#
# def __init__(self,
# med_name = 'Oxycodone Ibuprofen',
# combination_state = 'Combined',
# combination_drug_unit_doses = '400mg',
# delivery_rate = 'Immediate Release',
# drug_delivery_form = 'Tablet',
# available_opioid_doses = [5],
# available_combination_doses = [(5, 400)],
# MME_conversion_factor = 1.5):
# super().__init__(med_name,
# combination_state,
# combination_drug_unit_doses,
# delivery_rate,
# drug_delivery_form,
# available_opioid_doses,
# available_combination_doses,
# MME_conversion_factor)
# return
#
#class Oxymorphone_Immediate_Release(Medication):
#
# def __init__(self,
# med_name = 'Oxymorphone',
# combination_state = 'Not Combined',
# combination_drug_unit_doses = 'NA',
# delivery_rate = 'Immediate Release',
# drug_delivery_form = 'Tablet',
# available_opioid_doses = [5, 10],
# available_combination_doses = 'Not Combined',
# MME_conversion_factor = 3.0):
# super().__init__(med_name,
# combination_state,
# combination_drug_unit_doses,
# delivery_rate,
# drug_delivery_form,
# available_opioid_doses,
# available_combination_doses,
# MME_conversion_factor)
# return
#
#class Oxymorphone_Extended_Release(Medication):
#
# def __init__(self,
# med_name = 'Oxymorphone',
# combination_state = 'Not Combined',
# combination_drug_unit_doses = 'NA',
# delivery_rate = 'Extended Release',
# drug_delivery_form = 'Tablet',
# available_opioid_doses = [5, 7.5, 10, 15, 20, 30, 40],
# available_combination_doses = 'Not Combined',
# MME_conversion_factor = 3.0):
# super().__init__(med_name,
# combination_state,
# combination_drug_unit_doses,
# delivery_rate,
# drug_delivery_form,
# available_opioid_doses,
# available_combination_doses,
# MME_conversion_factor)
# return
#
#class Tramadol_Extended_Release(Medication):
#
# def __init__(self,
# med_name = 'Tramadol',
# combination_state = 'Not Combined',
# combination_drug_unit_doses = 'NA',
# delivery_rate = 'Extended Release',
# drug_delivery_form = 'Tablet',
# available_opioid_doses = [100, 200, 300],
# available_combination_doses = 'Not Combined',
# MME_conversion_factor = 0.1):
# super().__init__(med_name,
# combination_state,
# combination_drug_unit_doses,
# delivery_rate,
# drug_delivery_form,
# available_opioid_doses,
# available_combination_doses,
# MME_conversion_factor)
# return
#
#class Tramadol_Acetaminophen(Medication):
#
# def __init__(self,
# med_name = 'Tramadol Acetaminophen',
# combination_state = 'Combined',
# combination_drug_unit_doses = '325mg',
# delivery_rate = 'Immediate Release',
# drug_delivery_form = 'Tablet',
# available_opioid_doses = [37.5],
# available_combination_doses = [(37.5, 325)],
# MME_conversion_factor = 0.1):
# super().__init__(med_name,
# combination_state,
# combination_drug_unit_doses,
# delivery_rate,
# drug_delivery_form,
# available_opioid_doses,
# available_combination_doses,
# MME_conversion_factor)
# return
| 42.40239 | 81 | 0.496242 |
class Medication:
    """Base record for an opioid medication and its dosing options.

    NOTE(review): byte-for-byte duplicate of the Medication class defined
    earlier in this file; this re-declaration shadows the earlier one at
    import time — confirm the duplication is intended.
    """
    def __init__(self,
                 med_name,
                 combination_state,
                 combination_drug_unit_doses,
                 delivery_rate,
                 drug_delivery_form,
                 available_opioid_doses,
                 available_combination_doses,
                 MME_conversion_factor):
        self.med_name = med_name
        self.combination_state = combination_state
        self.combination_drug_unit_doses = combination_drug_unit_doses
        self.delivery_rate = delivery_rate
        self.drug_delivery_form = drug_delivery_form
        self.available_opioid_unit_doses = available_opioid_doses
        # Smallest unit dose on offer, cached for taper calculations.
        self.minimum_dose_available = min(available_opioid_doses)
        # is_combined_drug() both returns and caches the component name.
        self.combined_drug_name = self.is_combined_drug()
        self.available_combination_doses = available_combination_doses
        self.MME_conversion_factor = MME_conversion_factor
    def full_drug_name(self):
        """Return a display name including release rate and delivery form."""
        return '{} ({}, {} form)'.format(self.med_name,
                                         self.delivery_rate,
                                         self.drug_delivery_form)
    def is_combined_drug(self):
        """Derive and cache the combined-drug component name."""
        if self.combination_state == 'Combined':
            # Last word of the name is the non-opioid component.
            self.combined_drug_name = self.med_name.split()[-1]
        else:
            self.combined_drug_name = 'Not Combined'
        return self.combined_drug_name
class Hydrocodone_Acetaminophen(Medication):
    """Hydrocodone/acetaminophen IR capsules (duplicate re-declaration)."""
    def __init__(self,
                 med_name = 'Hydrocodone Acetaminophen',
                 combination_state = 'Combined',
                 combination_drug_unit_doses = 325,
                 delivery_rate = 'Immediate Release',
                 drug_delivery_form = 'Capsule',
                 available_opioid_doses = [2.5, 5, 3.75, 7.5, 10],
                 available_combination_doses = [(2.5, 162.5),
                                                (3.75, 162.5),
                                                (5, 162.5),
                                                (5, 325),
                                                (7.5, 325),
                                                (10, 325)],
                 MME_conversion_factor = 1.0):
        super().__init__(med_name,
                         combination_state,
                         combination_drug_unit_doses,
                         delivery_rate,
                         drug_delivery_form,
                         available_opioid_doses,
                         available_combination_doses,
                         MME_conversion_factor)
        return
class Hydromorphone_Immediate_Release(Medication):
    """Hydromorphone IR tablets, MME 4.0 (duplicate re-declaration)."""
    def __init__(self,
                 med_name = 'Hydromorphone',
                 combination_state = 'Not Combined',
                 combination_drug_unit_doses = 'NA',
                 delivery_rate = 'Immediate Release',
                 drug_delivery_form = 'Tablet',
                 available_opioid_doses = [1, 2],
                 available_combination_doses = 'Not Combined',
                 MME_conversion_factor = 4.0):
        super().__init__(med_name,
                         combination_state,
                         combination_drug_unit_doses,
                         delivery_rate,
                         drug_delivery_form,
                         available_opioid_doses,
                         available_combination_doses,
                         MME_conversion_factor)
        return
class Morphine_Extended_Release(Medication):
    """Morphine ER capsules, MME 1.0 (duplicate re-declaration)."""
    def __init__(self,
                 med_name = 'Morphine',
                 combination_state = 'Not Combined',
                 combination_drug_unit_doses = 'NA',
                 delivery_rate = 'Extended Release',
                 drug_delivery_form = 'Capsule',
                 available_opioid_doses = [7.5, 15, 30],
                 available_combination_doses = 'Not Combined',
                 MME_conversion_factor = 1.0):
        super().__init__(med_name,
                         combination_state,
                         combination_drug_unit_doses,
                         delivery_rate,
                         drug_delivery_form,
                         available_opioid_doses,
                         available_combination_doses,
                         MME_conversion_factor)
        return
class Morphine_Immediate_Release(Medication):
    """Morphine IR capsules, MME 1.0 (duplicate re-declaration)."""
    def __init__(self,
                 med_name = 'Morphine',
                 combination_state = 'Not Combined',
                 combination_drug_unit_doses = 'NA',
                 delivery_rate = 'Immediate Release',
                 drug_delivery_form = 'Capsule',
                 available_opioid_doses = [7.5, 15, 30],
                 available_combination_doses = 'Not Combined',
                 MME_conversion_factor = 1.0):
        super().__init__(med_name,
                         combination_state,
                         combination_drug_unit_doses,
                         delivery_rate,
                         drug_delivery_form,
                         available_opioid_doses,
                         available_combination_doses,
                         MME_conversion_factor)
        return
class Oxycodone_Immediate_Release(Medication):
    """Oxycodone IR capsules, MME 1.5 (duplicate re-declaration)."""
    def __init__(self,
                 med_name = 'Oxycodone',
                 combination_state = 'Not Combined',
                 combination_drug_unit_doses = 'NA',
                 delivery_rate = 'Immediate Release',
                 drug_delivery_form = 'Capsule',
                 available_opioid_doses = [2.5, 5, 10, 20],
                 available_combination_doses = 'Not Combined',
                 MME_conversion_factor = 1.5):
        super().__init__(med_name,
                         combination_state,
                         combination_drug_unit_doses,
                         delivery_rate,
                         drug_delivery_form,
                         available_opioid_doses,
                         available_combination_doses,
                         MME_conversion_factor)
        return
class Oxycodone_Extended_Release(Medication):
    """Oxycodone ER capsules, MME 1.5 (duplicate re-declaration)."""
    def __init__(self,
                 med_name = 'Oxycodone',
                 combination_state = 'Not Combined',
                 combination_drug_unit_doses = 'NA',
                 delivery_rate = 'Extended Release',
                 drug_delivery_form = 'Capsule',
                 available_opioid_doses = [2.5, 5, 10],
                 available_combination_doses = 'Not Combined',
                 MME_conversion_factor = 1.5):
        super().__init__(med_name,
                         combination_state,
                         combination_drug_unit_doses,
                         delivery_rate,
                         drug_delivery_form,
                         available_opioid_doses,
                         available_combination_doses,
                         MME_conversion_factor)
        return
class Oxycodone_Acetaminophen(Medication):
    """Oxycodone/acetaminophen IR tablets (duplicate re-declaration)."""
    def __init__(self,
                 med_name = 'Oxycodone Acetaminophen',
                 combination_state = 'Combined',
                 combination_drug_unit_doses = '325mg',
                 delivery_rate = 'Immediate Release',
                 drug_delivery_form = 'Tablet',
                 available_opioid_doses = [2.5, 5, 10],
                 available_combination_doses = [(2.5, 162.5),
                                                (5, 325),
                                                (5, 162.5),
                                                (10, 325)],
                 MME_conversion_factor = 1.5):
        super().__init__(med_name,
                         combination_state,
                         combination_drug_unit_doses,
                         delivery_rate,
                         drug_delivery_form,
                         available_opioid_doses,
                         available_combination_doses,
                         MME_conversion_factor)
        return
class Tramadol_Immediate_Release(Medication):
    """Tramadol IR capsules, MME 0.1 (duplicate re-declaration)."""
    def __init__(self,
                 med_name = 'Tramadol',
                 combination_state = 'Not Combined',
                 combination_drug_unit_doses = 'NA',
                 delivery_rate = 'Immediate Release',
                 drug_delivery_form = 'Capsule',
                 available_opioid_doses = [25, 50],
                 available_combination_doses = 'Not Combined',
                 MME_conversion_factor = 0.1):
        super().__init__(med_name,
                         combination_state,
                         combination_drug_unit_doses,
                         delivery_rate,
                         drug_delivery_form,
                         available_opioid_doses,
                         available_combination_doses,
                         MME_conversion_factor)
        return
| true | true |
f7f46b73f8b40ac7cae2436f27742acbd9ecce21 | 1,640 | py | Python | byslib/data/cumulative_sum.py | bayashi-cl/byslib-python | d7cbb1cb75904e85c353227def7c99a2bceea12c | [
"CC0-1.0"
] | null | null | null | byslib/data/cumulative_sum.py | bayashi-cl/byslib-python | d7cbb1cb75904e85c353227def7c99a2bceea12c | [
"CC0-1.0"
] | null | null | null | byslib/data/cumulative_sum.py | bayashi-cl/byslib-python | d7cbb1cb75904e85c353227def7c99a2bceea12c | [
"CC0-1.0"
] | null | null | null | # @title Cumulative Sum
from itertools import chain
from typing import List
class CumulativeSum:
"""Cumulative Sum
Notes
-----
Get sum of semi-open interval [left, right)
Time complexity
* Build : :math:`O(N)`
* fold : :math:`O(1)`
Examples
--------
>>> cs = CumulativeSum([3, 1, 4, 1, 5])
>>> print(cs.fold(0, 3))
8
"""
def __init__(self, data: List[int]) -> None:
n = len(data)
self.__data = [0] * (n + 1)
for i in range(n):
self.__data[i + 1] = self.__data[i] + data[i]
def fold(self, left: int, right: int) -> int:
return self.__data[right] - self.__data[left]
class CumulativeSum2D:
    """Cumulative Sum 2D

    Notes
    -----
    Get sum of the semi-open rectangle with rows ``[up, down)`` and
    columns ``[left, right)``.

    Time complexity
    * Build : :math:`O(N * M)`
    * fold : :math:`O(1)`

    Examples
    --------
    >>> cs = CumulativeSum2D([[1, 2], [3, 4]])
    >>> print(cs.fold(0, 0, 2, 2))
    10
    """

    def __init__(self, data: List[List[int]]) -> None:
        n = len(data)
        m = len(data[0])
        # Pad with a zero row and zero column so __data[i][j] will hold
        # the sum of the top-left i-by-j sub-rectangle after the sweep.
        self.__data = [[0] + row for row in chain([[0] * m], data)]
        for i in range(1, n + 1):
            for j in range(1, m + 1):
                # Standard 2D inclusion-exclusion accumulation.
                self.__data[i][j] += (
                    self.__data[i][j - 1]
                    + self.__data[i - 1][j]
                    - self.__data[i - 1][j - 1]
                )

    def fold(self, up: int, left: int, down: int, right: int) -> int:
        """Sum over rows [up, down) and columns [left, right)."""
        return (
            self.__data[down][right]
            - self.__data[up][right]
            - self.__data[down][left]
            + self.__data[up][left]
        )
from itertools import chain
from typing import List
class CumulativeSum:
    """1D prefix sums; fold(l, r) sums [l, r).

    NOTE(review): duplicate re-declaration of the CumulativeSum class
    defined earlier in this file; shadows it — confirm intended.
    """
    def __init__(self, data: List[int]) -> None:
        n = len(data)
        # __data[i] holds the sum of data[:i].
        self.__data = [0] * (n + 1)
        for i in range(n):
            self.__data[i + 1] = self.__data[i] + data[i]
    def fold(self, left: int, right: int) -> int:
        """Sum of data[left:right]."""
        return self.__data[right] - self.__data[left]
class CumulativeSum2D:
    """2D prefix sums; fold(u, l, d, r) sums rows [u, d) x cols [l, r).

    NOTE(review): duplicate re-declaration of the CumulativeSum2D class
    defined earlier in this file; shadows it — confirm intended.
    """
    def __init__(self, data: List[List[int]]) -> None:
        n = len(data)
        m = len(data[0])
        # Pad a zero row/column; __data[i][j] becomes the i-by-j corner sum.
        self.__data = [[0] + row for row in chain([[0] * m], data)]
        for i in range(1, n + 1):
            for j in range(1, m + 1):
                # 2D inclusion-exclusion accumulation.
                self.__data[i][j] += (
                    self.__data[i][j - 1]
                    + self.__data[i - 1][j]
                    - self.__data[i - 1][j - 1]
                )
    def fold(self, up: int, left: int, down: int, right: int) -> int:
        """Sum over rows [up, down) and columns [left, right)."""
        return (
            self.__data[down][right]
            - self.__data[up][right]
            - self.__data[down][left]
            + self.__data[up][left]
        )
| true | true |
f7f46bbf8aeafd461462c8bacebf43e0f81464aa | 5,681 | py | Python | curve25519.py | jake-b/burstcoin-python-wallet | b54be463a9cdf44ff4d0cd238b336b50500399a7 | [
"MIT"
] | 5 | 2018-04-07T10:57:32.000Z | 2019-10-08T11:15:07.000Z | curve25519.py | jake-b/burstcoin-python-wallet | b54be463a9cdf44ff4d0cd238b336b50500399a7 | [
"MIT"
] | null | null | null | curve25519.py | jake-b/burstcoin-python-wallet | b54be463a9cdf44ff4d0cd238b336b50500399a7 | [
"MIT"
] | null | null | null | # a pedagogical implementation of curve25519 with ec-kcdsa
# coded by doctorevil to validate nxt's port of Matthijs van Duin's implementation
# warning: this implementation is not timing attack resistant
# ec arithmetic equations from http://hyperelliptic.org/EFD/g1p/auto-montgom.html
# Modified slightly to support Python 3 these modifications are released under
# the MIT license. Specifically the disclaimer of warranty/liability:
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from hashlib import sha256
from ecdsa.numbertheory import square_root_mod_prime, SquareRootError, inverse_mod
# Montgomery curve y^2 = x^3 + A*x^2 + x over GF(2^255 - 19).
CURVE_P = 2**255 - 19
CURVE_A = 486662
# Prime order of the base-point subgroup.
CURVE_ORDER = 7237005577332262213973186563042994240857116359379907606001950938285454250989
# Affine base point used for key generation below.
CURVE_G_X = 9
CURVE_G_Y = 14781619447589544791020593568409986887264606134616475288964881837755586237401
def le32(n):
    """Encode the non-negative integer *n* as 32 little-endian bytes."""
    # '%064x' zero-pads to 64 hex digits (32 bytes, big-endian);
    # reversing the byte string yields little-endian.
    return bytes.fromhex('%064x' % n)[::-1]
def from_le32(s):
    """Decode 32 little-endian bytes back into an integer (inverse of le32)."""
    return int(s[::-1].hex(), 16)
def curve25519_x_to_y(x):
    """Recover a y coordinate for *x* on the curve, or None if none exists."""
    rhs = (x ** 3 + CURVE_A * x ** 2 + x) % CURVE_P
    try:
        return square_root_mod_prime(rhs, CURVE_P)
    except SquareRootError:
        # rhs is a quadratic non-residue: no curve point has this x.
        return None
def curve25519_affine_add(x1, y1, x2, y2):
    # Affine Montgomery-curve point addition. Throughout this module the
    # pair (1, 0) encodes the point at infinity (the group identity).
    if (x1, y1) == (1, 0):
        return x2, y2
    if (x2, y2) == (1, 0):
        return x1, y1
    if x1 == x2 and y1 != y2:
        # P + (-P) = identity.
        return (1, 0)
    if x1 == x2 and y1 == y2:
        return curve25519_affine_double(x1, y1)
    # Chord slope lambda = (y2 - y1) / (x2 - x1);
    # x3 = lambda^2 - A - x1 - x2  (486662 is CURVE_A).
    t1 = (y2 - y1) ** 2 % CURVE_P
    t2 = (x2 - x1) ** 2 % CURVE_P
    x3 = (t1 * inverse_mod(t2, CURVE_P) - 486662 - x1 - x2) % CURVE_P
    t1 = (2 * x1 + x2 + 486662) % CURVE_P
    t2 = (y2 - y1) % CURVE_P
    t3 = (x2 - x1) % CURVE_P
    # y3 = (2*x1 + x2 + A) * lambda - lambda^3 - y1.
    y3 = t1 * (y2 - y1) * inverse_mod((x2 - x1) % CURVE_P, CURVE_P) - \
        t2 ** 3 * inverse_mod(t3 ** 3 % CURVE_P, CURVE_P) - y1
    y3 = y3 % CURVE_P
    return x3, y3
def curve25519_affine_double(x1, y1):
    # Affine point doubling; (1, 0) encodes the identity.
    if (x1, y1) == (1, 0):
        return (1, 0)
    # Tangent slope lambda = (3*x1^2 + 2*A*x1 + 1) / (2*y1);
    # x2 = lambda^2 - A - 2*x1, y2 = (3*x1 + A)*lambda - lambda^3 - y1.
    # NOTE(review): assumes y1 != 0 (inverse_mod of 0 would fail) — confirm
    # no order-2 point can reach here.
    x2 = (3 * x1 ** 2 + 2 * CURVE_A * x1 + 1) ** 2 * inverse_mod((2 * y1) ** 2, CURVE_P) - CURVE_A - x1 - x1
    y2 = (2 * x1 + x1 + CURVE_A) * (3 * x1 ** 2 + 2 * CURVE_A * x1 + 1) * inverse_mod(2 * y1, CURVE_P) - \
        (3 * x1 ** 2 + 2 * CURVE_A * x1 + 1) ** 3 * inverse_mod((2 * y1) ** 3, CURVE_P) - y1
    return x2 % CURVE_P, y2 % CURVE_P
def curve25519_affine_mult(n, x1, y1):
    """Scalar-multiply the affine point (x1, y1) by n.

    MSB-first double-and-add; (1, 0) is the identity encoding.
    """
    rx, ry = 1, 0
    for ch in bin(n)[2:]:
        rx, ry = curve25519_affine_double(rx, ry)
        if ch == '1':
            rx, ry = curve25519_affine_add(rx, ry, x1, y1)
    return rx, ry
def clamp(secret):
    """Clamp a 32-byte secret into a Curve25519 scalar (RFC 7748 style).

    Clears the three low bits of byte 0, clears the top bit of byte 31 and
    sets bit 254, leaving the middle bytes untouched.  Returns a bytearray.
    """
    head = secret[0] & 248
    tail = (secret[31] & 127) | 64
    middle = secret[1:-1]
    return bytearray((head,)) + middle + bytearray((tail,))
def is_negative(x):
    """Return the parity of *x* (1 when odd, 0 when even).

    The parity of a y coordinate stands in for its "sign" in the keygen and
    verify routines.
    """
    return x % 2
def curve25519_eckcdsa_keygen(secret):
    """Derive key material from a 32-byte secret.

    Returns (public x of s*G, signing key, clamped scalar s), each as 32
    little-endian bytes.  The signing key is the inverse mod CURVE_ORDER of
    +s or -s, chosen by the parity of y, so that kcdsa_verify — which only
    sees the public x and recovers some root y — can compensate for the
    sign via is_negative().
    """
    # Clamp the secret into a valid scalar, then compute the public point.
    s = from_le32(clamp(secret))
    x, y = curve25519_affine_mult(s, CURVE_G_X, CURVE_G_Y)
    # Fold the "sign" (parity) of y into the signing key.
    signing_key = inverse_mod(s if is_negative(y) else -s, CURVE_ORDER)
    return le32(x), le32(signing_key), le32(s)
def kcdsa_sign(message, secret):
    """Produce a 64-byte deterministic signature s||r over *message*.

    EC-KCDSA-style scheme: the nonce k is derived deterministically from the
    message hash and the signing key (so no RNG is needed), keygen is reused
    to obtain x(k*G) with k clamped, r = H(H(msg) || x(kG)), and
    s = (k - r) * signing_key mod CURVE_ORDER.
    """
    verification_key, signing_key, ignored = curve25519_eckcdsa_keygen(secret)
    m = sha256(message).digest()
    # Deterministic nonce: hash of message digest and the private signing key.
    k = sha256(m + signing_key).digest()
    # Reuse keygen solely to compute x(k*G) with the clamped nonce.
    k_Gx, ignored, k_clamped = curve25519_eckcdsa_keygen(k)
    r = sha256(m + k_Gx).digest()
    # r (a 32-byte hash) is interpreted little-endian for the arithmetic.
    s = (from_le32(k_clamped) - from_le32(r)) * from_le32(signing_key) % CURVE_ORDER
    return le32(s) + r
def kcdsa_verify(signature, message, public_key):
    """Check a 64-byte signature s||r against *message* and a 32-byte public x.

    Reconstructs k*G = s*P + r*(+/-G) — the base point is negated when the
    recovered y root has the "wrong" parity, mirroring the sign folded into
    the signing key by curve25519_eckcdsa_keygen — then verifies
    r == H(H(message) || x(kG)).  Returns a bool.
    """
    if len(signature) != 64:
        return False
    s = from_le32(signature[:32])
    r = from_le32(signature[32:64])
    # Lift the public x back onto the curve (either root works, see below).
    px = from_le32(public_key)
    py = curve25519_x_to_y(px)
    if py is None:  # pubkey is bogus; bail
        return False
    tx1, ty1 = curve25519_affine_mult(s, px, py)
    tx2, ty2 = curve25519_affine_mult(r, CURVE_G_X, CURVE_G_Y)
    # Compensate for the y parity chosen at keygen time by negating r*G.
    if not is_negative(py):
        ty2 = -ty2
    k_Gx, k_Gy = curve25519_affine_add(tx1, ty1, tx2, ty2)
    m = sha256(message).digest()
    # Constant comparison of the recomputed r with the transmitted one.
    return le32(r) == sha256(m + le32(k_Gx)).digest()
if __name__ == "__main__":
    # CLI demo: argv[1] is a passphrase (hashed into the secret),
    # argv[2] is the message to sign.
    import sys
    passphrase = sys.argv[1].encode('utf-8')
    secret = sha256(passphrase).digest()
    message = sys.argv[2].encode('utf-8')
    verification_key, signing_key, secret_clamped = curve25519_eckcdsa_keygen(secret)
    print('pubkey', verification_key.hex())
    print('signing key', signing_key.hex())
    signature = kcdsa_sign(message, secret)
    print('signature', signature.hex())
    # Self-test: the fresh signature must verify...
    assert kcdsa_verify(signature, message, verification_key)
    # ...and a mangled signature over different data must not.
    assert not kcdsa_verify(signature[::-1], signature, verification_key)
from hashlib import sha256
from ecdsa.numbertheory import square_root_mod_prime, SquareRootError, inverse_mod
CURVE_P = 2**255 - 19
CURVE_A = 486662
CURVE_ORDER = 7237005577332262213973186563042994240857116359379907606001950938285454250989
CURVE_G_X = 9
CURVE_G_Y = 14781619447589544791020593568409986887264606134616475288964881837755586237401
def le32(n):
turn bytes.fromhex('%064x' % n)[::-1]
def from_le32(s):
return int(s[::-1].hex(), 16)
def curve25519_x_to_y(x):
t = (x ** 3 + CURVE_A * x ** 2 + x) % CURVE_P
try:
return square_root_mod_prime(t, CURVE_P)
except SquareRootError:
return None
def curve25519_affine_add(x1, y1, x2, y2):
if (x1, y1) == (1, 0):
return x2, y2
if (x2, y2) == (1, 0):
return x1, y1
if x1 == x2 and y1 != y2:
return (1, 0)
if x1 == x2 and y1 == y2:
return curve25519_affine_double(x1, y1)
t1 = (y2 - y1) ** 2 % CURVE_P
t2 = (x2 - x1) ** 2 % CURVE_P
x3 = (t1 * inverse_mod(t2, CURVE_P) - 486662 - x1 - x2) % CURVE_P
t1 = (2 * x1 + x2 + 486662) % CURVE_P
t2 = (y2 - y1) % CURVE_P
t3 = (x2 - x1) % CURVE_P
y3 = t1 * (y2 - y1) * inverse_mod((x2 - x1) % CURVE_P, CURVE_P) - \
t2 ** 3 * inverse_mod(t3 ** 3 % CURVE_P, CURVE_P) - y1
y3 = y3 % CURVE_P
return x3, y3
def curve25519_affine_double(x1, y1):
if (x1, y1) == (1, 0):
return (1, 0)
x2 = (3 * x1 ** 2 + 2 * CURVE_A * x1 + 1) ** 2 * inverse_mod((2 * y1) ** 2, CURVE_P) - CURVE_A - x1 - x1
y2 = (2 * x1 + x1 + CURVE_A) * (3 * x1 ** 2 + 2 * CURVE_A * x1 + 1) * inverse_mod(2 * y1, CURVE_P) - \
(3 * x1 ** 2 + 2 * CURVE_A * x1 + 1) ** 3 * inverse_mod((2 * y1) ** 3, CURVE_P) - y1
return x2 % CURVE_P, y2 % CURVE_P
def curve25519_affine_mult(n, x1, y1):
tx, ty = 1, 0
for bit in map(int, bin(n)[2:]):
tx, ty = curve25519_affine_double(tx, ty)
if bit:
tx, ty = curve25519_affine_add(tx, ty, x1, y1)
return tx, ty
def clamp(secret):
a = secret[0]
a &= 248
b = secret[31]
b &= 127
b |= 64
returnVal = (bytearray((a,)) + secret[1:-1] + bytearray((b,)))
return returnVal
def is_negative(x):
return x & 1
def curve25519_eckcdsa_keygen(secret):
s = from_le32(clamp(secret))
x, y = curve25519_affine_mult(s, CURVE_G_X, CURVE_G_Y)
signing_key = inverse_mod(s if is_negative(y) else -s, CURVE_ORDER)
return le32(x), le32(signing_key), le32(s)
def kcdsa_sign(message, secret):
verification_key, signing_key, ignored = curve25519_eckcdsa_keygen(secret)
m = sha256(message).digest()
k = sha256(m + signing_key).digest()
k_Gx, ignored, k_clamped = curve25519_eckcdsa_keygen(k)
r = sha256(m + k_Gx).digest()
s = (from_le32(k_clamped) - from_le32(r)) * from_le32(signing_key) % CURVE_ORDER
return le32(s) + r
def kcdsa_verify(signature, message, public_key):
if len(signature) != 64:
return False
s = from_le32(signature[:32])
r = from_le32(signature[32:64])
px = from_le32(public_key)
py = curve25519_x_to_y(px)
if py is None:
return False
tx1, ty1 = curve25519_affine_mult(s, px, py)
tx2, ty2 = curve25519_affine_mult(r, CURVE_G_X, CURVE_G_Y)
if not is_negative(py):
ty2 = -ty2
k_Gx, k_Gy = curve25519_affine_add(tx1, ty1, tx2, ty2)
m = sha256(message).digest()
return le32(r) == sha256(m + le32(k_Gx)).digest()
if __name__ == "__main__":
import sys
passphrase = sys.argv[1].encode('utf-8')
secret = sha256(passphrase).digest()
message = sys.argv[2].encode('utf-8')
verification_key, signing_key, secret_clamped = curve25519_eckcdsa_keygen(secret)
print('pubkey', verification_key.hex())
print('signing key', signing_key.hex())
signature = kcdsa_sign(message, secret)
print('signature', signature.hex())
assert kcdsa_verify(signature, message, verification_key)
assert not kcdsa_verify(signature[::-1], signature, verification_key) | true | true |
f7f46bf020511a4f03524af47070176c37123866 | 955 | py | Python | designate/objects/quota.py | infobloxopen/designate | 531a28b8453cfe5641284a16e0342db8d709ab36 | [
"Apache-2.0"
] | null | null | null | designate/objects/quota.py | infobloxopen/designate | 531a28b8453cfe5641284a16e0342db8d709ab36 | [
"Apache-2.0"
] | null | null | null | designate/objects/quota.py | infobloxopen/designate | 531a28b8453cfe5641284a16e0342db8d709ab36 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2014 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from designate.objects import base
class Quota(base.DictObjectMixin, base.PersistentObjectMixin,
base.DesignateObject):
FIELDS = {
'tenant_id': {},
'resource': {},
'hard_limit': {}
}
class QuotaList(base.ListObjectMixin, base.DesignateObject):
LIST_ITEM_TYPE = Quota
| 32.931034 | 78 | 0.701571 |
from designate.objects import base
class Quota(base.DictObjectMixin, base.PersistentObjectMixin,
base.DesignateObject):
FIELDS = {
'tenant_id': {},
'resource': {},
'hard_limit': {}
}
class QuotaList(base.ListObjectMixin, base.DesignateObject):
LIST_ITEM_TYPE = Quota
| true | true |
f7f46c9071b1a13cfedc915a4492e1bb29c96c39 | 9,918 | py | Python | NAA/web.py | AlbertUnruh/NAA-API | c2e79c01951b4fb2ae4b4e4f6d27f544891bd5a0 | [
"MIT"
] | null | null | null | NAA/web.py | AlbertUnruh/NAA-API | c2e79c01951b4fb2ae4b4e4f6d27f544891bd5a0 | [
"MIT"
] | null | null | null | NAA/web.py | AlbertUnruh/NAA-API | c2e79c01951b4fb2ae4b4e4f6d27f544891bd5a0 | [
"MIT"
] | null | null | null | import typing
from werkzeug.serving import run_simple
from werkzeug.wrappers import Request, Response
from json import dumps
from warnings import warn
from .models import Node, APIRequest, APIResponse
__all__ = (
"API",
"HTTP_METHODS",
"ALLOWED_LIBS",
)
HTTP_METHODS = [
"GET",
"HEAD",
"POST",
"PUT",
"DELETE",
"CONNECT",
"OPTIONS",
"TRACE",
"PATCH",
]
ALLOWED_LIBS = {
"AlbertUnruhUtils": "https://github.com/AlbertUnruh/AlbertUnruhUtils.py",
}
def _default_endpoint(*_):
return APIResponse(404, {"message": "No Path!"})
class API:
_version_pattern = "v{version}"
_version_default = None
_current_version = None
_checks_request_global: dict[str, list[tuple[callable, int]]]
_checks_response_global: dict[str, list[callable]]
_versions: dict[str, Node]
def __init__(
self,
*,
host="127.0.0.1",
port=3333,
name=None,
default=1,
version_pattern="v{version}",
used_libs=None,
):
"""
Parameters
----------
host: str
The host of the server.
port: int
The port of the server.
name: str, optional
The name of the server.
default: int
The default version.
version_pattern: str
The pattern for the versions.
used_libs: list[str], optional
Additional used libraries to adapt the code to them.
"""
self._host = host
self._port = port
self._name = name or "NAA API"
self._checks_request_global = {}
self._checks_response_global = {}
self._versions = {}
self._default_endpoint = _default_endpoint
assert (
"{version}" in version_pattern
), "'{version}' must be present in 'version_pattern'!"
self._version_pattern = version_pattern
self._version_default = self._version_pattern.format(version=default)
if used_libs is None:
used_libs = []
assert all(lib in ALLOWED_LIBS for lib in used_libs), (
f"You can only use supported libraries! You can use one of these: "
f"{', '.join(f'{k} ({ALLOWED_LIBS[k]})' for k in ALLOWED_LIBS)}"
)
if len(used_libs):
if len(used_libs) == 1:
lib = used_libs[0]
warn(RuntimeWarning(f"Used Library {lib} must be used everywhere!"))
else:
libs = ", ".join(used_libs[:-1]) + f" and {used_libs[-1]}"
warn(RuntimeWarning(f"Used Libraries {libs} must be used everywhere!"))
self._used_libs = used_libs
@Request.application
def _application(self, request):
"""
Parameters
----------
request: Request
"""
path = request.path[1:]
version = self._version_default
p = path.split("/")
if p:
for v in self._versions:
if v == p[0]:
version = v
# to get rid of the version in path
path = path[len(v) + 1 :] # noqa: E203
break
del p
request = APIRequest(
method=request.method,
headers=dict(request.headers),
ip=request.remote_addr,
url=path,
version=version,
)
for check, status in self._checks_request_global.get(version):
if not check(request):
return Response(
status=status,
response=dumps({"message": APIResponse.DEFAULT_MESSAGES[status]}),
content_type="application/json",
)
if not path:
result = self._default_endpoint(request)
# format result from
# AlbertUnruhUtils.ratelimit.server.ServerRateLimit.__call__.decorator()
# Notes
# -----
# - decorator is in this case nested and not direct accessible
# - library: https://github.com/AlbertUnruh/AlbertUnruhUtils.py
if "AlbertUnruhUtils" in self._used_libs:
auu, result = result
if not auu[0]:
result = APIResponse(429)
result._response.update(auu[1]) # noqa
else:
path = path.split("/")
result = self._versions[version].find_node(
path=path, request=request
) # type: APIResponse
for check in self._checks_response_global.get(version):
check(result)
status = result.status_code
response = result.response
response.update(message=result.message)
response = dumps(response)
return Response(
status=status, response=response, content_type="application/json"
)
def add_version(self, version, *, fallback: list[callable] = None):
"""
Parameters
----------
version: int
fallback: list[callable]
"""
for fb in fallback or []:
self.add_version(version)(fb)
def decorator(clb):
"""
Parameters
----------
clb: callable
"""
self._current_version = self._version_pattern.format(version=version)
self._checks_request_global[
self._current_version
] = self._checks_request_global.get(self._current_version, [])
self._checks_response_global[
self._current_version
] = self._checks_response_global.get(self._current_version, [])
version_node = self._versions.get(
self._current_version, Node(*HTTP_METHODS, used_libs=self._used_libs)
) # type: Node
node = Node(*HTTP_METHODS, used_libs=self._used_libs)(clb)
node._children.update(version_node._children) # noqa
self._versions[self._current_version] = node
clb(self)
self._current_version = None
return clb
return decorator
def add(self, *methods, ignore_invalid_methods=False):
"""
Parameters
----------
methods: str
ignore_invalid_methods: bool
"""
def decorator(clb):
"""
Parameters
----------
clb: callable
The function/method which should be added as a node.
Returns
-------
Node
The new node.
"""
version = self._get_version()
node = Node(
*methods,
ignore_invalid_methods=ignore_invalid_methods,
used_libs=self._used_libs,
)
node(clb)
self._versions[version]._children[clb.__name__] = node # noqa
return node
return decorator
def add_global_request_check(self, default_return_value):
"""
If the check returns False the `default_return_value`
is returned and the request 'll not be processed.
Parameters
----------
default_return_value: int
"""
def decorator(clb):
"""
Parameters
----------
clb: callable
"""
version = self._get_version()
self._checks_request_global[version].append((clb, default_return_value))
return clb
return decorator
def add_global_response_check(self):
"""
Can be used to edit responses before sending them.
"""
def decorator(clb):
"""
Parameters
----------
clb: callable
"""
version = self._get_version()
self._checks_response_global[version].append(clb)
return clb
return decorator
def default_endpoint(
self,
clb: typing.Callable[[APIRequest], APIResponse],
) -> typing.Callable[[APIRequest], APIResponse]:
"""
Adds a default endpoint. 'll be displayed if no path is given.
Parameters
----------
clb: typing.Callable[[APIRequest], APIResponse]
The endpoint.
Returns
-------
typing.Callable[[APIRequest], APIResponse]
"""
self._default_endpoint = clb
return clb
@property
def host(self):
"""
Returns
-------
str
"""
return self._host
@property
def port(self):
"""
Returns
-------
int
"""
return self._port
def run_api(self, *, debug=False, reload=False, processes=1):
"""
Parameters
----------
debug, reload: bool
Whether it should debug/reload.
processes: int
The number of processes which can be used by the server.
"""
if self._versions and (default := self._version_default) is not None:
if default not in self._versions:
raise RuntimeError(
f"Can't have {default!r} as default version, because this version is not set!"
)
run_simple(
self.host,
self.port,
self._application,
use_reloader=reload,
use_debugger=debug,
processes=processes,
)
__call__ = run_api
def _get_version(self):
"""
Returns
-------
str
Raises
------
AssertionError
"""
assert (
version := self._current_version
) is not None, (
"You can only add an endpoint if you are in a version (API.add_version)"
)
return version
| 27.626741 | 98 | 0.527022 | import typing
from werkzeug.serving import run_simple
from werkzeug.wrappers import Request, Response
from json import dumps
from warnings import warn
from .models import Node, APIRequest, APIResponse
__all__ = (
"API",
"HTTP_METHODS",
"ALLOWED_LIBS",
)
HTTP_METHODS = [
"GET",
"HEAD",
"POST",
"PUT",
"DELETE",
"CONNECT",
"OPTIONS",
"TRACE",
"PATCH",
]
ALLOWED_LIBS = {
"AlbertUnruhUtils": "https://github.com/AlbertUnruh/AlbertUnruhUtils.py",
}
def _default_endpoint(*_):
return APIResponse(404, {"message": "No Path!"})
class API:
_version_pattern = "v{version}"
_version_default = None
_current_version = None
_checks_request_global: dict[str, list[tuple[callable, int]]]
_checks_response_global: dict[str, list[callable]]
_versions: dict[str, Node]
def __init__(
self,
*,
host="127.0.0.1",
port=3333,
name=None,
default=1,
version_pattern="v{version}",
used_libs=None,
):
self._host = host
self._port = port
self._name = name or "NAA API"
self._checks_request_global = {}
self._checks_response_global = {}
self._versions = {}
self._default_endpoint = _default_endpoint
assert (
"{version}" in version_pattern
), "'{version}' must be present in 'version_pattern'!"
self._version_pattern = version_pattern
self._version_default = self._version_pattern.format(version=default)
if used_libs is None:
used_libs = []
assert all(lib in ALLOWED_LIBS for lib in used_libs), (
f"You can only use supported libraries! You can use one of these: "
f"{', '.join(f'{k} ({ALLOWED_LIBS[k]})' for k in ALLOWED_LIBS)}"
)
if len(used_libs):
if len(used_libs) == 1:
lib = used_libs[0]
warn(RuntimeWarning(f"Used Library {lib} must be used everywhere!"))
else:
libs = ", ".join(used_libs[:-1]) + f" and {used_libs[-1]}"
warn(RuntimeWarning(f"Used Libraries {libs} must be used everywhere!"))
self._used_libs = used_libs
@Request.application
def _application(self, request):
path = request.path[1:]
version = self._version_default
p = path.split("/")
if p:
for v in self._versions:
if v == p[0]:
version = v
path = path[len(v) + 1 :]
break
del p
request = APIRequest(
method=request.method,
headers=dict(request.headers),
ip=request.remote_addr,
url=path,
version=version,
)
for check, status in self._checks_request_global.get(version):
if not check(request):
return Response(
status=status,
response=dumps({"message": APIResponse.DEFAULT_MESSAGES[status]}),
content_type="application/json",
)
if not path:
result = self._default_endpoint(request)
if "AlbertUnruhUtils" in self._used_libs:
auu, result = result
if not auu[0]:
result = APIResponse(429)
result._response.update(auu[1])
else:
path = path.split("/")
result = self._versions[version].find_node(
path=path, request=request
)
for check in self._checks_response_global.get(version):
check(result)
status = result.status_code
response = result.response
response.update(message=result.message)
response = dumps(response)
return Response(
status=status, response=response, content_type="application/json"
)
def add_version(self, version, *, fallback: list[callable] = None):
for fb in fallback or []:
self.add_version(version)(fb)
def decorator(clb):
self._current_version = self._version_pattern.format(version=version)
self._checks_request_global[
self._current_version
] = self._checks_request_global.get(self._current_version, [])
self._checks_response_global[
self._current_version
] = self._checks_response_global.get(self._current_version, [])
version_node = self._versions.get(
self._current_version, Node(*HTTP_METHODS, used_libs=self._used_libs)
)
node = Node(*HTTP_METHODS, used_libs=self._used_libs)(clb)
node._children.update(version_node._children)
self._versions[self._current_version] = node
clb(self)
self._current_version = None
return clb
return decorator
def add(self, *methods, ignore_invalid_methods=False):
def decorator(clb):
version = self._get_version()
node = Node(
*methods,
ignore_invalid_methods=ignore_invalid_methods,
used_libs=self._used_libs,
)
node(clb)
self._versions[version]._children[clb.__name__] = node
return node
return decorator
def add_global_request_check(self, default_return_value):
def decorator(clb):
version = self._get_version()
self._checks_request_global[version].append((clb, default_return_value))
return clb
return decorator
def add_global_response_check(self):
def decorator(clb):
version = self._get_version()
self._checks_response_global[version].append(clb)
return clb
return decorator
def default_endpoint(
self,
clb: typing.Callable[[APIRequest], APIResponse],
) -> typing.Callable[[APIRequest], APIResponse]:
self._default_endpoint = clb
return clb
@property
def host(self):
return self._host
@property
def port(self):
return self._port
def run_api(self, *, debug=False, reload=False, processes=1):
if self._versions and (default := self._version_default) is not None:
if default not in self._versions:
raise RuntimeError(
f"Can't have {default!r} as default version, because this version is not set!"
)
run_simple(
self.host,
self.port,
self._application,
use_reloader=reload,
use_debugger=debug,
processes=processes,
)
__call__ = run_api
def _get_version(self):
assert (
version := self._current_version
) is not None, (
"You can only add an endpoint if you are in a version (API.add_version)"
)
return version
| true | true |
f7f46e2b3c6e42b33eb61ceb086623c097b17c00 | 21,617 | py | Python | pvpn/crypto.py | qwj/python-vpn | b09e4ab180e3cb4c6b480e9693e8f19854c7b6ec | [
"MIT"
] | 125 | 2018-12-25T12:18:55.000Z | 2022-03-26T17:56:31.000Z | pvpn/crypto.py | Open-ATS-Github/python-vpn | b09e4ab180e3cb4c6b480e9693e8f19854c7b6ec | [
"MIT"
] | 6 | 2019-01-08T09:12:28.000Z | 2022-01-11T15:39:24.000Z | pvpn/crypto.py | Open-ATS-Github/python-vpn | b09e4ab180e3cb4c6b480e9693e8f19854c7b6ec | [
"MIT"
] | 36 | 2018-12-29T15:40:12.000Z | 2022-03-25T21:37:04.000Z | import hashlib, os, random, hmac
from Crypto.Cipher import AES, ChaCha20_Poly1305
from . import enums
class Prf:
DIGESTS_1 = {
enums.HashId_1.MD5: (hashlib.md5, 16),
enums.HashId_1.SHA: (hashlib.sha1, 20),
enums.HashId_1.SHA2_256: (hashlib.sha256, 32),
enums.HashId_1.SHA2_384: (hashlib.sha384, 48),
enums.HashId_1.SHA2_512: (hashlib.sha512, 64),
}
DIGESTS = {
enums.PrfId.PRF_HMAC_MD5: (hashlib.md5, 16),
enums.PrfId.PRF_HMAC_SHA1: (hashlib.sha1, 20),
enums.PrfId.PRF_HMAC_SHA2_256: (hashlib.sha256, 32),
enums.PrfId.PRF_HMAC_SHA2_384: (hashlib.sha384, 48),
enums.PrfId.PRF_HMAC_SHA2_512: (hashlib.sha512, 64),
}
def __init__(self, transform):
self.hasher, self.key_size = self.DIGESTS[transform] if type(transform) is enums.PrfId else self.DIGESTS_1[transform]
def prf(self, key, data):
return hmac.HMAC(key, data, digestmod=self.hasher).digest()
def prfplus(self, key, seed, count=True):
temp = bytes()
for i in range(1, 1024):
temp = self.prf(key, temp + seed + (bytes([i]) if count else b''))
yield from temp
class Integrity:
DIGESTS_1 = {
enums.IntegId_1.AUTH_HMAC_MD5: (hashlib.md5, 16, 12),
enums.IntegId_1.AUTH_HMAC_SHA1: (hashlib.sha1, 20, 12),
enums.IntegId_1.AUTH_HMAC_SHA2_256: (hashlib.sha256, 32, 16),
enums.IntegId_1.AUTH_HMAC_SHA2_384: (hashlib.sha384, 48, 24),
enums.IntegId_1.AUTH_HMAC_SHA2_512: (hashlib.sha512, 64, 32),
}
DIGESTS = {
enums.IntegId.AUTH_HMAC_MD5_96: (hashlib.md5, 16, 12),
enums.IntegId.AUTH_HMAC_SHA1_96: (hashlib.sha1, 20, 12),
enums.IntegId.AUTH_HMAC_MD5_128: (hashlib.md5, 16, 16),
enums.IntegId.AUTH_HMAC_SHA1_160: (hashlib.sha1, 20, 20),
enums.IntegId.AUTH_HMAC_SHA2_256_128: (hashlib.sha256, 32, 16),
enums.IntegId.AUTH_HMAC_SHA2_384_192: (hashlib.sha384, 48, 24),
enums.IntegId.AUTH_HMAC_SHA2_512_256: (hashlib.sha512, 64, 32),
}
def __init__(self, transform):
self.hasher, self.key_size, self.hash_size = self.DIGESTS[transform] if type(transform) is enums.IntegId else self.DIGESTS_1[transform]
def compute(self, key, data):
return hmac.HMAC(key, data, digestmod=self.hasher).digest()[:self.hash_size]
class Cipher:
def __init__(self, transform, keylen):
assert type(transform) is enums.EncrId and transform == enums.EncrId.ENCR_AES_CBC or \
type(transform) is enums.EncrId_1 and transform == enums.EncrId_1.AES_CBC
self.keylen = keylen
@property
def block_size(self):
return 16
@property
def key_size(self):
return self.keylen // 8
def encrypt(self, key, iv, data):
return AES.new(key, AES.MODE_CBC, iv=iv).encrypt(data)
def decrypt(self, key, iv, data):
return AES.new(key, AES.MODE_CBC, iv=iv).decrypt(data)
def generate_iv(self):
return os.urandom(self.block_size)
class Crypto:
def __init__(self, cipher, sk_e, integrity=None, sk_a=None, prf=None, sk_p=None, *, iv=None):
self.cipher = cipher
self.sk_e = sk_e
self.integrity = integrity
self.sk_a = sk_a
self.prf = prf
self.sk_p = sk_p
self.iv = {0: iv}
self.last_iv = None
self.m_id = set()
def decrypt_esp(self, encrypted):
iv = encrypted[:self.cipher.block_size]
ciphertext = encrypted[self.cipher.block_size:len(encrypted)-self.integrity.hash_size]
plain = self.cipher.decrypt(self.sk_e, bytes(iv), bytes(ciphertext))
next_header = plain[-1]
padlen = plain[-2]
return next_header, plain[:-2-padlen]
def encrypt_esp(self, next_header, plain):
iv = self.cipher.generate_iv()
padlen = self.cipher.block_size - ((len(plain)+1) % self.cipher.block_size) - 1
plain += b'\x00' * padlen + bytes([padlen, next_header])
encrypted = self.cipher.encrypt(self.sk_e, bytes(iv), bytes(plain))
return iv + encrypted + bytes(self.integrity.hash_size)
def encrypt_1(self, plain, m_id):
if m_id not in self.iv:
self.iv[m_id] = self.prf.hasher(self.iv[0]+m_id.to_bytes(4, 'big')).digest()[:self.cipher.block_size]
padlen = self.cipher.block_size - ((len(plain)+1) % self.cipher.block_size)
plain += b'\x00' * padlen + bytes([padlen])
encrypted = self.cipher.encrypt(self.sk_e, self.iv[m_id], bytes(plain))
self.iv[m_id] = encrypted[-self.cipher.block_size:]
return encrypted
def decrypt_1(self, encrypted, m_id):
if m_id not in self.iv:
self.iv[m_id] = self.prf.hasher(self.iv[0]+m_id.to_bytes(4, 'big')).digest()[:self.cipher.block_size]
plain = self.cipher.decrypt(self.sk_e, self.iv[m_id], encrypted)
self.iv[m_id] = encrypted[-self.cipher.block_size:]
padlen = plain[-1]
# do not remove padding according to ios cisco ipsec bug
return plain
def decrypt(self, encrypted):
iv = encrypted[:self.cipher.block_size]
ciphertext = encrypted[self.cipher.block_size:len(encrypted)-self.integrity.hash_size]
plain = self.cipher.decrypt(self.sk_e, bytes(iv), bytes(ciphertext))
padlen = plain[-1]
return plain[:-1-padlen]
def encrypt(self, plain):
iv = self.cipher.generate_iv()
padlen = self.cipher.block_size - (len(plain) % self.cipher.block_size) - 1
plain += b'\x00' * padlen + bytes([padlen])
encrypted = self.cipher.encrypt(self.sk_e, bytes(iv), bytes(plain))
return iv + encrypted + bytes(self.integrity.hash_size)
def verify_checksum(self, encrypted):
checksum = self.integrity.compute(self.sk_a, encrypted[:len(encrypted)-self.integrity.hash_size])
assert checksum == encrypted[len(encrypted)-self.integrity.hash_size:]
def add_checksum(self, encrypted):
checksum = self.integrity.compute(self.sk_a, encrypted[:len(encrypted)-self.integrity.hash_size])
encrypted[len(encrypted)-len(checksum):] = checksum
def aead_chacha20poly1305_encrypt(key, counter, plain_text, auth_text):
cipher = ChaCha20_Poly1305.new(key=key, nonce=b'\x00\x00\x00\x00'+counter.to_bytes(8, 'little'))
cipher.update(auth_text)
cipher_text, digest = cipher.encrypt_and_digest(plain_text)
return cipher_text+digest
def aead_chacha20poly1305_decrypt(key, counter, cipher_text, auth_text):
cipher = ChaCha20_Poly1305.new(key=key, nonce=b'\x00\x00\x00\x00'+counter.to_bytes(8, 'little'))
cipher.update(auth_text)
return cipher.decrypt_and_verify(cipher_text[:-16], cipher_text[-16:])
# DH and ECDH algorithms
def ec_add(P, Q, l, p, a):
if P == 0:
return Q
if P == Q:
z = (3*(P>>l)*(P>>l)+a) * pow(2*(P&(1<<l)-1), p-2, p)
else:
z = ((Q&(1<<l)-1) - (P&(1<<l)-1)) * pow((Q>>l)-(P>>l), p-2, p)
x = (z*z - (P>>l) - (Q>>l)) % p
return x<<l | (z*((P>>l)-x) - (P&(1<<l)-1)) % p
def ec_mul(P, l, i, p, a):
r = 0
while i > 0:
if i & 1:
r = ec_add(r, P, l<<3, p, a)
i, P = i>>1, ec_add(P, P, l<<3, p, a)
return r
def ec_scalar(k, u, p, a24, bits):
x_2, x_3, z_2, z_3, swap = 1, u, 0, 1, 0
for t in range(bits-1, -1, -1):
k_t = (k >> t) & 1
if swap^k_t:
x_2, x_3, z_2, z_3 = x_3, x_2, z_3, z_2
swap = k_t
A, B, C, D = x_2+z_2, x_2-z_2, x_3+z_3, x_3-z_3
AA, BB, DA, CB = A*A, B*B, D*A, C*B
E = AA - BB
x_3 = pow(DA + CB, 2, p)
z_3 = u * pow(DA - CB, 2, p) % p
x_2 = AA * BB % p
z_2 = E * (AA + a24*E) % p
if swap:
x_2, x_3, z_2, z_3 = x_3, x_2, z_3, z_2
return (x_2 * pow(z_2, p-2, p) % p)
def X25519(k, u):
u, k = int.from_bytes(u, 'little') if isinstance(u, bytes) else u, int.from_bytes(k, 'little')
k = k & ((1 << 256) - (1 << 255) - 8) | (1 << 254)
return ec_scalar(k, u, 2**255-19, 121665, 255).to_bytes(32, 'little')
def X448(k, u):
u, k = int.from_bytes(u, 'little') if isinstance(u, bytes) else u, int.from_bytes(k, 'little')
k = k & (-4) | (1 << 447)
return ec_scalar(k, u, 2**448-2**224-1, 39081, 448).to_bytes(56, 'little')
PRIMES = {
enums.DhId.DH_1: (0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A63A3620FFFFFFFFFFFFFFFF, 2, 96),
enums.DhId.DH_2: (0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF, 2, 128),
enums.DhId.DH_5: (0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF, 2, 192),
enums.DhId.DH_14: (0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF, 2, 256),
enums.DhId.DH_15: (0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF, 2, 384),
enums.DhId.DH_16: (0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199FFFFFFFFFFFFFFFF, 2, 512),
enums.DhId.DH_17: (0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DCC4024FFFFFFFFFFFFFFFF, 2, 768),
enums.DhId.DH_18: (0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E438777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F5683423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD922222E04A4037C0713EB57A81A23F0C73473FC646CEA306B4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A364597E899A0255DC164F31CC50846851DF9AB48195DED7EA1B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F924009438B481C6CD7889A002ED5EE382BC9190DA6FC026E479558E447567
7E9AA9E3050E2765694DFC81F56E880B96E7160C980DD98EDD3DFFFFFFFFFFFFFFFFF, 2, 1024),
enums.DhId.DH_19: (0xFFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFF, (0x6B17D1F2E12C4247F8BCE6E563A440F277037D812DEB33A0F4A13945D898C2964FE342E2FE1A7F9B8EE7EB4A7C0F9E162BCE33576B315ECECBB6406837BF51F5, -3), 32),
enums.DhId.DH_20: (0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFF0000000000000000FFFFFFFF, (0xAA87CA22BE8B05378EB1C71EF320AD746E1D3B628BA79B9859F741E082542A385502F25DBF55296C3A545E3872760AB73617DE4A96262C6F5D9E98BF9292DC29F8F41DBD289A147CE9DA3113B5F0B8C00A60B1CE1D7E819D7A431D7C90EA0E5F, -3), 48),
enums.DhId.DH_21: (0x01FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF, (0x00C6858E06B70404E9CD9E3ECB662395B4429C648139053FB521F828AF606B4D3DBAA14B5E77EFE75928FE1DC127A2FFA8DE3348B3C1856A429BF97E7E31C2E5BD66011839296A789A3BC0045C8A5FB42C7D1BD998F54449579B446817AFBD17273E662C97EE72995EF42640C550B9013FAD0761353C7086A272C24088BE94769FD16650, -3), 66),
enums.DhId.DH_22: (0xB10B8F96A080E01DDE92DE5EAE5D54EC52C99FBCFB06A3C69A6A9DCA52D23B616073E28675A23D189838EF1E2EE652C013ECB4AEA906112324975C3CD49B83BFACCBDD7D90C4BD7098488E9C219A73724EFFD6FAE5644738FAA31A4FF55BCCC0A151AF5F0DC8B4BD45BF37DF365C1A65E68CFDA76D4DA708DF1FB2BC2E4A4371, 0xA4D1CBD5C3FD34126765A442EFB99905F8104DD258AC507FD6406CFF14266D31266FEA1E5C41564B777E690F5504F213160217B4B01B886A5E91547F9E2749F4D7FBD7D3B9A92EE1909D0D2263F80A76A6A24C087A091F531DBF0A0169B6A28AD662A4D18E73AFA32D779D5918D08BC8858F4DCEF97C2A24855E6EEB22B3B2E5, 128),
enums.DhId.DH_23: (0xAD107E1E9123A9D0D660FAA79559C51FA20D64E5683B9FD1B54B1597B61D0A75E6FA141DF95A56DBAF9A3C407BA1DF15EB3D688A309C180E1DE6B85A1274A0A66D3F8152AD6AC2129037C9EDEFDA4DF8D91E8FEF55B7394B7AD5B7D0B6C12207C9F98D11ED34DBF6C6BA0B2C8BBC27BE6A00E0A0B9C49708B3BF8A317091883681286130BC8985DB1602E714415D9330278273C7DE31EFDC7310F7121FD5A07415987D9ADC0A486DCDF93ACC44328387315D75E198C641A480CD86A1B9E587E8BE60E69CC928B2B9C52172E413042E9B23F10B0E16E79763C9B53DCF4BA80A29E3FB73C16B8E75B97EF363E2FFA31F71CF9DE5384E71B81C0AC4DFFE0C10E64F, 0xAC4032EF4F2D9AE39DF30B5C8FFDAC506CDEBE7B89998CAF74866A08CFE4FFE3A6824A4E10B9A6F0DD921F01A70C4AFAAB739D7700C29F52C57DB17C620A8652BE5E9001A8D66AD7C17669101999024AF4D027275AC1348BB8A762D0521BC98AE247150422EA1ED409939D54DA7460CDB5F6C6B250717CBEF180EB34118E98D119529A45D6F834566E3025E316A330EFBB77A86F0C1AB15B051AE3D428C8F8ACB70A8137150B8EEB10E183EDD19963DDD9E263E4770589EF6AA21E7F5F2FF381B539CCE3409D13CD566AFBB48D6C019181E1BCFE94B30269EDFE72FE9B6AA4BD7B5A0F1C71CFFF4C19C418E1F6EC017981BC087F2A7065B384B890D3191F2BFA, 256),
enums.DhId.DH_24: (0x87A8E61DB4B6663CFFBBD19C651959998CEEF608660DD0F25D2CEED4435E3B00E00DF8F1D61957D4FAF7DF4561B2AA3016C3D91134096FAA3BF4296D830E9A7C209E0C6497517ABD5A8A9D306BCF67ED91F9E6725B4758C022E0B1EF4275BF7B6C5BFC11D45F9088B941F54EB1E59BB8BC39A0BF12307F5C4FDB70C581B23F76B63ACAE1CAA6B7902D52526735488A0EF13C6D9A51BFA4AB3AD8347796524D8EF6A167B5A41825D967E144E5140564251CCACB83E6B486F6B3CA3F7971506026C0B857F689962856DED4010ABD0BE621C3A3960A54E710C375F26375D7014103A4B54330C198AF126116D2276E11715F693877FAD7EF09CADB094AE91E1A1597, 0x3FB32C9B73134D0B2E77506660EDBD484CA7B18F21EF205407F4793A1A0BA12510DBC15077BE463FFF4FED4AAC0BB555BE3A6C1B0C6B47B1BC3773BF7E8C6F62901228F8C28CBB18A55AE31341000A650196F931C77A57F2DDF463E5E9EC144B777DE62AAAB8A8628AC376D282D6ED3864E67982428EBC831D14348F6F2F9193B5045AF2767164E1DFC967C1FB3F2E55A4BD1BFFE83B9C80D052B985D182EA0ADB2A3B7313D3FE14C8484B1E052588B9B7D2BBD2DF016199ECD06E1557CD0915B3353BBB64E0EC377FD028370DF92B52C7891428CDC67EB6184B523D1DB246C32F63078490F00EF8D647D148D47954515E2327CFEF98C582664B4C0F6CC41659, 256),
enums.DhId.DH_25: (0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFF, (0x188DA80EB03090F67CBF20EB43A18800F4FF0AFD82FF101207192B95FFC8DA78631011ED6B24CDD573F977A11E794811, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFC), 24),
enums.DhId.DH_26: (0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF000000000000000000000001, (0xB70E0CBD6BB4BF7F321390B94A03C1D356C21122343280D6115C1D21BD376388B5F723FB4C22DFE6CD4375A05A07476444D5819985007E34, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFFFFFFFFFE), 28),
enums.DhId.DH_27: (0xD7C134AA264366862A18302575D1D787B09F075797DA89F57EC8C0FF, (0x0D9029AD2C7E5CF4340823B2A87DC68C9E4CE3174C1E6EFDEE12C07D58AA56F772C0726F24C6B89E4ECDAC24354B9E99CAA3F6D3761402CD, 0x68A5E62CA9CE6C1C299803A6C1530B514E182AD8B0042A59CAD29F43), 28),
enums.DhId.DH_28: (0xA9FB57DBA1EEA9BC3E660A909D838D726E3BF623D52620282013481D1F6E5377, (0x8BD2AEB9CB7E57CB2C4B482FFC81B7AFB9DE27E1E3BD23C23A4453BD9ACE3262547EF835C3DAC4FD97F8461A14611DC9C27745132DED8E545C1D54C72F046997, 0x7D5A0975FC2C3057EEF67530417AFFE7FB8055C126DC5C6CE94A4B44F330B5D9), 32),
enums.DhId.DH_29: (0x8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B412B1DA197FB71123ACD3A729901D1A71874700133107EC53, (0x1D1C64F068CF45FFA2A63A81B7C13F6B8847A3E77EF14FE3DB7FCAFE0CBD10E8E826E03436D646AAEF87B2E247D4AF1E8ABE1D7520F9C2A45CB1EB8E95CFD55262B70B29FEEC5864E19C054FF99129280E4646217791811142820341263C5315, 0x7BC382C63D8C150C3C72080ACE05AFA0C2BEA28E4FB22787139165EFBA91F90F8AA5814A503AD4EB04A8C7DD22CE2826), 48),
enums.DhId.DH_30: (0xAADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA703308717D4D9B009BC66842AECDA12AE6A380E62881FF2F2D82C68528AA6056583A48F3, (0x81AEE4BDD82ED9645A21322E9C4C6A9385ED9F70B5D916C1B43B62EEF4D0098EFF3B1F78E2D0D48D50D1687B93B97D5F7C6D5047406A5E688B352209BCB9F8227DDE385D566332ECC0EABFA9CF7822FDF209F70024A57B1AA000C55B881F8111B2DCDE494A5F485E5BCA4BD88A2763AED1CA2B2FA8F0540678CD1E0F3AD80892, 0x7830A3318B603B89E2327145AC234CC594CBDD8D3DF91610A83441CAEA9863BC2DED5D5AA8253AA10A2EF1C98B9AC8B57F1117A72BF2C7B9E7C1AC4D77FC94CA), 64),
enums.DhId.DH_31: (1<<32, X25519, 9),
enums.DhId.DH_32: (1<<56, X448, 5),
}
def DiffieHellman(group, peer):
if group not in PRIMES:
raise Exception(f'Unsupported DH Group DH_{group}')
p, g, l = PRIMES[group]
a = random.randrange(p>>8, p)
if callable(g):
return g(a, l), g(a, peer)
elif type(g) is tuple:
return ec_mul(g[0], l, a, p, g[1]).to_bytes(l*2, 'big'), ec_mul(int.from_bytes(peer, 'big'), l, a, p, g[1]).to_bytes(l*2, 'big')[:l]
else:
return pow(g, a, p).to_bytes(l, 'big'), pow(int.from_bytes(peer, 'big'), a, p).to_bytes(l, 'big')
| 96.075556 | 2,084 | 0.825554 | import hashlib, os, random, hmac
from Crypto.Cipher import AES, ChaCha20_Poly1305
from . import enums
class Prf:
DIGESTS_1 = {
enums.HashId_1.MD5: (hashlib.md5, 16),
enums.HashId_1.SHA: (hashlib.sha1, 20),
enums.HashId_1.SHA2_256: (hashlib.sha256, 32),
enums.HashId_1.SHA2_384: (hashlib.sha384, 48),
enums.HashId_1.SHA2_512: (hashlib.sha512, 64),
}
DIGESTS = {
enums.PrfId.PRF_HMAC_MD5: (hashlib.md5, 16),
enums.PrfId.PRF_HMAC_SHA1: (hashlib.sha1, 20),
enums.PrfId.PRF_HMAC_SHA2_256: (hashlib.sha256, 32),
enums.PrfId.PRF_HMAC_SHA2_384: (hashlib.sha384, 48),
enums.PrfId.PRF_HMAC_SHA2_512: (hashlib.sha512, 64),
}
def __init__(self, transform):
self.hasher, self.key_size = self.DIGESTS[transform] if type(transform) is enums.PrfId else self.DIGESTS_1[transform]
def prf(self, key, data):
return hmac.HMAC(key, data, digestmod=self.hasher).digest()
def prfplus(self, key, seed, count=True):
temp = bytes()
for i in range(1, 1024):
temp = self.prf(key, temp + seed + (bytes([i]) if count else b''))
yield from temp
class Integrity:
DIGESTS_1 = {
enums.IntegId_1.AUTH_HMAC_MD5: (hashlib.md5, 16, 12),
enums.IntegId_1.AUTH_HMAC_SHA1: (hashlib.sha1, 20, 12),
enums.IntegId_1.AUTH_HMAC_SHA2_256: (hashlib.sha256, 32, 16),
enums.IntegId_1.AUTH_HMAC_SHA2_384: (hashlib.sha384, 48, 24),
enums.IntegId_1.AUTH_HMAC_SHA2_512: (hashlib.sha512, 64, 32),
}
DIGESTS = {
enums.IntegId.AUTH_HMAC_MD5_96: (hashlib.md5, 16, 12),
enums.IntegId.AUTH_HMAC_SHA1_96: (hashlib.sha1, 20, 12),
enums.IntegId.AUTH_HMAC_MD5_128: (hashlib.md5, 16, 16),
enums.IntegId.AUTH_HMAC_SHA1_160: (hashlib.sha1, 20, 20),
enums.IntegId.AUTH_HMAC_SHA2_256_128: (hashlib.sha256, 32, 16),
enums.IntegId.AUTH_HMAC_SHA2_384_192: (hashlib.sha384, 48, 24),
enums.IntegId.AUTH_HMAC_SHA2_512_256: (hashlib.sha512, 64, 32),
}
def __init__(self, transform):
self.hasher, self.key_size, self.hash_size = self.DIGESTS[transform] if type(transform) is enums.IntegId else self.DIGESTS_1[transform]
def compute(self, key, data):
return hmac.HMAC(key, data, digestmod=self.hasher).digest()[:self.hash_size]
class Cipher:
def __init__(self, transform, keylen):
assert type(transform) is enums.EncrId and transform == enums.EncrId.ENCR_AES_CBC or \
type(transform) is enums.EncrId_1 and transform == enums.EncrId_1.AES_CBC
self.keylen = keylen
@property
def block_size(self):
return 16
@property
def key_size(self):
return self.keylen // 8
def encrypt(self, key, iv, data):
return AES.new(key, AES.MODE_CBC, iv=iv).encrypt(data)
def decrypt(self, key, iv, data):
return AES.new(key, AES.MODE_CBC, iv=iv).decrypt(data)
def generate_iv(self):
return os.urandom(self.block_size)
class Crypto:
def __init__(self, cipher, sk_e, integrity=None, sk_a=None, prf=None, sk_p=None, *, iv=None):
self.cipher = cipher
self.sk_e = sk_e
self.integrity = integrity
self.sk_a = sk_a
self.prf = prf
self.sk_p = sk_p
self.iv = {0: iv}
self.last_iv = None
self.m_id = set()
def decrypt_esp(self, encrypted):
iv = encrypted[:self.cipher.block_size]
ciphertext = encrypted[self.cipher.block_size:len(encrypted)-self.integrity.hash_size]
plain = self.cipher.decrypt(self.sk_e, bytes(iv), bytes(ciphertext))
next_header = plain[-1]
padlen = plain[-2]
return next_header, plain[:-2-padlen]
def encrypt_esp(self, next_header, plain):
iv = self.cipher.generate_iv()
padlen = self.cipher.block_size - ((len(plain)+1) % self.cipher.block_size) - 1
plain += b'\x00' * padlen + bytes([padlen, next_header])
encrypted = self.cipher.encrypt(self.sk_e, bytes(iv), bytes(plain))
return iv + encrypted + bytes(self.integrity.hash_size)
def encrypt_1(self, plain, m_id):
if m_id not in self.iv:
self.iv[m_id] = self.prf.hasher(self.iv[0]+m_id.to_bytes(4, 'big')).digest()[:self.cipher.block_size]
padlen = self.cipher.block_size - ((len(plain)+1) % self.cipher.block_size)
plain += b'\x00' * padlen + bytes([padlen])
encrypted = self.cipher.encrypt(self.sk_e, self.iv[m_id], bytes(plain))
self.iv[m_id] = encrypted[-self.cipher.block_size:]
return encrypted
def decrypt_1(self, encrypted, m_id):
if m_id not in self.iv:
self.iv[m_id] = self.prf.hasher(self.iv[0]+m_id.to_bytes(4, 'big')).digest()[:self.cipher.block_size]
plain = self.cipher.decrypt(self.sk_e, self.iv[m_id], encrypted)
self.iv[m_id] = encrypted[-self.cipher.block_size:]
padlen = plain[-1]
return plain
def decrypt(self, encrypted):
iv = encrypted[:self.cipher.block_size]
ciphertext = encrypted[self.cipher.block_size:len(encrypted)-self.integrity.hash_size]
plain = self.cipher.decrypt(self.sk_e, bytes(iv), bytes(ciphertext))
padlen = plain[-1]
return plain[:-1-padlen]
def encrypt(self, plain):
iv = self.cipher.generate_iv()
padlen = self.cipher.block_size - (len(plain) % self.cipher.block_size) - 1
plain += b'\x00' * padlen + bytes([padlen])
encrypted = self.cipher.encrypt(self.sk_e, bytes(iv), bytes(plain))
return iv + encrypted + bytes(self.integrity.hash_size)
def verify_checksum(self, encrypted):
checksum = self.integrity.compute(self.sk_a, encrypted[:len(encrypted)-self.integrity.hash_size])
assert checksum == encrypted[len(encrypted)-self.integrity.hash_size:]
def add_checksum(self, encrypted):
checksum = self.integrity.compute(self.sk_a, encrypted[:len(encrypted)-self.integrity.hash_size])
encrypted[len(encrypted)-len(checksum):] = checksum
def aead_chacha20poly1305_encrypt(key, counter, plain_text, auth_text):
cipher = ChaCha20_Poly1305.new(key=key, nonce=b'\x00\x00\x00\x00'+counter.to_bytes(8, 'little'))
cipher.update(auth_text)
cipher_text, digest = cipher.encrypt_and_digest(plain_text)
return cipher_text+digest
def aead_chacha20poly1305_decrypt(key, counter, cipher_text, auth_text):
cipher = ChaCha20_Poly1305.new(key=key, nonce=b'\x00\x00\x00\x00'+counter.to_bytes(8, 'little'))
cipher.update(auth_text)
return cipher.decrypt_and_verify(cipher_text[:-16], cipher_text[-16:])
def ec_add(P, Q, l, p, a):
if P == 0:
return Q
if P == Q:
z = (3*(P>>l)*(P>>l)+a) * pow(2*(P&(1<<l)-1), p-2, p)
else:
z = ((Q&(1<<l)-1) - (P&(1<<l)-1)) * pow((Q>>l)-(P>>l), p-2, p)
x = (z*z - (P>>l) - (Q>>l)) % p
return x<<l | (z*((P>>l)-x) - (P&(1<<l)-1)) % p
def ec_mul(P, l, i, p, a):
r = 0
while i > 0:
if i & 1:
r = ec_add(r, P, l<<3, p, a)
i, P = i>>1, ec_add(P, P, l<<3, p, a)
return r
def ec_scalar(k, u, p, a24, bits):
x_2, x_3, z_2, z_3, swap = 1, u, 0, 1, 0
for t in range(bits-1, -1, -1):
k_t = (k >> t) & 1
if swap^k_t:
x_2, x_3, z_2, z_3 = x_3, x_2, z_3, z_2
swap = k_t
A, B, C, D = x_2+z_2, x_2-z_2, x_3+z_3, x_3-z_3
AA, BB, DA, CB = A*A, B*B, D*A, C*B
E = AA - BB
x_3 = pow(DA + CB, 2, p)
z_3 = u * pow(DA - CB, 2, p) % p
x_2 = AA * BB % p
z_2 = E * (AA + a24*E) % p
if swap:
x_2, x_3, z_2, z_3 = x_3, x_2, z_3, z_2
return (x_2 * pow(z_2, p-2, p) % p)
def X25519(k, u):
u, k = int.from_bytes(u, 'little') if isinstance(u, bytes) else u, int.from_bytes(k, 'little')
k = k & ((1 << 256) - (1 << 255) - 8) | (1 << 254)
return ec_scalar(k, u, 2**255-19, 121665, 255).to_bytes(32, 'little')
def X448(k, u):
u, k = int.from_bytes(u, 'little') if isinstance(u, bytes) else u, int.from_bytes(k, 'little')
k = k & (-4) | (1 << 447)
return ec_scalar(k, u, 2**448-2**224-1, 39081, 448).to_bytes(56, 'little')
PRIMES = {
enums.DhId.DH_1: (0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A63A3620FFFFFFFFFFFFFFFF, 2, 96),
enums.DhId.DH_2: (0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF, 2, 128),
enums.DhId.DH_5: (0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF, 2, 192),
enums.DhId.DH_14: (0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF, 2, 256),
enums.DhId.DH_15: (0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF, 2, 384),
enums.DhId.DH_16: (0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199FFFFFFFFFFFFFFFF, 2, 512),
enums.DhId.DH_17: (0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DCC4024FFFFFFFFFFFFFFFF, 2, 768),
enums.DhId.DH_18: (0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E438777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F5683423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD922222E04A4037C0713EB57A81A23F0C73473FC646CEA306B4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A364597E899A0255DC164F31CC50846851DF9AB48195DED7EA1B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F924009438B481C6CD7889A002ED5EE382BC9190DA6FC026E479558E447567
7E9AA9E3050E2765694DFC81F56E880B96E7160C980DD98EDD3DFFFFFFFFFFFFFFFFF, 2, 1024),
enums.DhId.DH_19: (0xFFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFF, (0x6B17D1F2E12C4247F8BCE6E563A440F277037D812DEB33A0F4A13945D898C2964FE342E2FE1A7F9B8EE7EB4A7C0F9E162BCE33576B315ECECBB6406837BF51F5, -3), 32),
enums.DhId.DH_20: (0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFF0000000000000000FFFFFFFF, (0xAA87CA22BE8B05378EB1C71EF320AD746E1D3B628BA79B9859F741E082542A385502F25DBF55296C3A545E3872760AB73617DE4A96262C6F5D9E98BF9292DC29F8F41DBD289A147CE9DA3113B5F0B8C00A60B1CE1D7E819D7A431D7C90EA0E5F, -3), 48),
enums.DhId.DH_21: (0x01FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF, (0x00C6858E06B70404E9CD9E3ECB662395B4429C648139053FB521F828AF606B4D3DBAA14B5E77EFE75928FE1DC127A2FFA8DE3348B3C1856A429BF97E7E31C2E5BD66011839296A789A3BC0045C8A5FB42C7D1BD998F54449579B446817AFBD17273E662C97EE72995EF42640C550B9013FAD0761353C7086A272C24088BE94769FD16650, -3), 66),
enums.DhId.DH_22: (0xB10B8F96A080E01DDE92DE5EAE5D54EC52C99FBCFB06A3C69A6A9DCA52D23B616073E28675A23D189838EF1E2EE652C013ECB4AEA906112324975C3CD49B83BFACCBDD7D90C4BD7098488E9C219A73724EFFD6FAE5644738FAA31A4FF55BCCC0A151AF5F0DC8B4BD45BF37DF365C1A65E68CFDA76D4DA708DF1FB2BC2E4A4371, 0xA4D1CBD5C3FD34126765A442EFB99905F8104DD258AC507FD6406CFF14266D31266FEA1E5C41564B777E690F5504F213160217B4B01B886A5E91547F9E2749F4D7FBD7D3B9A92EE1909D0D2263F80A76A6A24C087A091F531DBF0A0169B6A28AD662A4D18E73AFA32D779D5918D08BC8858F4DCEF97C2A24855E6EEB22B3B2E5, 128),
enums.DhId.DH_23: (0xAD107E1E9123A9D0D660FAA79559C51FA20D64E5683B9FD1B54B1597B61D0A75E6FA141DF95A56DBAF9A3C407BA1DF15EB3D688A309C180E1DE6B85A1274A0A66D3F8152AD6AC2129037C9EDEFDA4DF8D91E8FEF55B7394B7AD5B7D0B6C12207C9F98D11ED34DBF6C6BA0B2C8BBC27BE6A00E0A0B9C49708B3BF8A317091883681286130BC8985DB1602E714415D9330278273C7DE31EFDC7310F7121FD5A07415987D9ADC0A486DCDF93ACC44328387315D75E198C641A480CD86A1B9E587E8BE60E69CC928B2B9C52172E413042E9B23F10B0E16E79763C9B53DCF4BA80A29E3FB73C16B8E75B97EF363E2FFA31F71CF9DE5384E71B81C0AC4DFFE0C10E64F, 0xAC4032EF4F2D9AE39DF30B5C8FFDAC506CDEBE7B89998CAF74866A08CFE4FFE3A6824A4E10B9A6F0DD921F01A70C4AFAAB739D7700C29F52C57DB17C620A8652BE5E9001A8D66AD7C17669101999024AF4D027275AC1348BB8A762D0521BC98AE247150422EA1ED409939D54DA7460CDB5F6C6B250717CBEF180EB34118E98D119529A45D6F834566E3025E316A330EFBB77A86F0C1AB15B051AE3D428C8F8ACB70A8137150B8EEB10E183EDD19963DDD9E263E4770589EF6AA21E7F5F2FF381B539CCE3409D13CD566AFBB48D6C019181E1BCFE94B30269EDFE72FE9B6AA4BD7B5A0F1C71CFFF4C19C418E1F6EC017981BC087F2A7065B384B890D3191F2BFA, 256),
enums.DhId.DH_24: (0x87A8E61DB4B6663CFFBBD19C651959998CEEF608660DD0F25D2CEED4435E3B00E00DF8F1D61957D4FAF7DF4561B2AA3016C3D91134096FAA3BF4296D830E9A7C209E0C6497517ABD5A8A9D306BCF67ED91F9E6725B4758C022E0B1EF4275BF7B6C5BFC11D45F9088B941F54EB1E59BB8BC39A0BF12307F5C4FDB70C581B23F76B63ACAE1CAA6B7902D52526735488A0EF13C6D9A51BFA4AB3AD8347796524D8EF6A167B5A41825D967E144E5140564251CCACB83E6B486F6B3CA3F7971506026C0B857F689962856DED4010ABD0BE621C3A3960A54E710C375F26375D7014103A4B54330C198AF126116D2276E11715F693877FAD7EF09CADB094AE91E1A1597, 0x3FB32C9B73134D0B2E77506660EDBD484CA7B18F21EF205407F4793A1A0BA12510DBC15077BE463FFF4FED4AAC0BB555BE3A6C1B0C6B47B1BC3773BF7E8C6F62901228F8C28CBB18A55AE31341000A650196F931C77A57F2DDF463E5E9EC144B777DE62AAAB8A8628AC376D282D6ED3864E67982428EBC831D14348F6F2F9193B5045AF2767164E1DFC967C1FB3F2E55A4BD1BFFE83B9C80D052B985D182EA0ADB2A3B7313D3FE14C8484B1E052588B9B7D2BBD2DF016199ECD06E1557CD0915B3353BBB64E0EC377FD028370DF92B52C7891428CDC67EB6184B523D1DB246C32F63078490F00EF8D647D148D47954515E2327CFEF98C582664B4C0F6CC41659, 256),
enums.DhId.DH_25: (0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFF, (0x188DA80EB03090F67CBF20EB43A18800F4FF0AFD82FF101207192B95FFC8DA78631011ED6B24CDD573F977A11E794811, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFC), 24),
enums.DhId.DH_26: (0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF000000000000000000000001, (0xB70E0CBD6BB4BF7F321390B94A03C1D356C21122343280D6115C1D21BD376388B5F723FB4C22DFE6CD4375A05A07476444D5819985007E34, 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFFFFFFFFFE), 28),
enums.DhId.DH_27: (0xD7C134AA264366862A18302575D1D787B09F075797DA89F57EC8C0FF, (0x0D9029AD2C7E5CF4340823B2A87DC68C9E4CE3174C1E6EFDEE12C07D58AA56F772C0726F24C6B89E4ECDAC24354B9E99CAA3F6D3761402CD, 0x68A5E62CA9CE6C1C299803A6C1530B514E182AD8B0042A59CAD29F43), 28),
enums.DhId.DH_28: (0xA9FB57DBA1EEA9BC3E660A909D838D726E3BF623D52620282013481D1F6E5377, (0x8BD2AEB9CB7E57CB2C4B482FFC81B7AFB9DE27E1E3BD23C23A4453BD9ACE3262547EF835C3DAC4FD97F8461A14611DC9C27745132DED8E545C1D54C72F046997, 0x7D5A0975FC2C3057EEF67530417AFFE7FB8055C126DC5C6CE94A4B44F330B5D9), 32),
enums.DhId.DH_29: (0x8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B412B1DA197FB71123ACD3A729901D1A71874700133107EC53, (0x1D1C64F068CF45FFA2A63A81B7C13F6B8847A3E77EF14FE3DB7FCAFE0CBD10E8E826E03436D646AAEF87B2E247D4AF1E8ABE1D7520F9C2A45CB1EB8E95CFD55262B70B29FEEC5864E19C054FF99129280E4646217791811142820341263C5315, 0x7BC382C63D8C150C3C72080ACE05AFA0C2BEA28E4FB22787139165EFBA91F90F8AA5814A503AD4EB04A8C7DD22CE2826), 48),
enums.DhId.DH_30: (0xAADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA703308717D4D9B009BC66842AECDA12AE6A380E62881FF2F2D82C68528AA6056583A48F3, (0x81AEE4BDD82ED9645A21322E9C4C6A9385ED9F70B5D916C1B43B62EEF4D0098EFF3B1F78E2D0D48D50D1687B93B97D5F7C6D5047406A5E688B352209BCB9F8227DDE385D566332ECC0EABFA9CF7822FDF209F70024A57B1AA000C55B881F8111B2DCDE494A5F485E5BCA4BD88A2763AED1CA2B2FA8F0540678CD1E0F3AD80892, 0x7830A3318B603B89E2327145AC234CC594CBDD8D3DF91610A83441CAEA9863BC2DED5D5AA8253AA10A2EF1C98B9AC8B57F1117A72BF2C7B9E7C1AC4D77FC94CA), 64),
enums.DhId.DH_31: (1<<32, X25519, 9),
enums.DhId.DH_32: (1<<56, X448, 5),
}
def DiffieHellman(group, peer):
    """Run one Diffie-Hellman key exchange for the given group id.

    Args:
        group: key into the PRIMES table (a DH group identifier).
        peer: the peer's public value, as bytes.

    Returns:
        A tuple of two byte strings: (own public value, shared secret
        derived from ``peer``).

    Raises:
        Exception: if ``group`` is not a known group.
    """
    if group not in PRIMES:
        raise Exception(f'Unsupported DH Group DH_{group}')
    # Table entries are (p, g, l). g is an int generator for MODP groups,
    # a tuple for elliptic-curve groups, or a callable for X25519/X448.
    # For EC entries l is the field size in bytes; for the callable
    # entries l is the curve base point (9 for X25519, 5 for X448).
    p, g, l = PRIMES[group]
    # Private scalar; the p>>8 lower bound avoids very small exponents.
    # NOTE(review): for the X25519/X448 entries p is only 2**32 / 2**56,
    # so the private scalar has at most 32/56 bits of entropy -- far below
    # the curves' requirements. Verify this is intentional.
    a = random.randrange(p>>8, p)
    if callable(g):
        # Montgomery-curve exchange: g(scalar, u-coordinate).
        return g(a, l), g(a, peer)
    elif type(g) is tuple:
        # EC group: g[0] appears to be the base point packed into a
        # 2*l-byte integer and g[1] a curve parameter -- TODO confirm
        # against ec_mul's definition. The shared secret keeps only the
        # first l bytes (the x-coordinate).
        return ec_mul(g[0], l, a, p, g[1]).to_bytes(l*2, 'big'), ec_mul(int.from_bytes(peer, 'big'), l, a, p, g[1]).to_bytes(l*2, 'big')[:l]
    else:
        # Classic MODP group: plain modular exponentiation.
        return pow(g, a, p).to_bytes(l, 'big'), pow(int.from_bytes(peer, 'big'), a, p).to_bytes(l, 'big')
| true | true |
f7f46e4ff4964d7d39f36c9ec844a49fda3d2b4d | 3,051 | py | Python | test/functional/rpc_deprecated.py | tngc-one/tngcoin | 1382521c4f897cf798e840fee2ce9abd70bbb99b | [
"MIT"
] | null | null | null | test/functional/rpc_deprecated.py | tngc-one/tngcoin | 1382521c4f897cf798e840fee2ce9abd70bbb99b | [
"MIT"
] | null | null | null | test/functional/rpc_deprecated.py | tngc-one/tngcoin | 1382521c4f897cf798e840fee2ce9abd70bbb99b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2017-2019 The TNGC Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test deprecation of RPC calls."""
from test_framework.test_framework import TNGCTestFramework
from test_framework.util import assert_raises_rpc_error, find_vout_for_address
class DeprecatedRpcTest(TNGCTestFramework):
    """Verify deprecated RPC behaviour with and without -deprecatedrpc flags.

    Node 0 runs without the flag (deprecated call must fail); node 1 runs
    with ``-deprecatedrpc=bumpfee`` (deprecated call must still work).
    """
    def set_test_params(self):
        self.num_nodes = 2
        self.setup_clean_chain = True
        self.extra_args = [[], ['-deprecatedrpc=bumpfee']]
    def run_test(self):
        # This test should be used to verify correct behaviour of deprecated
        # RPC methods with and without the -deprecatedrpc flags. For example:
        #
        # In set_test_params:
        # self.extra_args = [[], ["-deprecatedrpc=generate"]]
        #
        # In run_test:
        # self.log.info("Test generate RPC")
        # assert_raises_rpc_error(-32, 'The wallet generate rpc method is deprecated', self.nodes[0].rpc.generate, 1)
        # self.nodes[1].generate(1)
        if self.is_wallet_compiled():
            self.log.info("Test bumpfee RPC")
            # Fund the default wallet, then create watch-only wallets
            # (private keys disabled) on both nodes.
            self.nodes[0].generate(101)
            self.nodes[0].createwallet(wallet_name='nopriv', disable_private_keys=True)
            noprivs0 = self.nodes[0].get_wallet_rpc('nopriv')
            w0 = self.nodes[0].get_wallet_rpc(self.default_wallet_name)
            self.nodes[1].createwallet(wallet_name='nopriv', disable_private_keys=True)
            noprivs1 = self.nodes[1].get_wallet_rpc('nopriv')
            # Build a signed, RBF-enabled transaction spending to an
            # address both watch-only wallets can see.
            address = w0.getnewaddress()
            desc = w0.getaddressinfo(address)['desc']
            change_addr = w0.getrawchangeaddress()
            change_desc = w0.getaddressinfo(change_addr)['desc']
            txid = w0.sendtoaddress(address=address, amount=10)
            vout = find_vout_for_address(w0, txid, address)
            self.nodes[0].generate(1)
            rawtx = w0.createrawtransaction([{'txid': txid, 'vout': vout}], {w0.getnewaddress(): 5}, 0, True)
            rawtx = w0.fundrawtransaction(rawtx, {'changeAddress': change_addr})
            signed_tx = w0.signrawtransactionwithwallet(rawtx['hex'])['hex']
            noprivs0.importmulti([{'desc': desc, 'timestamp': 0}, {'desc': change_desc, 'timestamp': 0, 'internal': True}])
            noprivs1.importmulti([{'desc': desc, 'timestamp': 0}, {'desc': change_desc, 'timestamp': 0, 'internal': True}])
            txid = w0.sendrawtransaction(signed_tx)
            self.sync_all()
            # Without the flag, bumpfee on a watch-only wallet must error.
            assert_raises_rpc_error(-32, 'Using bumpfee with wallets that have private keys disabled is deprecated. Use psbtbumpfee instead or restart tngcd with -deprecatedrpc=bumpfee. This functionality will be removed in 0.22', noprivs0.bumpfee, txid)
            # With the flag, bumpfee degrades to returning a PSBT.
            bumped_psbt = noprivs1.bumpfee(txid)
            assert 'psbt' in bumped_psbt
        else:
            self.log.info("No tested deprecated RPC methods")
# Standard functional-test entry point.
if __name__ == '__main__':
    DeprecatedRpcTest().main()
| 50.016393 | 254 | 0.6588 |
from test_framework.test_framework import TNGCTestFramework
from test_framework.util import assert_raises_rpc_error, find_vout_for_address
class DeprecatedRpcTest(TNGCTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args = [[], ['-deprecatedrpc=bumpfee']]
def run_test(self):
if self.is_wallet_compiled():
self.log.info("Test bumpfee RPC")
self.nodes[0].generate(101)
self.nodes[0].createwallet(wallet_name='nopriv', disable_private_keys=True)
noprivs0 = self.nodes[0].get_wallet_rpc('nopriv')
w0 = self.nodes[0].get_wallet_rpc(self.default_wallet_name)
self.nodes[1].createwallet(wallet_name='nopriv', disable_private_keys=True)
noprivs1 = self.nodes[1].get_wallet_rpc('nopriv')
address = w0.getnewaddress()
desc = w0.getaddressinfo(address)['desc']
change_addr = w0.getrawchangeaddress()
change_desc = w0.getaddressinfo(change_addr)['desc']
txid = w0.sendtoaddress(address=address, amount=10)
vout = find_vout_for_address(w0, txid, address)
self.nodes[0].generate(1)
rawtx = w0.createrawtransaction([{'txid': txid, 'vout': vout}], {w0.getnewaddress(): 5}, 0, True)
rawtx = w0.fundrawtransaction(rawtx, {'changeAddress': change_addr})
signed_tx = w0.signrawtransactionwithwallet(rawtx['hex'])['hex']
noprivs0.importmulti([{'desc': desc, 'timestamp': 0}, {'desc': change_desc, 'timestamp': 0, 'internal': True}])
noprivs1.importmulti([{'desc': desc, 'timestamp': 0}, {'desc': change_desc, 'timestamp': 0, 'internal': True}])
txid = w0.sendrawtransaction(signed_tx)
self.sync_all()
assert_raises_rpc_error(-32, 'Using bumpfee with wallets that have private keys disabled is deprecated. Use psbtbumpfee instead or restart tngcd with -deprecatedrpc=bumpfee. This functionality will be removed in 0.22', noprivs0.bumpfee, txid)
bumped_psbt = noprivs1.bumpfee(txid)
assert 'psbt' in bumped_psbt
else:
self.log.info("No tested deprecated RPC methods")
if __name__ == '__main__':
DeprecatedRpcTest().main()
| true | true |
f7f46eaef097dec88622d986d7af48ad362ccfca | 3,141 | py | Python | python_modules/libraries/dagster-dbt/dagster_dbt/rpc/utils.py | asamoal/dagster | 08fad28e4b608608ce090ce2e8a52c2cf9dd1b64 | [
"Apache-2.0"
] | null | null | null | python_modules/libraries/dagster-dbt/dagster_dbt/rpc/utils.py | asamoal/dagster | 08fad28e4b608608ce090ce2e8a52c2cf9dd1b64 | [
"Apache-2.0"
] | null | null | null | python_modules/libraries/dagster-dbt/dagster_dbt/rpc/utils.py | asamoal/dagster | 08fad28e4b608608ce090ce2e8a52c2cf9dd1b64 | [
"Apache-2.0"
] | null | null | null | import logging
from collections import defaultdict
from enum import Enum
from typing import Dict, List
from requests import Response
from requests.exceptions import RequestException
from dagster import Failure, RetryRequested
from dagster.core.execution.context.compute import SolidExecutionContext
def fmt_rpc_logs(logs: List[Dict[str, str]]) -> Dict[int, str]:
    """Group dbt RPC log records by log level into newline-joined strings.

    Args:
        logs: raw dbt RPC log records; each must carry a ``levelname`` key
            and may carry ``timestamp`` and ``message`` keys.

    Returns:
        Mapping of numeric ``logging`` level (e.g. ``logging.INFO``) to a
        single string containing all records of that level, one per line,
        formatted as ``timestamp - LEVEL - message``.
    """
    grouped: Dict[int, List[str]] = defaultdict(list)
    for record in logs:
        levelname = record["levelname"]
        grouped[getattr(logging, levelname)].append(
            f"{record.get('timestamp')} - {levelname} - {record.get('message')}"
        )
    # Note: the comprehension variable no longer shadows the ``logs``
    # parameter (the original reused the name).
    return {level: "\n".join(lines) for level, lines in grouped.items()}
def log_rpc(context: SolidExecutionContext, logs: List[Dict]) -> None:
    """Forward dbt RPC log records to the solid's logger.

    Records are grouped by level via ``fmt_rpc_logs`` and emitted as one
    multi-line message per level. Does nothing for an empty record list.
    """
    if len(logs) == 0:
        return
    grouped = fmt_rpc_logs(logs)
    for level, message in grouped.items():
        context.log.log(level=level, msg=message)
class DBTErrors(Enum):
    """JSON-RPC error codes returned by the dbt RPC server."""
    # Transient conditions: raise_for_rpc_error retries on these.
    project_currently_compiling_error = 10010
    runtime_error = 10001
    server_error = -32000
    # Fatal conditions: raise_for_rpc_error raises Failure on these.
    project_compile_failure_error = 10011
    rpc_process_killed_error = 10009
    rpc_timeout_error = 10008
def raise_for_rpc_error(context: SolidExecutionContext, resp: Response) -> None:
    """Inspect a dbt RPC response and react to any reported error.

    Transient error codes (compiling, runtime, server) log a warning and
    request a retry. Fatal codes raise a Failure carrying the relevant RPC
    metadata. Responses without an ``error`` field are left untouched.
    """
    error = resp.json().get("error")
    if error is None:
        return

    code = error["code"]
    message = error["message"]

    transient_codes = (
        DBTErrors.project_currently_compiling_error.value,
        DBTErrors.runtime_error.value,
        DBTErrors.server_error.value,
    )
    if code in transient_codes:
        context.log.warning(message)
        raise RetryRequested(max_retries=5, seconds_to_wait=30)

    # Fatal path: collect code-specific metadata, then fail.
    metadata = {"RPC Error Code": str(code)}
    if code == DBTErrors.project_compile_failure_error.value:
        metadata["RPC Error Cause"] = error["data"]["cause"]["message"]
    elif code == DBTErrors.rpc_process_killed_error.value:
        metadata["RPC Signum"] = str(error["data"]["signum"])
        metadata["RPC Error Message"] = error["data"]["message"]
    elif code == DBTErrors.rpc_timeout_error.value:
        metadata["RPC Timeout"] = str(error["data"]["timeout"])
        metadata["RPC Error Message"] = error["data"]["message"]
    raise Failure(description=message, metadata=metadata)
def is_fatal_code(e: RequestException) -> bool:
    """Return True if the exception's HTTP status should NOT be retried.

    4xx responses are treated as fatal client errors, except 429 (Too Many
    Requests), which is retryable. Exceptions that carry no response at all
    (e.g. connection errors/timeouts) are treated as non-fatal so the
    caller may retry; the original code raised AttributeError-style
    failures on ``None`` responses.
    """
    if e.response is None:
        return False
    return 400 <= e.response.status_code < 500 and e.response.status_code != 429
| 36.103448 | 80 | 0.590895 | import logging
from collections import defaultdict
from enum import Enum
from typing import Dict, List
from requests import Response
from requests.exceptions import RequestException
from dagster import Failure, RetryRequested
from dagster.core.execution.context.compute import SolidExecutionContext
def fmt_rpc_logs(logs: List[Dict[str, str]]) -> Dict[int, str]:
d = defaultdict(list)
for log in logs:
levelname = log["levelname"]
d[getattr(logging, levelname)].append(
f"{log.get('timestamp')} - {levelname} - {log.get('message')}"
)
return {level: "\n".join(logs) for level, logs in d.items()}
def log_rpc(context: SolidExecutionContext, logs: List[Dict]) -> None:
if len(logs) > 0:
logs_fmt = fmt_rpc_logs(logs)
for level, logs_str in logs_fmt.items():
context.log.log(level=level, msg=logs_str)
class DBTErrors(Enum):
project_currently_compiling_error = 10010
runtime_error = 10001
server_error = -32000
project_compile_failure_error = 10011
rpc_process_killed_error = 10009
rpc_timeout_error = 10008
def raise_for_rpc_error(context: SolidExecutionContext, resp: Response) -> None:
error = resp.json().get("error")
if error is not None:
if error["code"] in [
DBTErrors.project_currently_compiling_error.value,
DBTErrors.runtime_error.value,
DBTErrors.server_error.value,
]:
context.log.warning(error["message"])
raise RetryRequested(max_retries=5, seconds_to_wait=30)
elif error["code"] == DBTErrors.project_compile_failure_error.value:
raise Failure(
description=error["message"],
metadata={
"RPC Error Code": str(error["code"]),
"RPC Error Cause": error["data"]["cause"]["message"],
},
)
elif error["code"] == DBTErrors.rpc_process_killed_error.value:
raise Failure(
description=error["message"],
metadata={
"RPC Error Code": str(error["code"]),
"RPC Signum": str(error["data"]["signum"]),
"RPC Error Message": error["data"]["message"],
},
)
elif error["code"] == DBTErrors.rpc_timeout_error.value:
raise Failure(
description=error["message"],
metadata={
"RPC Error Code": str(error["code"]),
"RPC Timeout": str(error["data"]["timeout"]),
"RPC Error Message": error["data"]["message"],
},
)
else:
raise Failure(
description=error["message"],
metadata={"RPC Error Code": str(error["code"])},
)
def is_fatal_code(e: RequestException) -> bool:
return 400 <= e.response.status_code < 500 and e.response.status_code != 429
| true | true |
f7f46eb2d5ae6aec92a804bbec911097120e9188 | 581 | py | Python | first_site/blog/migrations/0001_initial.py | babu-thomas/django-tutorial | 9f650f50ec481ffbac051760f5117ba8ce3b7101 | [
"MIT"
] | null | null | null | first_site/blog/migrations/0001_initial.py | babu-thomas/django-tutorial | 9f650f50ec481ffbac051760f5117ba8ce3b7101 | [
"MIT"
] | null | null | null | first_site/blog/migrations/0001_initial.py | babu-thomas/django-tutorial | 9f650f50ec481ffbac051760f5117ba8ce3b7101 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.2 on 2018-02-14 09:02
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('body', models.TextField()),
('date', models.DateTimeField()),
],
),
]
| 24.208333 | 114 | 0.550775 |
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('body', models.TextField()),
('date', models.DateTimeField()),
],
),
]
| true | true |
f7f46f7e353ff8dfc8217b04b6e66ca2847063d8 | 2,873 | py | Python | Battle/Monsters.py | micro164/AdvGame | cf27f86b5773b97de195596c512879f9e8be1a04 | [
"MIT"
] | 3 | 2017-12-31T02:41:34.000Z | 2018-02-18T23:56:44.000Z | Battle/Monsters.py | micro164/AdvGame | cf27f86b5773b97de195596c512879f9e8be1a04 | [
"MIT"
] | null | null | null | Battle/Monsters.py | micro164/AdvGame | cf27f86b5773b97de195596c512879f9e8be1a04 | [
"MIT"
] | null | null | null | from Classes.Classes import Player
from Classes.Classes import Monster
# Stats for monsters, stored as mutable lists indexed by:
# 0-HP, 1-attack, 2-defense, 3-exp, 4-lvl, 5-MaxHP
# (The original wrapped every tuple in list((...)); plain list literals
# are equivalent and idiomatic.)
# NOTE(review): several MaxHP values are lower than the starting HP
# (e.g. 'Wild Chicken' 40 vs 10) -- preserved as-is, verify intent.
Monsters = {'rat': [50, 12, 7, 10, 1, 50],
            'Wild Chicken': [40, 10, 5, 5, 1, 10],
            'Spider': [65, 15, 10, 15, 3, 25],
            'goblin': [80, 17, 7, 35, 5, 100],
            'Giant Spider': [110, 25, 15, 75, 10, 75],
            'Giant Rat': [130, 40, 35, 100, 12, 100],
            'Armored Goblin': [150, 80, 45, 125, 15, 150],
            'Zombie': [250, 100, 30, 150, 17, 250],
            'Goblin Zombie': [400, 155, 50, 200, 20, 400],
            'Wolf': [350, 195, 55, 190, 23, 200],
            'Undead Wolf': [600, 220, 60, 250, 25, 400],
            'Ghost': [800, 250, 65, 225, 30, 500],
            'Ghoul': [825, 260, 70, 300, 35, 425],
            'Vampire': [950, 290, 80, 500, 40, 650],
            'Cyclops': [1200, 280, 130, 525, 45, 700],
            'Mummy': [1675, 285, 265, 550, 50, 675],
            'Earth Elemental': [1800, 270, 280, 700, 55, 800],
            'Wind Elemental': [1750, 240, 290, 700, 55, 750],
            'Fire Elemental': [1710, 220, 210, 700, 55, 710],
            'Water Elemental': [1850, 280, 400, 700, 55, 850],
            'Basilisk': [1900, 300, 490, 800, 60, 900],
            'Angel': [2000, 320, 700, 1000, 65, 1000],
            'Griffon': [2125, 350, 725, 1300, 70, 1125],
            'Baby Dragon': [2500, 400, 775, 2000, 75, 1500],
            'Ifrit': [2700, 450, 750, 1800, 80, 1700],
            'Phoenix': [4000, 475, 850, 2400, 85, 2000],
            'Adamantoise': [4000, 500, 900, 2200, 90, 3000],
            'Elder Lich': [4500, 800, 975, 2500, 95, 3500],
            'Dragon': [10000, 1200, 1100, 5000, 100, 5000]}
def monster_list():
    """Return the subset of Monsters the player may currently fight.

    Below the level cap, a monster qualifies when its level is between 1
    and the player's level plus five. At the cap (level 100), every
    monster at or below the player's level qualifies.
    """
    available = {}
    for name, stats in list(Monsters.items()):
        monster_level = stats[Monster.lvl]
        if Player.lvl == 100:
            if monster_level <= Player.lvl:
                available[name] = stats
        elif 1 <= monster_level <= (Player.lvl + 5):
            available[name] = stats
    return available
| 58.632653 | 83 | 0.405848 | from Classes.Classes import Player
from Classes.Classes import Monster
Monsters = {'rat': list((50, 12, 7, 10, 1, 50)),
'Wild Chicken': list((40, 10, 5, 5, 1, 10)),
'Spider': list((65, 15, 10, 15, 3, 25)),
'goblin': list((80, 17, 7, 35, 5, 100)),
'Giant Spider': list((110, 25, 15, 75, 10, 75)),
'Giant Rat': list((130, 40, 35, 100, 12, 100)),
'Armored Goblin': list((150, 80, 45, 125, 15, 150)),
'Zombie': list((250, 100, 30, 150, 17, 250)),
'Goblin Zombie': list((400, 155, 50, 200, 20, 400)),
'Wolf': list((350, 195, 55, 190, 23, 200)),
'Undead Wolf': list((600, 220, 60, 250, 25, 400)),
'Ghost': list((800, 250, 65, 225, 30, 500)),
'Ghoul': list((825, 260, 70, 300, 35, 425)),
'Vampire': list((950, 290, 80, 500, 40, 650)),
'Cyclops': list((1200, 280, 130, 525, 45, 700)),
'Mummy': list((1675, 285, 265, 550, 50, 675)),
'Earth Elemental': list((1800, 270, 280, 700, 55, 800)),
'Wind Elemental': list((1750, 240, 290, 700, 55, 750)),
'Fire Elemental': list((1710, 220, 210, 700, 55, 710)),
'Water Elemental': list((1850, 280, 400, 700, 55, 850)),
'Basilisk': list((1900, 300, 490, 800, 60, 900)),
'Angel': list((2000, 320, 700, 1000, 65, 1000)),
'Griffon': list((2125, 350, 725, 1300, 70, 1125)),
'Baby Dragon': list((2500, 400, 775, 2000, 75, 1500)),
'Ifrit': list((2700, 450, 750, 1800, 80, 1700)),
'Phoenix': list((4000, 475, 850, 2400, 85, 2000)),
'Adamantoise': list((4000, 500, 900, 2200, 90, 3000)),
'Elder Lich': list((4500, 800, 975, 2500, 95, 3500)),
'Dragon': list((10000, 1200, 1100, 5000, 100, 5000))}
def monster_list():
temp = {}
for key, value in list(Monsters.items()):
if Player.lvl == 100:
if value[Monster.lvl] <= Player.lvl:
temp[key] = Monsters[key]
elif 1 <= value[Monster.lvl] <= (Player.lvl + 5):
temp[key] = Monsters[key]
return temp
| true | true |
f7f4707e31db79b9223f44141a5bb02dc06f3d78 | 1,089 | py | Python | generate_matrix_pairs/create_random_matrix.py | bentondrew/demo_software | 33c9bc9c0fc94b9c7910f7c1f348d5282546c845 | [
"Apache-2.0"
] | null | null | null | generate_matrix_pairs/create_random_matrix.py | bentondrew/demo_software | 33c9bc9c0fc94b9c7910f7c1f348d5282546c845 | [
"Apache-2.0"
] | null | null | null | generate_matrix_pairs/create_random_matrix.py | bentondrew/demo_software | 33c9bc9c0fc94b9c7910f7c1f348d5282546c845 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016
# Drewan Tech, LLC
# ALL RIGHTS RESERVED
def create_matrix(number_of_rows,
                  number_of_columns,
                  scaling_factor):
    """Create a random matrix of the given size.

    Every entry is a uniform random float in the range [0, scaling_factor).
    """
    import random
    from drewantech_common.value_checks import is_number_type_not_complex
    # Both dimensions must be plain ints.
    for label, dimension in (('rows', number_of_rows),
                             ('columns', number_of_columns)):
        if type(dimension) is not int:
            raise TypeError('Number of {} arg passed to create_matrix '
                            'function is not an int type'.format(label))
    if not is_number_type_not_complex(scaling_factor):
        raise TypeError('Scaling factor arg passed to create_matrix function '
                        'is not an int or float type.')
    return [[random.random() * scaling_factor
             for _ in range(number_of_columns)]
            for _ in range(number_of_rows)]
if __name__ == '__main__':
  # Smoke test: print a 7x3 random matrix with entries in [0, 10).
  print(create_matrix(7, 3, 10))
| 36.3 | 77 | 0.678604 |
def create_matrix(number_of_rows,
number_of_columns,
scaling_factor):
import random
from drewantech_common.value_checks import is_number_type_not_complex
if type(number_of_rows) is not int:
raise TypeError('Number of rows arg passed to create_matrix function is '
'not an int type')
if type(number_of_columns) is not int:
raise TypeError('Number of columns arg passed to create_matrix function '
'is not an int type')
if not is_number_type_not_complex(scaling_factor):
raise TypeError('Scaling factor arg passed to create_matrix function '
'is not an int or float type.')
return [[(random.random() * scaling_factor)
for column in range(number_of_columns)]
for row in range(number_of_rows)]
if __name__ == '__main__':
print(create_matrix(7, 3, 10))
| true | true |
f7f47179de699961d9f8b77e8fa295cf2e988277 | 406 | py | Python | secrets.py | jimbobbennett/mandmcounter | 1edc39d42e0cd8249695a7f6397675b3291957bb | [
"MIT"
] | 1 | 2021-08-16T07:25:43.000Z | 2021-08-16T07:25:43.000Z | secrets.py | jimbobbennett/mandmcounter | 1edc39d42e0cd8249695a7f6397675b3291957bb | [
"MIT"
] | 2 | 2020-04-30T16:46:24.000Z | 2020-04-30T16:46:51.000Z | secrets.py | jimbobbennett/mandmcounter | 1edc39d42e0cd8249695a7f6397675b3291957bb | [
"MIT"
] | null | null | null | # This file is where you keep secret settings, passwords, and tokens!
# If you put them in the code you risk committing that info or sharing it
# which would be not great. So, instead, keep it all in this one file and
# keep it a secret.
secrets = {
    # Wi-Fi credentials.
    'ssid': '<ssid>',
    'password': '<password>',
    # Prediction-service credentials -- the consuming service is not shown
    # in this file; confirm against the code that reads these keys.
    'prediction_key': '<prediction_key>',
    'prediction_endpoint': '<prediction_endpoint>',
}
| 33.833333 | 73 | 0.67734 |
secrets = {
'ssid' : '<ssid>',
'password' : '<password>',
'prediction_key' : '<prediction_key>',
'prediction_endpoint' : '<prediction_endpoint>'
}
| true | true |
f7f471983f80ead50724ee0c4ebf677c8cdf1a09 | 1,659 | py | Python | catalyst/rl/scripts/dump_redis.py | Felix-neko/catalyst | df80986f1c12ef6a3776637453a0c04aaef0068c | [
"Apache-2.0"
] | 1 | 2022-03-13T21:40:17.000Z | 2022-03-13T21:40:17.000Z | catalyst/rl/scripts/dump_redis.py | Felix-neko/catalyst | df80986f1c12ef6a3776637453a0c04aaef0068c | [
"Apache-2.0"
] | null | null | null | catalyst/rl/scripts/dump_redis.py | Felix-neko/catalyst | df80986f1c12ef6a3776637453a0c04aaef0068c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import argparse
import pickle
from tqdm import tqdm
from redis import Redis
from catalyst import utils
def build_args(parser):
    """Attach the dump-script command-line options to *parser*.

    Returns the same parser for chaining.
    """
    option_specs = [
        ("--host", dict(type=str, default="127.0.0.1")),   # Redis host
        ("--port", dict(type=int, default=12000)),         # Redis port
        ("--out-pkl", dict(type=str, required=True)),      # output path template
        ("--chunk-size", dict(type=int, default=None)),    # flush every N items
        ("--start-from", dict(type=int, default=0)),       # first list index
        ("--min-reward", dict(type=int, default=None)),    # reward filter
    ]
    for flag, options in option_specs:
        parser.add_argument(flag, **options)
    return parser
def parse_args():
    """Build the argument parser and parse the process's command line."""
    return build_args(argparse.ArgumentParser()).parse_args()
def main(args, _=None):
    """Dump trajectories stored in a Redis list to pickle file(s).

    Reads indices ``args.start_from`` up to (but excluding) the last list
    element of the ``trajectories`` key. With ``--min-reward`` set, only
    trajectories whose summed reward exceeds the threshold are kept. With
    ``--chunk-size`` set, accumulated trajectories are flushed periodically
    to ``args.out_pkl.format(suffix=i)``; the remainder is always written
    with ``suffix=redis_len``.
    """
    db = Redis(host=args.host, port=args.port)
    # NOTE(review): the original intentionally(?) skips the final list
    # element -- preserved here; confirm whether that is desired.
    redis_len = db.llen("trajectories") - 1
    trajectories = []
    for i in tqdm(range(args.start_from, redis_len)):
        trajectory = db.lindex("trajectories", i)
        if args.min_reward is not None:
            # Unpack to inspect the reward; drop low-reward trajectories.
            unpacked = utils.unpack(trajectory)
            if sum(unpacked["trajectory"][-2]) > args.min_reward:
                trajectories.append(utils.pack(unpacked))
        else:
            trajectories.append(trajectory)
        if args.chunk_size is not None \
                and (i - args.start_from) % args.chunk_size == 0:
            with open(args.out_pkl.format(suffix=i), "wb") as fout:
                pickle.dump(trajectories, fout)
            trajectories = []
    # Bug fix: the original reused the loop variable ``i`` for the final
    # dump, which raised NameError when the loop never ran and could
    # overwrite the last chunk file with the (possibly empty) remainder.
    # ``redis_len`` is always defined and never collides with a chunk
    # suffix (chunk suffixes are strictly below it).
    with open(args.out_pkl.format(suffix=redis_len), "wb") as fout:
        pickle.dump(trajectories, fout)
# CLI entry point.
if __name__ == "__main__":
    args = parse_args()
    main(args)
| 29.105263 | 67 | 0.639542 |
import argparse
import pickle
from tqdm import tqdm
from redis import Redis
from catalyst import utils
def build_args(parser):
parser.add_argument("--host", type=str, default="127.0.0.1")
parser.add_argument("--port", type=int, default=12000)
parser.add_argument("--out-pkl", type=str, required=True)
parser.add_argument("--chunk-size", type=int, default=None)
parser.add_argument("--start-from", type=int, default=0)
parser.add_argument("--min-reward", type=int, default=None)
return parser
def parse_args():
parser = argparse.ArgumentParser()
build_args(parser)
args = parser.parse_args()
return args
def main(args, _=None):
db = Redis(host=args.host, port=args.port)
redis_len = db.llen("trajectories") - 1
trajectories = []
for i in tqdm(range(args.start_from, redis_len)):
trajectory = db.lindex("trajectories", i)
if args.min_reward is not None:
trajectory = utils.unpack(trajectory)
if sum(trajectory["trajectory"][-2]) > args.min_reward:
trajectory = utils.pack(trajectory)
trajectories.append(trajectory)
else:
trajectories.append(trajectory)
if args.chunk_size is not None \
and (i - args.start_from) % args.chunk_size == 0:
with open(args.out_pkl.format(suffix=i), "wb") as fout:
pickle.dump(trajectories, fout)
trajectories = []
with open(args.out_pkl.format(suffix=i), "wb") as fout:
pickle.dump(trajectories, fout)
if __name__ == "__main__":
args = parse_args()
main(args)
| true | true |
f7f472f1f362d209cfc3f18e5f76d0bcbdb75c33 | 15,792 | py | Python | test/hummingbot/strategy/dev_simple_trade/test_simple_trade.py | joedomino874/hummingbot | cb3ee5a30a2feb0a55ceca9d200c59662d7e3057 | [
"Apache-2.0"
] | 3,027 | 2019-04-04T18:52:17.000Z | 2022-03-30T09:38:34.000Z | test/hummingbot/strategy/dev_simple_trade/test_simple_trade.py | joedomino874/hummingbot | cb3ee5a30a2feb0a55ceca9d200c59662d7e3057 | [
"Apache-2.0"
] | 4,080 | 2019-04-04T19:51:11.000Z | 2022-03-31T23:45:21.000Z | test/hummingbot/strategy/dev_simple_trade/test_simple_trade.py | joedomino874/hummingbot | cb3ee5a30a2feb0a55ceca9d200c59662d7e3057 | [
"Apache-2.0"
] | 1,342 | 2019-04-04T20:50:53.000Z | 2022-03-31T15:22:36.000Z | #!/usr/bin/env python
from hummingbot.strategy.market_trading_pair_tuple import MarketTradingPairTuple
from decimal import Decimal
import logging; logging.basicConfig(level=logging.ERROR)
import pandas as pd
from typing import List
import unittest
from hummingbot.core.clock import (
Clock,
ClockMode
)
from hummingbot.core.event.event_logger import EventLogger
from hummingbot.core.event.events import (
MarketEvent,
OrderCancelledEvent,
TradeType,
OrderType,
OrderFilledEvent,
BuyOrderCompletedEvent,
SellOrderCompletedEvent,
TradeFee
)
from hummingbot.core.data_type.limit_order import LimitOrder
from hummingbot.strategy.dev_simple_trade.dev_simple_trade import SimpleTradeStrategy
from hummingbot.connector.exchange.paper_trade.paper_trade_exchange import QuantizationParams
from test.mock.mock_paper_exchange import MockPaperExchange
class SimpleTradeUnitTest(unittest.TestCase):
start: pd.Timestamp = pd.Timestamp("2019-01-01", tz="UTC")
end: pd.Timestamp = pd.Timestamp("2019-01-01 01:00:00", tz="UTC")
start_timestamp: float = start.timestamp()
end_timestamp: float = end.timestamp()
maker_trading_pairs: List[str] = ["COINALPHA-WETH", "COINALPHA", "WETH"]
clock_tick_size = 10
    def setUp(self):
        """Build the backtest clock, a paper exchange with a balanced order
        book, four strategy variants (limit/market x buy/sell), and event
        loggers for fills, cancels, and completed orders."""
        self.clock: Clock = Clock(ClockMode.BACKTEST, self.clock_tick_size, self.start_timestamp, self.end_timestamp)
        self.market: MockPaperExchange = MockPaperExchange()
        self.mid_price = 100
        self.time_delay = 15
        self.cancel_order_wait_time = 45
        # Symmetric book around mid_price=100 so limit orders at 99/101
        # rest in the book without immediately crossing.
        self.market.set_balanced_order_book(self.maker_trading_pairs[0],
                                            mid_price=self.mid_price, min_price=1,
                                            max_price=200, price_step_size=1, volume_step_size=10)
        self.market.set_balance("COINALPHA", 500)
        self.market.set_balance("WETH", 5000)
        self.market.set_balance("QETH", 500)
        self.market.set_quantization_param(
            QuantizationParams(
                self.maker_trading_pairs[0], 6, 6, 6, 6
            )
        )
        self.market_info: MarketTradingPairTuple = MarketTradingPairTuple(
            *(
                [self.market] + self.maker_trading_pairs
            )
        )
        logging_options: int = (SimpleTradeStrategy.OPTION_LOG_ALL &
                                (~SimpleTradeStrategy.OPTION_LOG_NULL_ORDER_SIZE))
        # Define strategies to test: each trades 1 unit after a 15 s delay;
        # limit orders auto-cancel after 45 s.
        self.limit_buy_strategy: SimpleTradeStrategy = SimpleTradeStrategy(
            [self.market_info],
            order_type="limit",
            order_price=Decimal("99"),
            cancel_order_wait_time=self.cancel_order_wait_time,
            is_buy=True,
            time_delay=self.time_delay,
            order_amount=Decimal("1.0"),
            logging_options=logging_options
        )
        self.limit_sell_strategy: SimpleTradeStrategy = SimpleTradeStrategy(
            [self.market_info],
            order_type="limit",
            order_price=Decimal("101"),
            cancel_order_wait_time=self.cancel_order_wait_time,
            is_buy=False,
            time_delay=self.time_delay,
            order_amount=Decimal("1.0"),
            logging_options=logging_options
        )
        self.market_buy_strategy: SimpleTradeStrategy = SimpleTradeStrategy(
            [self.market_info],
            order_type="market",
            order_price=None,
            cancel_order_wait_time=self.cancel_order_wait_time,
            is_buy=True,
            time_delay=self.time_delay,
            order_amount=Decimal("1.0"),
            logging_options=logging_options
        )
        self.market_sell_strategy: SimpleTradeStrategy = SimpleTradeStrategy(
            [self.market_info],
            order_type="market",
            order_price=None,
            cancel_order_wait_time=self.cancel_order_wait_time,
            is_buy=False,
            time_delay=self.time_delay,
            order_amount=Decimal("1.0"),
            logging_options=logging_options
        )
        self.logging_options = logging_options
        self.clock.add_iterator(self.market)
        # Event loggers capture the events each test asserts on.
        self.maker_order_fill_logger: EventLogger = EventLogger()
        self.cancel_order_logger: EventLogger = EventLogger()
        self.buy_order_completed_logger: EventLogger = EventLogger()
        self.sell_order_completed_logger: EventLogger = EventLogger()
        self.market.add_listener(MarketEvent.BuyOrderCompleted, self.buy_order_completed_logger)
        self.market.add_listener(MarketEvent.SellOrderCompleted, self.sell_order_completed_logger)
        self.market.add_listener(MarketEvent.OrderFilled, self.maker_order_fill_logger)
        self.market.add_listener(MarketEvent.OrderCancelled, self.cancel_order_logger)
    @staticmethod
    def simulate_limit_order_fill(market: MockPaperExchange, limit_order: LimitOrder):
        """Simulate a complete, fee-free fill of ``limit_order`` on ``market``.

        Adjusts the paper-exchange balances by the traded base/quote
        amounts and fires the OrderFilled event followed by the matching
        Buy/SellOrderCompleted event, mirroring what a real fill emits.
        """
        quote_currency_traded: Decimal = limit_order.price * limit_order.quantity
        base_currency_traded: Decimal = limit_order.quantity
        quote_currency: str = limit_order.quote_currency
        base_currency: str = limit_order.base_currency
        if limit_order.is_buy:
            # Buy: pay quote, receive base.
            market.set_balance(quote_currency, market.get_balance(quote_currency) - quote_currency_traded)
            market.set_balance(base_currency, market.get_balance(base_currency) + base_currency_traded)
            market.trigger_event(MarketEvent.OrderFilled, OrderFilledEvent(
                market.current_timestamp,
                limit_order.client_order_id,
                limit_order.trading_pair,
                TradeType.BUY,
                OrderType.LIMIT,
                limit_order.price,
                limit_order.quantity,
                TradeFee(Decimal("0"))
            ))
            market.trigger_event(MarketEvent.BuyOrderCompleted, BuyOrderCompletedEvent(
                market.current_timestamp,
                limit_order.client_order_id,
                base_currency,
                quote_currency,
                quote_currency,
                base_currency_traded,
                quote_currency_traded,
                Decimal("0"),
                OrderType.LIMIT
            ))
        else:
            # Sell: receive quote, pay base.
            market.set_balance(quote_currency, market.get_balance(quote_currency) + quote_currency_traded)
            market.set_balance(base_currency, market.get_balance(base_currency) - base_currency_traded)
            market.trigger_event(MarketEvent.OrderFilled, OrderFilledEvent(
                market.current_timestamp,
                limit_order.client_order_id,
                limit_order.trading_pair,
                TradeType.SELL,
                OrderType.LIMIT,
                limit_order.price,
                limit_order.quantity,
                TradeFee(Decimal("0"))
            ))
            market.trigger_event(MarketEvent.SellOrderCompleted, SellOrderCompletedEvent(
                market.current_timestamp,
                limit_order.client_order_id,
                base_currency,
                quote_currency,
                quote_currency,
                base_currency_traded,
                quote_currency_traded,
                Decimal("0"),
                OrderType.LIMIT
            ))
    def test_limit_buy_order(self):
        """A limit buy is placed at 99 after ``time_delay`` and cancelled
        after ``cancel_order_wait_time``."""
        self.clock.add_iterator(self.limit_buy_strategy)
        # No orders should be placed before the time delay elapses.
        self.clock.backtest_til(self.start_timestamp + self.clock_tick_size)
        self.assertEqual(0, len(self.limit_buy_strategy.active_bids))
        # After the delay exactly one bid should exist with the configured
        # price (99) and amount (1).
        self.clock.backtest_til(self.start_timestamp + self.clock_tick_size + self.time_delay)
        self.assertEqual(1, len(self.limit_buy_strategy.active_bids))
        bid_order: LimitOrder = self.limit_buy_strategy.active_bids[0][1]
        self.assertEqual(Decimal("99"), bid_order.price)
        self.assertEqual(1, bid_order.quantity)
        # The unfilled order should be cancelled once
        # cancel_order_wait_time has passed.
        self.clock.backtest_til(self.start_timestamp
                                + self.clock_tick_size + self.time_delay + self.cancel_order_wait_time)
        self.assertEqual(0, len(self.limit_buy_strategy.active_bids))
        order_cancelled_events: List[OrderCancelledEvent] = [t for t in self.cancel_order_logger.event_log
                                                             if isinstance(t, OrderCancelledEvent)]
        self.assertEqual(1, len(order_cancelled_events))
        self.cancel_order_logger.clear()
def test_limit_sell_order(self):
self.clock.add_iterator(self.limit_sell_strategy)
# check no orders are placed before time delay
self.clock.backtest_til(self.start_timestamp + self.clock_tick_size)
self.assertEqual(0, len(self.limit_buy_strategy.active_asks))
# test whether number of orders is one
# check whether the order is sell
# check whether the price is correct
# check whether amount is correct
self.clock.backtest_til(self.start_timestamp + self.clock_tick_size + self.time_delay)
self.assertEqual(1, len(self.limit_sell_strategy.active_asks))
ask_order: LimitOrder = self.limit_sell_strategy.active_asks[0][1]
self.assertEqual(Decimal("101"), ask_order.price)
self.assertEqual(1, ask_order.quantity)
# Check whether order is cancelled after cancel_order_wait_time
self.clock.backtest_til(
self.start_timestamp + self.clock_tick_size + self.time_delay + self.cancel_order_wait_time)
self.assertEqual(0, len(self.limit_buy_strategy.active_bids))
order_cancelled_events: List[OrderCancelledEvent] = [t for t in self.cancel_order_logger.event_log
if isinstance(t, OrderCancelledEvent)]
self.assertEqual(1, len(order_cancelled_events))
self.cancel_order_logger.clear()
def test_market_buy_order(self):
self.clock.add_iterator(self.market_buy_strategy)
# check no orders are placed before time delay
self.clock.backtest_til(self.start_timestamp + self.clock_tick_size)
market_buy_events: List[BuyOrderCompletedEvent] = [t for t in self.buy_order_completed_logger.event_log
if isinstance(t, BuyOrderCompletedEvent)]
self.assertEqual(0, len(market_buy_events))
# test whether number of orders is one
# check whether the order is buy
# check whether the size is correct
self.clock.backtest_til(self.start_timestamp + self.clock_tick_size + self.time_delay)
market_buy_events: List[BuyOrderCompletedEvent] = [t for t in self.buy_order_completed_logger.event_log
if isinstance(t, BuyOrderCompletedEvent)]
self.assertEqual(1, len(market_buy_events))
amount: Decimal = sum(t.base_asset_amount for t in market_buy_events)
self.assertEqual(1, amount)
self.buy_order_completed_logger.clear()
def test_market_sell_order(self):
self.clock.add_iterator(self.market_sell_strategy)
# check no orders are placed before time delay
self.clock.backtest_til(self.start_timestamp + self.clock_tick_size)
market_buy_events: List[BuyOrderCompletedEvent] = [t for t in self.buy_order_completed_logger.event_log
if isinstance(t, BuyOrderCompletedEvent)]
self.assertEqual(0, len(market_buy_events))
# test whether number of orders is one
# check whether the order is sell
# check whether the size is correct
self.clock.backtest_til(self.start_timestamp + self.clock_tick_size + self.time_delay)
market_sell_events: List[SellOrderCompletedEvent] = [t for t in self.sell_order_completed_logger.event_log
if isinstance(t, SellOrderCompletedEvent)]
self.assertEqual(1, len(market_sell_events))
amount: Decimal = sum(t.base_asset_amount for t in market_sell_events)
self.assertEqual(1, amount)
self.sell_order_completed_logger.clear()
def test_order_filled_events(self):
self.clock.add_iterator(self.limit_buy_strategy)
self.clock.add_iterator(self.limit_sell_strategy)
# check no orders are placed before time delay
self.clock.backtest_til(self.start_timestamp + self.clock_tick_size)
self.assertEqual(0, len(self.limit_buy_strategy.active_bids))
# test whether number of orders is one
# check whether the order is sell
# check whether the price is correct
# check whether amount is correct
self.clock.backtest_til(self.start_timestamp + self.clock_tick_size + self.time_delay)
self.assertEqual(1, len(self.limit_sell_strategy.active_asks))
ask_order: LimitOrder = self.limit_sell_strategy.active_asks[0][1]
self.assertEqual(Decimal("101"), ask_order.price)
self.assertEqual(1, ask_order.quantity)
self.assertEqual(1, len(self.limit_buy_strategy.active_bids))
bid_order: LimitOrder = self.limit_buy_strategy.active_bids[0][1]
self.assertEqual(Decimal("99"), bid_order.price)
self.assertEqual(1, bid_order.quantity)
# Simulate market fill for limit buy and limit sell
self.simulate_limit_order_fill(self.market, bid_order)
self.simulate_limit_order_fill(self.market, ask_order)
fill_events = self.maker_order_fill_logger.event_log
self.assertEqual(2, len(fill_events))
bid_fills: List[OrderFilledEvent] = [evt for evt in fill_events if evt.trade_type is TradeType.SELL]
ask_fills: List[OrderFilledEvent] = [evt for evt in fill_events if evt.trade_type is TradeType.BUY]
self.assertEqual(1, len(bid_fills))
self.assertEqual(1, len(ask_fills))
    def test_with_insufficient_balance(self):
        """ Strategies must refuse to place orders when the funding asset balance is zero. """
        # Zero the *quote* (WETH) balance and verify the buy strategies place no orders.
        # (The previous comment claimed base balance / sell strategies — that was wrong.)
        self.clock.add_iterator(self.limit_buy_strategy)
        self.clock.add_iterator(self.market_buy_strategy)
        self.market.set_balance("WETH", 0)
        end_ts = self.start_timestamp + self.clock_tick_size + self.time_delay
        self.clock.backtest_til(end_ts)
        self.assertEqual(0, len(self.limit_buy_strategy.active_bids))
        market_buy_events: List[BuyOrderCompletedEvent] = [t for t in self.buy_order_completed_logger.event_log
                                                           if isinstance(t, BuyOrderCompletedEvent)]
        self.assertEqual(0, len(market_buy_events))
        self.assertEqual(False, self.limit_buy_strategy.place_orders)
        self.assertEqual(False, self.market_buy_strategy.place_orders)
        # Now zero the *base* (COINALPHA) balance and verify the sell strategies place no orders.
        self.clock.add_iterator(self.limit_sell_strategy)
        self.clock.add_iterator(self.market_sell_strategy)
        self.market.set_balance("COINALPHA", 0)
        end_ts += self.clock_tick_size + self.time_delay
        self.clock.backtest_til(end_ts)
        self.assertEqual(0, len(self.limit_sell_strategy.active_asks))
        market_sell_events: List[SellOrderCompletedEvent] = [t for t in self.sell_order_completed_logger.event_log
                                                             if isinstance(t, SellOrderCompletedEvent)]
        self.assertEqual(0, len(market_sell_events))
        self.assertEqual(False, self.limit_sell_strategy.place_orders)
        self.assertEqual(False, self.market_sell_strategy.place_orders)
| 49.35 | 117 | 0.670276 |
from hummingbot.strategy.market_trading_pair_tuple import MarketTradingPairTuple
from decimal import Decimal
import logging; logging.basicConfig(level=logging.ERROR)
import pandas as pd
from typing import List
import unittest
from hummingbot.core.clock import (
Clock,
ClockMode
)
from hummingbot.core.event.event_logger import EventLogger
from hummingbot.core.event.events import (
MarketEvent,
OrderCancelledEvent,
TradeType,
OrderType,
OrderFilledEvent,
BuyOrderCompletedEvent,
SellOrderCompletedEvent,
TradeFee
)
from hummingbot.core.data_type.limit_order import LimitOrder
from hummingbot.strategy.dev_simple_trade.dev_simple_trade import SimpleTradeStrategy
from hummingbot.connector.exchange.paper_trade.paper_trade_exchange import QuantizationParams
from test.mock.mock_paper_exchange import MockPaperExchange
class SimpleTradeUnitTest(unittest.TestCase):
    """Unit tests for SimpleTradeStrategy, run against a mock paper-trade exchange on a backtest clock."""
    # Backtest window: one hour starting 2019-01-01 00:00 UTC.
    start: pd.Timestamp = pd.Timestamp("2019-01-01", tz="UTC")
    end: pd.Timestamp = pd.Timestamp("2019-01-01 01:00:00", tz="UTC")
    start_timestamp: float = start.timestamp()
    end_timestamp: float = end.timestamp()
    # [trading pair, base asset, quote asset]
    maker_trading_pairs: List[str] = ["COINALPHA-WETH", "COINALPHA", "WETH"]
    # Seconds advanced per backtest clock tick.
    clock_tick_size = 10
    def setUp(self):
        """Build the mock exchange, a balanced order book, event loggers, and four
        strategy variants (limit/market x buy/sell)."""
        self.clock: Clock = Clock(ClockMode.BACKTEST, self.clock_tick_size, self.start_timestamp, self.end_timestamp)
        self.market: MockPaperExchange = MockPaperExchange()
        self.mid_price = 100
        self.time_delay = 15
        self.cancel_order_wait_time = 45
        # Symmetric book around mid price 100 so limit orders at 99/101 rest without crossing.
        self.market.set_balanced_order_book(self.maker_trading_pairs[0],
                                            mid_price=self.mid_price, min_price=1,
                                            max_price=200, price_step_size=1, volume_step_size=10)
        self.market.set_balance("COINALPHA", 500)
        self.market.set_balance("WETH", 5000)
        self.market.set_balance("QETH", 500)
        self.market.set_quantization_param(
            QuantizationParams(
                self.maker_trading_pairs[0], 6, 6, 6, 6
            )
        )
        self.market_info: MarketTradingPairTuple = MarketTradingPairTuple(
            *(
                [self.market] + self.maker_trading_pairs
            )
        )
        logging_options: int = (SimpleTradeStrategy.OPTION_LOG_ALL &
                                (~SimpleTradeStrategy.OPTION_LOG_NULL_ORDER_SIZE))
        self.limit_buy_strategy: SimpleTradeStrategy = SimpleTradeStrategy(
            [self.market_info],
            order_type="limit",
            order_price=Decimal("99"),
            cancel_order_wait_time=self.cancel_order_wait_time,
            is_buy=True,
            time_delay=self.time_delay,
            order_amount=Decimal("1.0"),
            logging_options=logging_options
        )
        self.limit_sell_strategy: SimpleTradeStrategy = SimpleTradeStrategy(
            [self.market_info],
            order_type="limit",
            order_price=Decimal("101"),
            cancel_order_wait_time=self.cancel_order_wait_time,
            is_buy=False,
            time_delay=self.time_delay,
            order_amount=Decimal("1.0"),
            logging_options=logging_options
        )
        self.market_buy_strategy: SimpleTradeStrategy = SimpleTradeStrategy(
            [self.market_info],
            order_type="market",
            order_price=None,
            cancel_order_wait_time=self.cancel_order_wait_time,
            is_buy=True,
            time_delay=self.time_delay,
            order_amount=Decimal("1.0"),
            logging_options=logging_options
        )
        self.market_sell_strategy: SimpleTradeStrategy = SimpleTradeStrategy(
            [self.market_info],
            order_type="market",
            order_price=None,
            cancel_order_wait_time=self.cancel_order_wait_time,
            is_buy=False,
            time_delay=self.time_delay,
            order_amount=Decimal("1.0"),
            logging_options=logging_options
        )
        self.logging_options = logging_options
        self.clock.add_iterator(self.market)
        # Event loggers capture fills, cancellations and completions emitted by the mock market.
        self.maker_order_fill_logger: EventLogger = EventLogger()
        self.cancel_order_logger: EventLogger = EventLogger()
        self.buy_order_completed_logger: EventLogger = EventLogger()
        self.sell_order_completed_logger: EventLogger = EventLogger()
        self.market.add_listener(MarketEvent.BuyOrderCompleted, self.buy_order_completed_logger)
        self.market.add_listener(MarketEvent.SellOrderCompleted, self.sell_order_completed_logger)
        self.market.add_listener(MarketEvent.OrderFilled, self.maker_order_fill_logger)
        self.market.add_listener(MarketEvent.OrderCancelled, self.cancel_order_logger)
    @staticmethod
    def simulate_limit_order_fill(market: MockPaperExchange, limit_order: LimitOrder):
        """Adjust mock balances and fire the fill/completion events a complete fill of
        `limit_order` would produce on a real exchange."""
        quote_currency_traded: Decimal = limit_order.price * limit_order.quantity
        base_currency_traded: Decimal = limit_order.quantity
        quote_currency: str = limit_order.quote_currency
        base_currency: str = limit_order.base_currency
        if limit_order.is_buy:
            # Buy: pay quote, receive base.
            market.set_balance(quote_currency, market.get_balance(quote_currency) - quote_currency_traded)
            market.set_balance(base_currency, market.get_balance(base_currency) + base_currency_traded)
            market.trigger_event(MarketEvent.OrderFilled, OrderFilledEvent(
                market.current_timestamp,
                limit_order.client_order_id,
                limit_order.trading_pair,
                TradeType.BUY,
                OrderType.LIMIT,
                limit_order.price,
                limit_order.quantity,
                TradeFee(Decimal("0"))
            ))
            market.trigger_event(MarketEvent.BuyOrderCompleted, BuyOrderCompletedEvent(
                market.current_timestamp,
                limit_order.client_order_id,
                base_currency,
                quote_currency,
                quote_currency,
                base_currency_traded,
                quote_currency_traded,
                Decimal("0"),
                OrderType.LIMIT
            ))
        else:
            # Sell: receive quote, pay base.
            market.set_balance(quote_currency, market.get_balance(quote_currency) + quote_currency_traded)
            market.set_balance(base_currency, market.get_balance(base_currency) - base_currency_traded)
            market.trigger_event(MarketEvent.OrderFilled, OrderFilledEvent(
                market.current_timestamp,
                limit_order.client_order_id,
                limit_order.trading_pair,
                TradeType.SELL,
                OrderType.LIMIT,
                limit_order.price,
                limit_order.quantity,
                TradeFee(Decimal("0"))
            ))
            market.trigger_event(MarketEvent.SellOrderCompleted, SellOrderCompletedEvent(
                market.current_timestamp,
                limit_order.client_order_id,
                base_currency,
                quote_currency,
                quote_currency,
                base_currency_traded,
                quote_currency_traded,
                Decimal("0"),
                OrderType.LIMIT
            ))
    def test_limit_buy_order(self):
        """Limit buy: one 1-unit bid at 99 after time_delay; cancelled after cancel_order_wait_time."""
        self.clock.add_iterator(self.limit_buy_strategy)
        self.clock.backtest_til(self.start_timestamp + self.clock_tick_size)
        self.assertEqual(0, len(self.limit_buy_strategy.active_bids))
        self.clock.backtest_til(self.start_timestamp + self.clock_tick_size + self.time_delay)
        self.assertEqual(1, len(self.limit_buy_strategy.active_bids))
        bid_order: LimitOrder = self.limit_buy_strategy.active_bids[0][1]
        self.assertEqual(Decimal("99"), bid_order.price)
        self.assertEqual(1, bid_order.quantity)
        self.clock.backtest_til(self.start_timestamp
                                + self.clock_tick_size + self.time_delay + self.cancel_order_wait_time)
        self.assertEqual(0, len(self.limit_buy_strategy.active_bids))
        order_cancelled_events: List[OrderCancelledEvent] = [t for t in self.cancel_order_logger.event_log
                                                             if isinstance(t, OrderCancelledEvent)]
        self.assertEqual(1, len(order_cancelled_events))
        self.cancel_order_logger.clear()
    def test_limit_sell_order(self):
        """Limit sell: one 1-unit ask at 101 after time_delay; cancelled after cancel_order_wait_time."""
        self.clock.add_iterator(self.limit_sell_strategy)
        self.clock.backtest_til(self.start_timestamp + self.clock_tick_size)
        self.assertEqual(0, len(self.limit_buy_strategy.active_asks))
        self.clock.backtest_til(self.start_timestamp + self.clock_tick_size + self.time_delay)
        self.assertEqual(1, len(self.limit_sell_strategy.active_asks))
        ask_order: LimitOrder = self.limit_sell_strategy.active_asks[0][1]
        self.assertEqual(Decimal("101"), ask_order.price)
        self.assertEqual(1, ask_order.quantity)
        self.clock.backtest_til(
            self.start_timestamp + self.clock_tick_size + self.time_delay + self.cancel_order_wait_time)
        self.assertEqual(0, len(self.limit_buy_strategy.active_bids))
        order_cancelled_events: List[OrderCancelledEvent] = [t for t in self.cancel_order_logger.event_log
                                                             if isinstance(t, OrderCancelledEvent)]
        self.assertEqual(1, len(order_cancelled_events))
        self.cancel_order_logger.clear()
    def test_market_buy_order(self):
        """Market buy: exactly one completed buy of base size 1 after time_delay."""
        self.clock.add_iterator(self.market_buy_strategy)
        self.clock.backtest_til(self.start_timestamp + self.clock_tick_size)
        market_buy_events: List[BuyOrderCompletedEvent] = [t for t in self.buy_order_completed_logger.event_log
                                                           if isinstance(t, BuyOrderCompletedEvent)]
        self.assertEqual(0, len(market_buy_events))
        self.clock.backtest_til(self.start_timestamp + self.clock_tick_size + self.time_delay)
        market_buy_events: List[BuyOrderCompletedEvent] = [t for t in self.buy_order_completed_logger.event_log
                                                           if isinstance(t, BuyOrderCompletedEvent)]
        self.assertEqual(1, len(market_buy_events))
        amount: Decimal = sum(t.base_asset_amount for t in market_buy_events)
        self.assertEqual(1, amount)
        self.buy_order_completed_logger.clear()
    def test_market_sell_order(self):
        """Market sell: exactly one completed sell of base size 1 after time_delay."""
        self.clock.add_iterator(self.market_sell_strategy)
        self.clock.backtest_til(self.start_timestamp + self.clock_tick_size)
        market_buy_events: List[BuyOrderCompletedEvent] = [t for t in self.buy_order_completed_logger.event_log
                                                           if isinstance(t, BuyOrderCompletedEvent)]
        self.assertEqual(0, len(market_buy_events))
        self.clock.backtest_til(self.start_timestamp + self.clock_tick_size + self.time_delay)
        market_sell_events: List[SellOrderCompletedEvent] = [t for t in self.sell_order_completed_logger.event_log
                                                             if isinstance(t, SellOrderCompletedEvent)]
        self.assertEqual(1, len(market_sell_events))
        amount: Decimal = sum(t.base_asset_amount for t in market_sell_events)
        self.assertEqual(1, amount)
        self.sell_order_completed_logger.clear()
    def test_order_filled_events(self):
        """Both limit strategies place orders; simulated fills emit two OrderFilledEvents."""
        self.clock.add_iterator(self.limit_buy_strategy)
        self.clock.add_iterator(self.limit_sell_strategy)
        self.clock.backtest_til(self.start_timestamp + self.clock_tick_size)
        self.assertEqual(0, len(self.limit_buy_strategy.active_bids))
        self.clock.backtest_til(self.start_timestamp + self.clock_tick_size + self.time_delay)
        self.assertEqual(1, len(self.limit_sell_strategy.active_asks))
        ask_order: LimitOrder = self.limit_sell_strategy.active_asks[0][1]
        self.assertEqual(Decimal("101"), ask_order.price)
        self.assertEqual(1, ask_order.quantity)
        self.assertEqual(1, len(self.limit_buy_strategy.active_bids))
        bid_order: LimitOrder = self.limit_buy_strategy.active_bids[0][1]
        self.assertEqual(Decimal("99"), bid_order.price)
        self.assertEqual(1, bid_order.quantity)
        self.simulate_limit_order_fill(self.market, bid_order)
        self.simulate_limit_order_fill(self.market, ask_order)
        fill_events = self.maker_order_fill_logger.event_log
        self.assertEqual(2, len(fill_events))
        bid_fills: List[OrderFilledEvent] = [evt for evt in fill_events if evt.trade_type is TradeType.SELL]
        ask_fills: List[OrderFilledEvent] = [evt for evt in fill_events if evt.trade_type is TradeType.BUY]
        self.assertEqual(1, len(bid_fills))
        self.assertEqual(1, len(ask_fills))
    def test_with_insufficient_balance(self):
        """With zero quote (then base) balance, buy (then sell) strategies place no orders."""
        self.clock.add_iterator(self.limit_buy_strategy)
        self.clock.add_iterator(self.market_buy_strategy)
        self.market.set_balance("WETH", 0)
        end_ts = self.start_timestamp + self.clock_tick_size + self.time_delay
        self.clock.backtest_til(end_ts)
        self.assertEqual(0, len(self.limit_buy_strategy.active_bids))
        market_buy_events: List[BuyOrderCompletedEvent] = [t for t in self.buy_order_completed_logger.event_log
                                                           if isinstance(t, BuyOrderCompletedEvent)]
        self.assertEqual(0, len(market_buy_events))
        self.assertEqual(False, self.limit_buy_strategy.place_orders)
        self.assertEqual(False, self.market_buy_strategy.place_orders)
        self.clock.add_iterator(self.limit_sell_strategy)
        self.clock.add_iterator(self.market_sell_strategy)
        self.market.set_balance("COINALPHA", 0)
        end_ts += self.clock_tick_size + self.time_delay
        self.clock.backtest_til(end_ts)
        self.assertEqual(0, len(self.limit_sell_strategy.active_asks))
        market_sell_events: List[SellOrderCompletedEvent] = [t for t in self.sell_order_completed_logger.event_log
                                                             if isinstance(t, SellOrderCompletedEvent)]
        self.assertEqual(0, len(market_sell_events))
        self.assertEqual(False, self.limit_sell_strategy.place_orders)
        self.assertEqual(False, self.market_sell_strategy.place_orders)
| true | true |
f7f473a09d9667c168457528d798080dfdbdb6d7 | 1,539 | py | Python | updaterjob.py | bzaczynski/autoautelion | 5f60b74c64eb29d162b0ba8c2f3386e24bb7a11b | [
"MIT"
] | null | null | null | updaterjob.py | bzaczynski/autoautelion | 5f60b74c64eb29d162b0ba8c2f3386e24bb7a11b | [
"MIT"
] | 4 | 2018-12-27T13:32:30.000Z | 2021-04-21T14:19:33.000Z | updaterjob.py | bzaczynski/autoautelion | 5f60b74c64eb29d162b0ba8c2f3386e24bb7a11b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Parse Autelion, update Redis and send notification via email if necessary.
Usage:
$ export AUTELION_USERNAME=<your-username>
$ export AUTELION_PASSWORD=<your-password>
$ export REDIS_URL=redis://localhost
$ export SENDGRID_API_KEY=<your-api-key>
$ export EMAIL_ADDRESS=<your-email>
$ python updaterjob.py
"""
import os
import logging
import cache
import parser
import mailer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def main():
    """Updater job entry point.

    Parses Autelion, refreshes the cached status in Redis, and sends an
    email notification when the status appears for the first time or has
    changed since the previous run.
    """
    logger.info('Updater job started')
    try:
        # Guard clauses flatten the original deeply-nested if/else pyramid;
        # early returns still execute the `finally` block below.
        status = parser.parse_autelion()
        if status is None:
            logger.error('Updater job failed to parse Autelion')
            return
        old_autelion = cache.get_autelion()
        cache.set_autelion(status)
        new_autelion = cache.get_autelion()
        if new_autelion is None:
            logger.error('Updater job failed to update cache')
            return
        if old_autelion is None:
            # First run ever: always notify.
            logger.info('There was no previous status')
            mailer.send(new_autelion)
        elif old_autelion.status == new_autelion.status:
            logger.info('Previous status has not changed')
        else:
            logger.info('Previous status has changed')
            mailer.send(new_autelion)
    finally:
        logger.info('Updater job finished')
if __name__ == '__main__':
main()
| 26.084746 | 74 | 0.610136 |
import os
import logging
import cache
import parser
import mailer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def main():
    """Updater job entry point: parse Autelion, refresh the Redis cache, and
    email a notification when the status is new or has changed."""
    logger.info('Updater job started')
    try:
        status = parser.parse_autelion()
        if status is None:
            logger.error('Updater job failed to parse Autelion')
        else:
            # Remember the previous cached value before overwriting it,
            # then re-read to confirm the cache update took effect.
            old_autelion = cache.get_autelion()
            cache.set_autelion(status)
            new_autelion = cache.get_autelion()
            if new_autelion is None:
                logger.error('Updater job failed to update cache')
            else:
                if old_autelion is None:
                    # First run ever: always notify.
                    logger.info('There was no previous status')
                    mailer.send(new_autelion)
                else:
                    if old_autelion.status == new_autelion.status:
                        logger.info('Previous status has not changed')
                    else:
                        # Status changed since last run: notify.
                        logger.info('Previous status has changed')
                        mailer.send(new_autelion)
    finally:
        # Runs on success, failure, and early error paths alike.
        logger.info('Updater job finished')
if __name__ == '__main__':
main()
| true | true |
f7f474576db9fc12c99ece1134fc7235cb750c99 | 37,459 | py | Python | luna/gateware/interface/ulpi.py | ktemkin/luna | 661dc89f7f60ba8a51165f7f8037ad2d5854cf34 | [
"BSD-3-Clause"
] | 4 | 2020-02-11T18:40:02.000Z | 2020-04-03T13:07:38.000Z | luna/gateware/interface/ulpi.py | ktemkin/luna | 661dc89f7f60ba8a51165f7f8037ad2d5854cf34 | [
"BSD-3-Clause"
] | null | null | null | luna/gateware/interface/ulpi.py | ktemkin/luna | 661dc89f7f60ba8a51165f7f8037ad2d5854cf34 | [
"BSD-3-Clause"
] | null | null | null | # nmigen: UnusedElaboratable=no
#
# This file is part of LUNA.
#
""" ULPI interfacing hardware. """
from nmigen import Signal, Module, Cat, Elaboratable, ClockSignal, Record, ResetSignal, Const
import unittest
from nmigen.back.pysim import Simulator
from ..test import LunaGatewareTestCase, ulpi_domain_test_case, sync_test_case
from ..utils import rising_edge_detector, falling_edge_detector
class ULPIRegisterWindow(Elaboratable):
    """ Gateware interface that handles ULPI register reads and writes.

    I/O ports:

        # ULPI signals:
        I: ulpi_data_in[8]   -- input value of the ULPI data lines
        O: ulpi_data_out[8]  -- output value of the ULPI data lines
        O: ulpi_out_req      -- true iff we're trying to drive the ULPI data lines

        # Controller signals:
        O: busy              -- indicates when the register window is busy processing a transaction
        I: address[6]        -- the address of the register to work with
        O: done              -- strobe that indicates when a register request is complete

        I: read_request      -- strobe that requests a register read
        O: read_data[8]      -- data read from the relevant register read

        I: write_request     -- strobe that indicates a register write
        I: write_data[8]     -- data to be written during a register write
    """

    COMMAND_REG_WRITE = 0b10000000
    COMMAND_REG_READ  = 0b11000000

    def __init__(self):

        #
        # I/O port.
        #
        self.ulpi_data_in  = Signal(8)
        self.ulpi_data_out = Signal(8)
        self.ulpi_out_req  = Signal()
        self.ulpi_dir      = Signal()
        self.ulpi_next     = Signal()
        self.ulpi_stop     = Signal()

        self.busy          = Signal()
        self.address       = Signal(6)
        self.done          = Signal()

        self.read_request  = Signal()
        self.read_data     = Signal(8)

        self.write_request = Signal()
        self.write_data    = Signal(8)


    def elaborate(self, platform):
        m = Module()

        # Latched copies of the request arguments. These are captured while IDLE,
        # and -- fix -- are now actually used to drive the bus, so a caller that
        # changes `address`/`write_data` mid-transaction can't corrupt the
        # in-flight command. (Previously they were latched but never read.)
        current_address = Signal(6)
        current_write   = Signal(8)

        # Keep our control signals low unless explicitly asserted.
        m.d.ulpi += [
            self.ulpi_out_req.eq(0),
            self.ulpi_stop.eq(0),
            self.done.eq(0)
        ]

        with m.FSM(domain='ulpi') as fsm:

            # We're busy whenever we're not IDLE; indicate so.
            m.d.comb += self.busy.eq(~fsm.ongoing('IDLE'))

            # IDLE: wait for a request to be made
            with m.State('IDLE'):

                # Apply a NOP whenever we're idle.
                #
                # This doesn't technically help for normal ULPI
                # operation, as the controller should handle this,
                # but it cleans up the output in our tests and allows
                # this unit to be used standalone.
                m.d.ulpi += self.ulpi_data_out.eq(0)

                # Constantly latch in our arguments while IDLE.
                # We'll stop latching these in as soon as we're busy.
                m.d.ulpi += [
                    current_address .eq(self.address),
                    current_write   .eq(self.write_data)
                ]

                with m.If(self.read_request):
                    m.next = 'START_READ'

                with m.If(self.write_request):
                    m.next = 'START_WRITE'

            #
            # Read handling.
            #

            # START_READ: wait for the bus to be idle, so we can transmit.
            with m.State('START_READ'):

                # Wait for the bus to be idle.
                with m.If(~self.ulpi_dir):
                    m.next = 'SEND_READ_ADDRESS'

                    # Once it is, start sending our command.
                    # Note the use of the *latched* address, not the live input.
                    m.d.ulpi += [
                        self.ulpi_data_out .eq(self.COMMAND_REG_READ | current_address),
                        self.ulpi_out_req  .eq(1)
                    ]

            # SEND_READ_ADDRESS: Request sending the read address, which we
            # start sending on the next clock cycle. Note that we don't want
            # to come into this state writing, as we need to lead with a
            # bus-turnaround cycle.
            with m.State('SEND_READ_ADDRESS'):
                m.d.ulpi += self.ulpi_out_req.eq(1)

                # If DIR has become asserted, we're being interrupted.
                # We'll have to restart the read after the interruption is over.
                with m.If(self.ulpi_dir):
                    m.next = 'START_READ'
                    m.d.ulpi += self.ulpi_out_req.eq(0)

                # If NXT becomes asserted without us being interrupted by
                # DIR, then the PHY has accepted the read. Release our write
                # request, so the next cycle can properly act as a bus turnaround.
                with m.Elif(self.ulpi_next):
                    m.d.ulpi += [
                        self.ulpi_out_req  .eq(0),
                        self.ulpi_data_out .eq(0),
                    ]
                    m.next = 'READ_TURNAROUND'

            # READ_TURNAROUND: wait for the PHY to take control of the ULPI bus.
            with m.State('READ_TURNAROUND'):

                # After one cycle, we should have a data byte ready.
                m.next = 'READ_COMPLETE'

            # READ_COMPLETE: the ULPI read exchange is complete, and the read data is ready.
            with m.State('READ_COMPLETE'):
                m.next = 'IDLE'

                # Latch in the data, and indicate that we have new, valid data.
                m.d.ulpi += [
                    self.read_data .eq(self.ulpi_data_in),
                    self.done      .eq(1)
                ]

            #
            # Write handling.
            #

            # START_WRITE: wait for the bus to be idle, so we can transmit.
            with m.State('START_WRITE'):

                # Wait for the bus to be idle.
                with m.If(~self.ulpi_dir):
                    m.next = 'SEND_WRITE_ADDRESS'

                    # Once it is, start sending our command.
                    # Again, use the latched copy of the address.
                    m.d.ulpi += [
                        self.ulpi_data_out .eq(self.COMMAND_REG_WRITE | current_address),
                        self.ulpi_out_req  .eq(1)
                    ]

            # SEND_WRITE_ADDRESS: Continue sending the write address until the
            # target device accepts it.
            with m.State('SEND_WRITE_ADDRESS'):
                m.d.ulpi += self.ulpi_out_req.eq(1)

                # If DIR has become asserted, we're being interrupted.
                # We'll have to restart the write after the interruption is over.
                with m.If(self.ulpi_dir):
                    m.next = 'START_WRITE'
                    m.d.ulpi += self.ulpi_out_req.eq(0)

                # Hold our address until the PHY has accepted the command;
                # and then move to presenting the PHY with the value to be written.
                # The latched write data is presented, so a changing `write_data`
                # input can't alter an in-flight transaction.
                with m.Elif(self.ulpi_next):
                    m.d.ulpi += self.ulpi_data_out.eq(current_write)
                    m.next = 'HOLD_WRITE'

            # Hold the write data on the bus until the device acknowledges it.
            with m.State('HOLD_WRITE'):
                m.d.ulpi += self.ulpi_out_req.eq(1)

                # Handle interruption.
                with m.If(self.ulpi_dir):
                    m.next = 'START_WRITE'
                    m.d.ulpi += self.ulpi_out_req.eq(0)

                # Hold the data present until the device has accepted it.
                # Once it has, pulse STP for a cycle to complete the transaction.
                with m.Elif(self.ulpi_next):
                    m.d.ulpi += [
                        self.ulpi_data_out.eq(0),
                        self.ulpi_out_req.eq(0),
                        self.ulpi_stop.eq(1),
                        self.done.eq(1)
                    ]
                    m.next = 'IDLE'

        return m
class TestULPIRegisters(LunaGatewareTestCase):
    """ Cycle-accurate simulation tests for the ULPIRegisterWindow FSM. """
    FRAGMENT_UNDER_TEST = ULPIRegisterWindow
    ULPI_CLOCK_FREQUENCY = 60e6
    SYNC_CLOCK_FREQUENCY = None
    def initialize_signals(self):
        """ Start each test with an idle ULPI bus and no pending requests. """
        yield self.dut.ulpi_dir.eq(0)
        yield self.dut.read_request.eq(0)
        yield self.dut.write_request.eq(0)
    @ulpi_domain_test_case
    def test_idle_behavior(self):
        """ Ensure we apply a NOP whenever we're not actively performing a command. """
        self.assertEqual((yield self.dut.ulpi_data_out), 0)
    @ulpi_domain_test_case
    def test_register_read(self):
        """ Validates a register read. """
        # Poison the register value with a fail value (0xBD).
        yield self.dut.ulpi_data_in.eq(0xBD)
        # Set up a read request.
        yield self.dut.address.eq(0)
        yield
        # After a read request, we should be busy...
        yield from self.pulse(self.dut.read_request)
        self.assertEqual((yield self.dut.busy), 1)
        # ... and then, since dir is unasserted, we should have a read command.
        yield
        self.assertEqual((yield self.dut.ulpi_data_out), 0b11000000)
        # We should continue to present the command...
        yield from self.advance_cycles(10)
        self.assertEqual((yield self.dut.ulpi_data_out), 0b11000000)
        self.assertEqual((yield self.dut.busy), 1)
        # ... until the host accepts it.
        yield self.dut.ulpi_next.eq(1)
        yield
        # We should then wait for a single bus turnaround cycle before reading.
        yield
        # And then should read whatever value is present.
        yield self.dut.ulpi_data_in.eq(0x07)
        yield
        yield
        self.assertEqual((yield self.dut.read_data), 0x07)
        # Finally, we should return to idle.
        self.assertEqual((yield self.dut.busy), 0)
    @ulpi_domain_test_case
    def test_interrupted_read(self):
        """ Validates how a register read works when interrupted by a change in DIR. """
        # Set up a read request while DIR is asserted.
        yield self.dut.ulpi_dir.eq(1)
        yield self.dut.address.eq(0)
        yield from self.pulse(self.dut.read_request)
        # We shouldn't try to output anything until DIR is de-asserted.
        yield from self.advance_cycles(1)
        self.assertEqual((yield self.dut.ulpi_out_req), 0)
        yield from self.advance_cycles(10)
        self.assertEqual((yield self.dut.ulpi_out_req), 0)
        # De-assert DIR, and let the platform apply a read command.
        yield self.dut.ulpi_dir.eq(0)
        yield from self.advance_cycles(2)
        self.assertEqual((yield self.dut.ulpi_data_out), 0b11000000)
        # Assert DIR again; interrupting the read. This should bring
        # the platform back to its "waiting for the bus" state.
        yield self.dut.ulpi_dir.eq(1)
        yield from self.advance_cycles(2)
        self.assertEqual((yield self.dut.ulpi_out_req), 0)
        # Clear DIR, and validate that the device starts driving the command again
        yield self.dut.ulpi_dir.eq(0)
        yield from self.advance_cycles(2)
        self.assertEqual((yield self.dut.ulpi_data_out), 0b11000000)
        # Apply NXT so the read can finally continue.
        yield self.dut.ulpi_next.eq(1)
        yield
        # We should then wait for a single bus turnaround cycle before reading.
        yield
        # And then should read whatever value is present.
        yield self.dut.ulpi_data_in.eq(0x07)
        yield
        yield
        self.assertEqual((yield self.dut.read_data), 0x07)
        # Finally, we should return to idle.
        self.assertEqual((yield self.dut.busy), 0)
    @ulpi_domain_test_case
    def test_register_write(self):
        """ Validates a complete register write transaction, including the final STP pulse. """
        # Set up a write request.
        yield self.dut.address.eq(0b10)
        yield self.dut.write_data.eq(0xBC)
        yield
        # Starting the request should make us busy.
        yield from self.pulse(self.dut.write_request)
        self.assertEqual((yield self.dut.busy), 1)
        # ... and then, since dir is unasserted, we should have a write command.
        yield
        self.assertEqual((yield self.dut.ulpi_data_out), 0b10000010)
        # We should continue to present the command...
        yield from self.advance_cycles(10)
        self.assertEqual((yield self.dut.ulpi_data_out), 0b10000010)
        self.assertEqual((yield self.dut.busy), 1)
        # ... until the host accepts it.
        yield self.dut.ulpi_next.eq(1)
        yield
        # We should then present the data to be written...
        yield self.dut.ulpi_next.eq(0)
        yield
        self.assertEqual((yield self.dut.ulpi_data_out), 0xBC)
        # ... and continue doing so until the host accepts it...
        yield from self.advance_cycles(10)
        self.assertEqual((yield self.dut.ulpi_data_out), 0xBC)
        yield self.dut.ulpi_next.eq(1)
        yield from self.advance_cycles(2)
        # ... at which point stop should be asserted for one cycle.
        self.assertEqual((yield self.dut.ulpi_stop), 1)
        yield
        # Finally, we should go idle.
        self.assertEqual((yield self.dut.ulpi_stop), 0)
        self.assertEqual((yield self.dut.busy), 0)
class ULPIRxEventDecoder(Elaboratable):
    """ Simple piece of gateware that tracks receive events.
    I/O port:
        I: ulpi_data_in[8] -- The current input state of the ULPI data lines.
        I: ulpi_dir -- The ULPI bus-direction signal.
        I: ulpi_nxt -- The ULPI 'next' throttle signal.
        I: register_operation_in_progress
            Signal that should be true iff we're performing a register operation.
        O: last_rx_command -- The full byte value of the last RxCmd.
        O: line_state[2] -- The states of the two USB lines.
        O: rx_active -- True when a packet receipt is active.
        O: rx_error -- True when a packet receive error has occurred.
        O: host_disconnect -- True if the host has just disconnected.
        O: id_digital -- Digital value of the ID pin.
        O: vbus_valid -- True iff a valid VBUS voltage is present
        O: session_end -- True iff a session has just ended.
        # Strobes indicating signal changes.
        O: rx_start -- True iff an RxEvent has changed the value of RxActive from 0 -> 1.
        O: rx_stop -- True iff an RxEvent has changed the value of RxActive from 1 -> 0.
    """
    def __init__(self, *, ulpi_bus):
        """ Parameters:
            ulpi_bus -- record with the ULPI `dir`, `nxt`, and `data.i` signals
                        this decoder observes (never driven).
        """
        #
        # I/O port.
        #
        self.ulpi = ulpi_bus
        self.register_operation_in_progress = Signal()
        # Optional: signal that allows access to the last RxCmd byte.
        self.last_rx_command = Signal(8)
        # Decoded views of the latched RxCmd byte (driven combinationally below).
        self.line_state = Signal(2)
        self.rx_active = Signal()
        self.rx_error = Signal()
        self.host_disconnect = Signal()
        self.id_digital = Signal()
        self.vbus_valid = Signal()
        self.session_valid = Signal()
        self.session_end = Signal()
        # RxActive strobes.
        self.rx_start = Signal()
        self.rx_stop = Signal()
    def elaborate(self, platform):
        """ Build the RxCmd capture logic and the combinational field decode. """
        m = Module()
        # An RxCmd is present when three conditions are met:
        # - We're not actively undergoing a register read.
        # - Direction has been high for more than one cycle.
        # - NXT is low.
        # To implement the first condition, we'll first create a delayed
        # version of DIR, and then logically AND it with the current value.
        direction_delayed = Signal()
        m.d.ulpi += direction_delayed.eq(self.ulpi.dir)
        receiving = Signal()
        m.d.comb += receiving.eq(direction_delayed & self.ulpi.dir)
        # Default our strobes to 0, unless asserted.
        m.d.ulpi += [
            self.rx_start .eq(0),
            self.rx_stop .eq(0)
        ]
        # Sample the DATA lines whenever these conditions are met.
        with m.If(receiving & ~self.ulpi.nxt & ~self.register_operation_in_progress):
            m.d.ulpi += self.last_rx_command.eq(self.ulpi.data.i)
            # If RxActive has just changed, strobe the start or stop signals,
            rx_active = self.ulpi.data.i[4]
            with m.If(~self.rx_active & rx_active):
                m.d.ulpi += self.rx_start.eq(1)
            with m.If(self.rx_active & ~rx_active):
                m.d.ulpi += self.rx_stop.eq(1)
        # Break the most recent RxCmd into its UMTI-equivalent signals.
        # From table 3.8.1.2 in the ULPI spec; rev 1.1/Oct-20-2004.
        m.d.comb += [
            self.line_state .eq(self.last_rx_command[0:2]),
            self.vbus_valid .eq(self.last_rx_command[2:4] == 0b11),
            self.session_valid .eq(self.last_rx_command[2:4] == 0b10),
            self.session_end .eq(self.last_rx_command[2:4] == 0b00),
            self.rx_active .eq(self.last_rx_command[4]),
            self.rx_error .eq(self.last_rx_command[4:6] == 0b11),
            self.host_disconnect .eq(self.last_rx_command[4:6] == 0b10),
            self.id_digital .eq(self.last_rx_command[6]),
        ]
        return m
class ULPIRxEventDecoderTest(LunaGatewareTestCase):
    """ Simulation tests for ULPIRxEventDecoder's RxCmd capture and decoding. """
    ULPI_CLOCK_FREQUENCY = 60e6
    SYNC_CLOCK_FREQUENCY = None
    def instantiate_dut(self):
        # Build a stand-in ULPI bus record carrying only the fields the decoder reads.
        self.ulpi = Record([
            ("dir", 1),
            ("nxt", 1),
            ("data", [
                ("i", 8),
            ])
        ])
        return ULPIRxEventDecoder(ulpi_bus=self.ulpi)
    def initialize_signals(self):
        # Start with an idle bus and no register operation in flight.
        yield self.ulpi.dir.eq(0)
        yield self.ulpi.nxt.eq(0)
        yield self.ulpi.data.i.eq(0)
        yield self.dut.register_operation_in_progress.eq(0)
    @ulpi_domain_test_case
    def test_decode(self):
        # Provide a test value.
        yield self.ulpi.data.i.eq(0xAB)
        # First, set DIR and NXT at the same time, and verify that we
        # don't register an RxEvent (NXT high means a data byte, not an RxCmd).
        yield self.ulpi.dir.eq(1)
        yield self.ulpi.nxt.eq(1)
        yield from self.advance_cycles(5)
        self.assertEqual((yield self.dut.last_rx_command), 0x00)
        # Nothing should change when we drop DIR and NXT.
        yield self.ulpi.dir.eq(0)
        yield self.ulpi.nxt.eq(0)
        yield
        self.assertEqual((yield self.dut.last_rx_command), 0x00)
        # Setting DIR but not NXT should trigger an RxEvent; but not
        # until one cycle of "bus turnaround" has passed.
        yield self.ulpi.dir.eq(1)
        yield self.ulpi.data.i.eq(0x12)
        yield
        self.assertEqual((yield self.dut.last_rx_command), 0x00)
        yield self.ulpi.data.i.eq(0b00011110)
        yield from self.advance_cycles(2)
        self.assertEqual((yield self.dut.last_rx_command), 0b00011110)
        # Validate that we're decoding this RxCommand correctly.
        self.assertEqual((yield self.dut.line_state), 0b10)
        self.assertEqual((yield self.dut.vbus_valid), 1)
        self.assertEqual((yield self.dut.rx_active), 1)
        self.assertEqual((yield self.dut.rx_error), 0)
        self.assertEqual((yield self.dut.host_disconnect), 0)
class ULPIControlTranslator(Elaboratable):
    """ Gateware that translates ULPI control signals to their UMTI equivalents.
    I/O port:
        I: bus_idle       -- Indicates that the ULPI bus is idle, and thus capable of
                             performing register writes.
        I: xcvr_select[2] -- selects the operating speed of the transceiver;
                             00 = HS, 01 = FS, 10 = LS, 11 = LS on FS bus
        I: term_select    -- enables termination for the given operating mode; see spec
        I: op_mode        -- selects the operating mode of the transceiver;
                             00 = normal, 01 = non-driving, 10 = disable bit-stuff/NRZI
        I: suspend        -- places the transceiver into suspend mode; active high
        I: id_pullup      -- when set, places a 100kR pull-up on the ID pin
        I: dp_pulldown    -- when set, enables a 15kR pull-down on D+; intended for host mode
        I: dm_pulldown    -- when set, enables a 15kR pull-down on D-; intended for host mode
        I: chrg_vbus      -- when set, connects a resistor from VBUS to GND to discharge VBUS
        I: dischrg_vbus   -- when set, connects a resistor from VBUS to 3V3 to charge VBUS above SessValid
    NOTE(review): the chrg_vbus/dischrg_vbus descriptions above appear swapped relative
    to the ULPI OTG Control register semantics (ChrgVbus charges VBUS, DischrgVbus
    discharges it) -- confirm against the ULPI specification.
    """
    def __init__(self, *, register_window, own_register_window=False):
        """
        Parameters:
            register_window     -- The ULPI register window to work with.
            own_register_window -- True iff we're the owner of this register window.
                Typically, we'll use the register window for a broader controller;
                but this can be set to True to indicate that we need to consider this
                register window our own, and thus a submodule.
        """
        self.register_window = register_window
        self.own_register_window = own_register_window
        #
        # I/O port
        #
        self.xcvr_select = Signal(2, reset=0b01)
        self.term_select = Signal()
        self.op_mode = Signal(2)
        self.suspend = Signal()
        self.id_pullup = Signal()
        self.dp_pulldown = Signal(reset=1)
        self.dm_pulldown = Signal(reset=1)
        self.chrg_vbus = Signal()
        self.dischrg_vbus = Signal()
        # Extra/non-UMTI properties.
        self.use_external_vbus_indicator = Signal(reset=1)
        #
        # Internal variables.
        #
        # Maps register address -> the request/value/done signals created for it.
        self._register_signals = {}
    def add_composite_register(self, m, address, value, *, reset_value=0):
        """ Adds a ULPI register that's composed of multiple control signals.
        Params:
            address     -- The register number in the ULPI register space.
            value       -- An 8-bit signal composing the bits that should be placed in
                           the given register.
            reset_value -- If provided, the given value will be assumed as the reset value
                           of the given register; allowing us to avoid an initial write.
        """
        current_register_value = Signal(8, reset=reset_value, name=f"current_register_value_{address:02x}")
        # Create internal signals that request register updates.
        write_requested = Signal(name=f"write_requested_{address:02x}")
        write_value = Signal(8, name=f"write_value_{address:02x}")
        write_done = Signal(name=f"write_done_{address:02x}")
        self._register_signals[address] = {
            'write_requested': write_requested,
            'write_value': write_value,
            'write_done': write_done
        }
        # If we've just finished a write, update our current register value.
        with m.If(write_done):
            m.d.ulpi += current_register_value.eq(write_value),
        # If we have a mismatch between the requested and actual register value,
        # request a write of the new value.
        m.d.comb += write_requested.eq(current_register_value != value)
        with m.If(current_register_value != value):
            m.d.ulpi += write_value.eq(value)
    def populate_ulpi_registers(self, m):
        """ Creates translator objects that map our control signals to ULPI registers. """
        # Function control.
        function_control = Cat(self.xcvr_select, self.term_select, self.op_mode, Const(0), ~self.suspend, Const(0))
        self.add_composite_register(m, 0x04, function_control, reset_value=0b01000001)
        # OTG control.
        otg_control = Cat(
            self.id_pullup, self.dp_pulldown, self.dm_pulldown, self.dischrg_vbus,
            self.chrg_vbus, Const(0), Const(0), self.use_external_vbus_indicator
        )
        self.add_composite_register(m, 0x0A, otg_control, reset_value=0b00000110)
    def elaborate(self, platform):
        """ Builds the write-arbitration logic that pushes changed register images
        through the shared ULPI register window, one register at a time. """
        m = Module()
        if self.own_register_window:
            m.submodules.reg_window = self.register_window
        # Add the registers that represent each of our signals.
        self.populate_ulpi_registers(m)
        # Generate logic to handle changes on each of our registers.
        # Registers earlier in _register_signals take priority (If/Elif chain).
        first_element = True
        for address, signals in self._register_signals.items():
            conditional = m.If if first_element else m.Elif
            first_element = False
            # If we're requesting a write on the given register, pass that to our
            # register window.
            with conditional(signals['write_requested']):
                m.d.comb += [
                    # Control signals.
                    signals['write_done'] .eq(self.register_window.done),
                    # Register window signals.
                    self.register_window.address .eq(address),
                    self.register_window.write_data .eq(signals['write_value']),
                    self.register_window.write_request .eq(signals['write_requested'] & ~self.register_window.done)
                ]
        # If no register accesses are active, provide default signal values.
        with m.Else():
            m.d.comb += self.register_window.write_request.eq(0)
        # Ensure our register window is never performing a read.
        m.d.comb += self.register_window.read_request.eq(0)
        return m
class ControlTranslatorTest(LunaGatewareTestCase):
    """ Simulation tests for ULPIControlTranslator's register-write arbitration. """
    ULPI_CLOCK_FREQUENCY = 60e6
    SYNC_CLOCK_FREQUENCY = None
    def instantiate_dut(self):
        # The translator owns its register window for this test.
        self.reg_window = ULPIRegisterWindow()
        return ULPIControlTranslator(register_window=self.reg_window, own_register_window=True)
    def initialize_signals(self):
        dut = self.dut
        # Initialize our register signals to their default values.
        yield dut.xcvr_select.eq(1)
        yield dut.dm_pulldown.eq(1)
        yield dut.dp_pulldown.eq(1)
        yield dut.use_external_vbus_indicator.eq(0)
    @ulpi_domain_test_case
    def test_multiwrite_behavior(self):
        # Give our initialization some time to settle,
        # and verify that we haven't initiated anything in that interim.
        yield from self.advance_cycles(10)
        self.assertEqual((yield self.reg_window.write_request), 0)
        # Change signals that span two registers.
        yield self.dut.op_mode.eq(0b11)
        yield self.dut.dp_pulldown.eq(0)
        yield self.dut.dm_pulldown.eq(0)
        yield
        yield
        # Once we've changed these, we should start trying to apply
        # our new value to the function control register.
        self.assertEqual((yield self.reg_window.address), 0x04)
        self.assertEqual((yield self.reg_window.write_data), 0b01011001)
        # which should occur until the data and address are accepted.
        yield self.reg_window.ulpi_next.eq(1)
        yield from self.wait_until(self.reg_window.done, timeout=10)
        yield
        yield
        # We should then experience a write to the OTG control register.
        self.assertEqual((yield self.reg_window.address), 0x0A)
        self.assertEqual((yield self.reg_window.write_data), 0b00000000)
        # Wait for that action to complete..
        yield self.reg_window.ulpi_next.eq(1)
        yield from self.wait_until(self.reg_window.done, timeout=10)
        yield
        yield
        # After which we shouldn't be trying to write anything at all.
        self.assertEqual((yield self.reg_window.address), 0)
        self.assertEqual((yield self.reg_window.write_data), 0)
        self.assertEqual((yield self.reg_window.write_request), 0)
class UMTITranslator(Elaboratable):
    """ Gateware that translates a ULPI interface into a simpler UMTI one.
    (In this codebase, "UMTI" names the UTMI-style PHY-facing interface.)
    I/O port:
        O: busy          -- signal that's true iff the ULPI interface is being used
                            for a register or transmit command
        # See the UMTI specification for most signals.
        # Data signals:
        I: data_in[8]    -- data to be transmitted; valid when tx_valid is asserted
        O: data_out[8]   -- data received from the PHY; valid when rx_valid is asserted
        I: tx_valid      -- set to true when data is to be transmitted; indicates the data_in
                            byte is valid; de-asserting this line terminates the transmission
        O: rx_valid      -- indicates that the data present on data_out is new and valid data;
                            goes high for a single ULPI clock cycle to indicate new data is ready
        O: tx_ready      -- indicates that the PHY is ready to accept a new byte of data, and that the
                            transmitter should move on to the next byte after the given cycle
        O: rx_active     -- indicates that the PHY is actively receiving data from the host; data is
                            slewed on data_out by rx_valid
        O: rx_error      -- indicates that an error has occurred in the current transmission
        # Extra signals:
        O: rx_complete   -- strobe that goes high for one cycle when a packet rx is complete
        # Signals for diagnostic use:
        O: last_rxcmd    -- The byte content of the last RxCmd.
        I: address       -- The ULPI register address to work with.
        O: read_data[8]  -- The contents of the most recently read ULPI command.
        I: write_data[8] -- The data to be written on the next write request.
        I: manual_read   -- Strobe that triggers a diagnostic read.
        I: manual_write  -- Strobe that triggers a diagnostic write.
    """
    # UMTI status signals translated from the ULPI bus.
    RXEVENT_STATUS_SIGNALS = [
        'line_state', 'vbus_valid', 'session_valid', 'session_end',
        'rx_error', 'host_disconnect', 'id_digital'
    ]
    # Control signals that we control through our control translator, as (name, width).
    CONTROL_SIGNALS = [
        ('xcvr_select', 2), ('term_select', 1), ('op_mode', 2), ('suspend', 1),
        ('id_pullup', 1), ('dm_pulldown', 1), ('dp_pulldown', 1), ('chrg_vbus', 1),
        ('dischrg_vbus', 1), ('use_external_vbus_indicator', 1)
    ]

    def __dir__(self):
        """ Extend our properties list to contain all of the above fields, for proper autocomplete.
        Fixes two defects in the previous implementation: it referenced a nonexistent
        DATA_STATUS_SIGNALS attribute (raising AttributeError whenever dir() was called),
        and it extended the list with (name, width) tuples rather than attribute names.
        """
        properties = list(super().__dir__())
        properties.extend(self.RXEVENT_STATUS_SIGNALS)
        properties.extend(name for name, _ in self.CONTROL_SIGNALS)
        return properties

    def __init__(self, *, ulpi, use_platform_registers=True):
        """ Params:
            ulpi                   -- The ULPI bus to communicate with.
            use_platform_registers -- If True (or not provided), any extra registers writes provided in
                                      the platform definition will be applied automatically.
        """
        self.use_platform_registers = use_platform_registers
        #
        # I/O port
        #
        self.ulpi = ulpi
        self.busy = Signal()
        # Data signals.
        self.data_out = Signal(8)
        self.rx_valid = Signal()
        self.data_in = Signal(8)
        # Status signals.
        self.rx_active = Signal()
        # RxEvent-based flags, created dynamically from RXEVENT_STATUS_SIGNALS.
        for signal_name in self.RXEVENT_STATUS_SIGNALS:
            self.__dict__[signal_name] = Signal(name=signal_name)
        # Control signals, created dynamically from CONTROL_SIGNALS.
        for signal_name, size in self.CONTROL_SIGNALS:
            self.__dict__[signal_name] = Signal(size, name=signal_name)
        # Diagnostic I/O.
        self.last_rx_command = Signal(8)
        #
        # Internal
        #
        # Extra registers to be written: address -> {'value', 'default'}.
        self._extra_registers = {}

    def add_extra_register(self, write_address, write_value, *, default_value=None):
        """ Adds logic to configure an extra ULPI register. Useful for configuring vendor registers.
        Params:
            write_address -- The write address of the target ULPI register.
            write_value   -- The value to be written. If a Signal is provided; the given register will be
                             set post-reset, if necessary; and then dynamically updated each time the signal changes.
                             If an integer constant is provided, this value will be written once upon startup.
            default_value -- The default value the register is expected to have post-reset; used to determine
                             if the value needs to be updated post-reset. If a Signal is provided for write_value,
                             this must be provided; if an integer is provided for write_value, this is optional.
        """
        # Ensure we have a default_value if we have a Signal(); as this will determine
        # whether we need to update the register post-reset.
        if (default_value is None) and isinstance(write_value, Signal):
            raise ValueError("if write_value is a signal, default_value must be provided")
        # Otherwise, we'll pick a value that ensures the write always occurs.
        elif default_value is None:
            default_value = ~write_value
        self._extra_registers[write_address] = {'value': write_value, 'default': default_value}

    def elaborate(self, platform):
        """ Stitches together the register window, control translator, and RxEvent
        decoder, and derives the UMTI-style data/status signals from the ULPI bus. """
        m = Module()
        # Create the component parts of our ULPI interfacing hardware.
        m.submodules.register_window = register_window = ULPIRegisterWindow()
        m.submodules.control_translator = control_translator = ULPIControlTranslator(register_window=register_window)
        m.submodules.rxevent_decoder = rxevent_decoder = ULPIRxEventDecoder(ulpi_bus=self.ulpi)
        # If we're choosing to honor any registers defined in the platform file, apply those
        # before continuing with elaboration.
        if self.use_platform_registers and hasattr(platform, 'ulpi_extra_registers'):
            for address, value in platform.ulpi_extra_registers.items():
                self.add_extra_register(address, value)
        # Add any extra registers provided by the user to our control translator.
        for address, values in self._extra_registers.items():
            control_translator.add_composite_register(m, address, values['value'], reset_value=values['default'])
        # Connect our ULPI control signals to each of our subcomponents.
        m.d.comb += [
            # Drive the bus whenever the target PHY isn't.
            self.ulpi.data.oe .eq(~self.ulpi.dir),
            # Generate our busy signal.
            self.busy .eq(register_window.busy),
            # Connect up our clock and reset signals.
            self.ulpi.clk .eq(ClockSignal("ulpi")),
            self.ulpi.rst .eq(ResetSignal("ulpi")),
            # Connect our data inputs to the event decoder.
            # Note that the event decoder is purely passive.
            rxevent_decoder.register_operation_in_progress.eq(register_window.busy),
            self.last_rx_command .eq(rxevent_decoder.last_rx_command),
            # Connect our signals to our register window.
            register_window.ulpi_data_in .eq(self.ulpi.data.i),
            register_window.ulpi_dir .eq(self.ulpi.dir),
            register_window.ulpi_next .eq(self.ulpi.nxt),
            self.ulpi.data.o .eq(register_window.ulpi_data_out),
            self.ulpi.stp .eq(register_window.ulpi_stop),
        ]
        # Connect our RxEvent status signals from our RxEvent decoder.
        for signal_name in self.RXEVENT_STATUS_SIGNALS:
            signal = getattr(rxevent_decoder, signal_name)
            m.d.comb += self.__dict__[signal_name].eq(signal)
        # Connect our control signals through the control translator.
        for signal_name, _ in self.CONTROL_SIGNALS:
            signal = getattr(control_translator, signal_name)
            m.d.comb += signal.eq(self.__dict__[signal_name])
        # RxActive handler:
        # A transmission starts when DIR goes high with NXT, or when an RxEvent indicates
        # a switch from RxActive = 0 to RxActive = 1. A transmission stops when DIR drops low,
        # or when the RxEvent RxActive bit drops from 1 to 0, or an error occurs.
        dir_rising_edge = rising_edge_detector(m, self.ulpi.dir, domain=m.d.ulpi)
        dir_based_start = dir_rising_edge & self.ulpi.nxt
        with m.If(~self.ulpi.dir | rxevent_decoder.rx_stop):
            # TODO: this should probably also trigger if RxError
            m.d.ulpi += self.rx_active.eq(0)
        with m.Elif(dir_based_start | rxevent_decoder.rx_start):
            m.d.ulpi += self.rx_active.eq(1)
        # Data-out: we'll connect this almost direct through from our ULPI
        # interface, as it's essentially the same as in the UMTI spec. We'll
        # add a one cycle processing delay so it matches the rest of our signals.
        # RxValid: equivalent to NXT whenever a Rx is active.
        m.d.ulpi += [
            self.data_out .eq(self.ulpi.data.i),
            self.rx_valid .eq(self.ulpi.nxt & self.rx_active)
        ]
        return m
# Allow this module to be executed directly to run its self-tests.
if __name__ == "__main__":
    unittest.main()
| 38.106816 | 117 | 0.602419 |
from nmigen import Signal, Module, Cat, Elaboratable, ClockSignal, Record, ResetSignal, Const
import unittest
from nmigen.back.pysim import Simulator
from ..test import LunaGatewareTestCase, ulpi_domain_test_case, sync_test_case
from ..utils import rising_edge_detector, falling_edge_detector
class ULPIRegisterWindow(Elaboratable):
    """ Gateware that performs ULPI immediate register reads and writes.
    Drives a small 'ulpi'-domain FSM that issues REG_READ / REG_WRITE commands
    on the shared ULPI data bus, yielding to the PHY whenever DIR is asserted.
    I/O (all in the 'ulpi' clock domain):
        I: address[6]     -- register address for the next read/write
        I: read_request   -- strobe; begins a register read
        O: read_data[8]   -- result of the last completed read; valid when done strobes
        I: write_request  -- strobe; begins a register write
        I: write_data[8]  -- value to be written
        O: done           -- single-cycle strobe when an operation completes
        O: busy           -- high whenever the FSM is not IDLE
        ULPI pins: ulpi_data_in/out, ulpi_out_req, ulpi_dir, ulpi_next, ulpi_stop
    """
    COMMAND_REG_WRITE = 0b10000000
    COMMAND_REG_READ = 0b11000000
    def __init__(self):
        # ULPI bus connections.
        self.ulpi_data_in = Signal(8)
        self.ulpi_data_out = Signal(8)
        self.ulpi_out_req = Signal()
        self.ulpi_dir = Signal()
        self.ulpi_next = Signal()
        self.ulpi_stop = Signal()
        # Status.
        self.busy = Signal()
        # Request interface.
        self.address = Signal(6)
        self.done = Signal()
        self.read_request = Signal()
        self.read_data = Signal(8)
        self.write_request = Signal()
        self.write_data = Signal(8)
    def elaborate(self, platform):
        """ Builds the register-access FSM. """
        m = Module()
        # Latched copies of the request parameters, captured while IDLE.
        # NOTE(review): current_address/current_write are latched but the FSM
        # drives the command from self.address/self.write_data directly --
        # confirm whether the latched copies are intentionally unused here.
        current_address = Signal(6)
        current_write = Signal(8)
        # Strobes default to de-asserted each cycle unless driven below.
        m.d.ulpi += [
            self.ulpi_out_req.eq(0),
            self.ulpi_stop .eq(0),
            self.done .eq(0)
        ]
        with m.FSM(domain='ulpi') as fsm:
            # We're busy whenever we're not IDLE.
            m.d.comb += self.busy.eq(~fsm.ongoing('IDLE'))
            # IDLE: wait for a read or write request.
            with m.State('IDLE'):
                # Drive zeroes while idle, so we never present stale data.
                m.d.ulpi += self.ulpi_data_out.eq(0)
                # Capture the request parameters.
                m.d.ulpi += [
                    current_address .eq(self.address),
                    current_write .eq(self.write_data)
                ]
                with m.If(self.read_request):
                    m.next = 'START_READ'
                with m.If(self.write_request):
                    m.next = 'START_WRITE'
            # START_READ: wait for the bus to be idle, so we can transmit.
            with m.State('START_READ'):
                with m.If(~self.ulpi_dir):
                    m.next = 'SEND_READ_ADDRESS'
                    m.d.ulpi += [
                        self.ulpi_data_out .eq(self.COMMAND_REG_READ | self.address),
                        self.ulpi_out_req .eq(1)
                    ]
            # SEND_READ_ADDRESS: present the read command until the PHY accepts it.
            with m.State('SEND_READ_ADDRESS'):
                m.d.ulpi += self.ulpi_out_req.eq(1)
                # If DIR has become asserted, we're being interrupted.
                with m.If(self.ulpi_dir):
                    m.next = 'START_READ'
                    m.d.ulpi += self.ulpi_out_req.eq(0)
                # If NXT becomes asserted without us being interrupted by
                # DIR, then the PHY has accepted the read. Release our write
                # request, so the next cycle can properly act as a bus turnaround.
                with m.Elif(self.ulpi_next):
                    m.d.ulpi += [
                        self.ulpi_out_req .eq(0),
                        self.ulpi_data_out .eq(0),
                    ]
                    m.next = 'READ_TURNAROUND'
            # READ_TURNAROUND: wait for the PHY to take control of the ULPI bus.
            with m.State('READ_TURNAROUND'):
                # After one cycle, we should have a data byte ready.
                m.next = 'READ_COMPLETE'
            # READ_COMPLETE: the ULPI read exchange is complete, and the read data is ready.
            with m.State('READ_COMPLETE'):
                m.next = 'IDLE'
                # Latch in the data, and indicate that we have new, valid data.
                m.d.ulpi += [
                    self.read_data .eq(self.ulpi_data_in),
                    self.done .eq(1)
                ]
            #
            # Write handling.
            #
            # START_WRITE: wait for the bus to be idle, so we can transmit.
            with m.State('START_WRITE'):
                # Wait for the bus to be idle.
                with m.If(~self.ulpi_dir):
                    m.next = 'SEND_WRITE_ADDRESS'
                    # Once it is, start sending our command.
                    m.d.ulpi += [
                        self.ulpi_data_out .eq(self.COMMAND_REG_WRITE | self.address),
                        self.ulpi_out_req .eq(1)
                    ]
            # SEND_WRITE_ADDRESS: Continue sending the write address until the
            # target device accepts it.
            with m.State('SEND_WRITE_ADDRESS'):
                m.d.ulpi += self.ulpi_out_req.eq(1)
                # If DIR has become asserted, we're being interrupted.
                with m.If(self.ulpi_dir):
                    m.next = 'START_WRITE'
                    m.d.ulpi += self.ulpi_out_req.eq(0)
                # Hold our address until the PHY has accepted the command;
                # and then move to presenting the PHY with the value to be written.
                with m.Elif(self.ulpi_next):
                    m.d.ulpi += self.ulpi_data_out.eq(self.write_data)
                    m.next = 'HOLD_WRITE'
            # Hold the write data on the bus until the device acknowledges it.
            with m.State('HOLD_WRITE'):
                m.d.ulpi += self.ulpi_out_req.eq(1)
                # Handle interruption.
                with m.If(self.ulpi_dir):
                    m.next = 'START_WRITE'
                    m.d.ulpi += self.ulpi_out_req.eq(0)
                # Hold the data present until the device has accepted it.
                # Once it has, pulse STP for a cycle to complete the transaction.
                with m.Elif(self.ulpi_next):
                    m.d.ulpi += [
                        self.ulpi_data_out.eq(0),
                        self.ulpi_out_req.eq(0),
                        self.ulpi_stop.eq(1),
                        self.done.eq(1)
                    ]
                    m.next = 'IDLE'
        return m
class TestULPIRegisters(LunaGatewareTestCase):
    """ Simulation tests for ULPIRegisterWindow's read/write FSM, including
    PHY-interrupted (DIR-asserted) accesses. """
    FRAGMENT_UNDER_TEST = ULPIRegisterWindow
    ULPI_CLOCK_FREQUENCY = 60e6
    SYNC_CLOCK_FREQUENCY = None
    def initialize_signals(self):
        # Idle bus, no pending requests.
        yield self.dut.ulpi_dir.eq(0)
        yield self.dut.read_request.eq(0)
        yield self.dut.write_request.eq(0)
    @ulpi_domain_test_case
    def test_idle_behavior(self):
        # While idle, the window should never drive data.
        self.assertEqual((yield self.dut.ulpi_data_out), 0)
    @ulpi_domain_test_case
    def test_register_read(self):
        # Poison the register value with a fail value (0xBD).
        yield self.dut.ulpi_data_in.eq(0xBD)
        # Set up a read request for register 0.
        yield self.dut.address.eq(0)
        yield
        # After a read request, we should be busy...
        yield from self.pulse(self.dut.read_request)
        self.assertEqual((yield self.dut.busy), 1)
        # ... and then, since dir is unasserted, we should see the read command.
        yield
        self.assertEqual((yield self.dut.ulpi_data_out), 0b11000000)
        # We should continue to present the command...
        yield from self.advance_cycles(10)
        self.assertEqual((yield self.dut.ulpi_data_out), 0b11000000)
        self.assertEqual((yield self.dut.busy), 1)
        # ... until the PHY accepts it via NXT.
        yield self.dut.ulpi_next.eq(1)
        yield
        # One bus-turnaround cycle should pass before reading.
        yield
        # And then we should read whatever value is present.
        yield self.dut.ulpi_data_in.eq(0x07)
        yield
        yield
        self.assertEqual((yield self.dut.read_data), 0x07)
        # Finally, we should return to idle.
        self.assertEqual((yield self.dut.busy), 0)
    @ulpi_domain_test_case
    def test_interrupted_read(self):
        # Set up a read request while DIR is asserted (PHY owns the bus).
        yield self.dut.ulpi_dir.eq(1)
        yield self.dut.address.eq(0)
        yield from self.pulse(self.dut.read_request)
        # We shouldn't try to output anything until DIR is de-asserted.
        yield from self.advance_cycles(1)
        self.assertEqual((yield self.dut.ulpi_out_req), 0)
        yield from self.advance_cycles(10)
        self.assertEqual((yield self.dut.ulpi_out_req), 0)
        # Once DIR drops, the read command should appear...
        yield self.dut.ulpi_dir.eq(0)
        yield from self.advance_cycles(2)
        self.assertEqual((yield self.dut.ulpi_data_out), 0b11000000)
        # ... an interruption should abort the drive...
        yield self.dut.ulpi_dir.eq(1)
        yield from self.advance_cycles(2)
        self.assertEqual((yield self.dut.ulpi_out_req), 0)
        # ... and the command should be retried afterwards.
        yield self.dut.ulpi_dir.eq(0)
        yield from self.advance_cycles(2)
        self.assertEqual((yield self.dut.ulpi_data_out), 0b11000000)
        yield self.dut.ulpi_next.eq(1)
        yield
        yield
        yield self.dut.ulpi_data_in.eq(0x07)
        yield
        yield
        self.assertEqual((yield self.dut.read_data), 0x07)
        self.assertEqual((yield self.dut.busy), 0)
    @ulpi_domain_test_case
    def test_register_write(self):
        # Request a write of 0xBC to register 0b10.
        yield self.dut.address.eq(0b10)
        yield self.dut.write_data.eq(0xBC)
        yield
        yield from self.pulse(self.dut.write_request)
        self.assertEqual((yield self.dut.busy), 1)
        # The write command should be presented and held...
        yield
        self.assertEqual((yield self.dut.ulpi_data_out), 0b10000010)
        yield from self.advance_cycles(10)
        self.assertEqual((yield self.dut.ulpi_data_out), 0b10000010)
        self.assertEqual((yield self.dut.busy), 1)
        # ... until accepted, after which the data byte should be presented...
        yield self.dut.ulpi_next.eq(1)
        yield
        yield self.dut.ulpi_next.eq(0)
        yield
        self.assertEqual((yield self.dut.ulpi_data_out), 0xBC)
        yield from self.advance_cycles(10)
        self.assertEqual((yield self.dut.ulpi_data_out), 0xBC)
        # ... and, once accepted, STP should pulse to complete the write.
        yield self.dut.ulpi_next.eq(1)
        yield from self.advance_cycles(2)
        self.assertEqual((yield self.dut.ulpi_stop), 1)
        yield
        self.assertEqual((yield self.dut.ulpi_stop), 0)
        self.assertEqual((yield self.dut.busy), 0)
class ULPIRxEventDecoder(Elaboratable):
    """ Passive decoder that captures ULPI RxCmd bytes and breaks them into
    UTMI-style status flags (line_state, VBUS levels, rx_active, etc.). """
    def __init__(self, *, ulpi_bus):
        # Params:
        #   ulpi_bus -- record providing .dir, .nxt, and .data.i from the ULPI PHY.
        self.ulpi = ulpi_bus
        # When high, RxCmd capture is suppressed (the bus carries register data).
        self.register_operation_in_progress = Signal()
        # Most recently captured RxCmd byte.
        self.last_rx_command = Signal(8)
        # Decoded status flags.
        self.line_state = Signal(2)
        self.rx_active = Signal()
        self.rx_error = Signal()
        self.host_disconnect = Signal()
        self.id_digital = Signal()
        self.vbus_valid = Signal()
        self.session_valid = Signal()
        self.session_end = Signal()
        # Single-cycle strobes on RxActive edges.
        self.rx_start = Signal()
        self.rx_stop = Signal()
    def elaborate(self, platform):
        """ Builds the RxCmd capture and decode logic, in the 'ulpi' domain. """
        m = Module()
        # An RxCmd is present when no register read is in progress,
        # - Direction has been high for more than one cycle.
        # - NXT is low.
        # To implement the first condition, we'll first create a delayed
        # version of DIR, ANDed with its current value.
        direction_delayed = Signal()
        m.d.ulpi += direction_delayed.eq(self.ulpi.dir)
        receiving = Signal()
        m.d.comb += receiving.eq(direction_delayed & self.ulpi.dir)
        # Default the edge strobes to 0 unless asserted below.
        m.d.ulpi += [
            self.rx_start .eq(0),
            self.rx_stop .eq(0)
        ]
        # Capture the RxCmd byte whenever the conditions above hold.
        with m.If(receiving & ~self.ulpi.nxt & ~self.register_operation_in_progress):
            m.d.ulpi += self.last_rx_command.eq(self.ulpi.data.i)
        # Strobe rx_start/rx_stop on RxActive transitions.
        rx_active = self.ulpi.data.i[4]
        with m.If(~self.rx_active & rx_active):
            m.d.ulpi += self.rx_start.eq(1)
        with m.If(self.rx_active & ~rx_active):
            m.d.ulpi += self.rx_stop.eq(1)
        # Decode the most recent RxCmd per the ULPI spec (table 3.8.1.2, rev 1.1).
        m.d.comb += [
            self.line_state .eq(self.last_rx_command[0:2]),
            self.vbus_valid .eq(self.last_rx_command[2:4] == 0b11),
            self.session_valid .eq(self.last_rx_command[2:4] == 0b10),
            self.session_end .eq(self.last_rx_command[2:4] == 0b00),
            self.rx_active .eq(self.last_rx_command[4]),
            self.rx_error .eq(self.last_rx_command[4:6] == 0b11),
            self.host_disconnect .eq(self.last_rx_command[4:6] == 0b10),
            self.id_digital .eq(self.last_rx_command[6]),
        ]
        return m
class ULPIRxEventDecoderTest(LunaGatewareTestCase):
    """ Simulation tests for ULPIRxEventDecoder's RxCmd capture and decoding. """
    ULPI_CLOCK_FREQUENCY = 60e6
    SYNC_CLOCK_FREQUENCY = None
    def instantiate_dut(self):
        # Stand-in ULPI bus record with just the fields the decoder reads.
        self.ulpi = Record([
            ("dir", 1),
            ("nxt", 1),
            ("data", [
                ("i", 8),
            ])
        ])
        return ULPIRxEventDecoder(ulpi_bus=self.ulpi)
    def initialize_signals(self):
        # Idle bus; no register operation in progress.
        yield self.ulpi.dir.eq(0)
        yield self.ulpi.nxt.eq(0)
        yield self.ulpi.data.i.eq(0)
        yield self.dut.register_operation_in_progress.eq(0)
    @ulpi_domain_test_case
    def test_decode(self):
        # DIR + NXT together means a data byte, not an RxCmd: nothing captured.
        yield self.ulpi.data.i.eq(0xAB)
        yield self.ulpi.dir.eq(1)
        yield self.ulpi.nxt.eq(1)
        yield from self.advance_cycles(5)
        self.assertEqual((yield self.dut.last_rx_command), 0x00)
        # Nothing should change when we drop DIR and NXT.
        yield self.ulpi.dir.eq(0)
        yield self.ulpi.nxt.eq(0)
        yield
        self.assertEqual((yield self.dut.last_rx_command), 0x00)
        # Setting DIR but not NXT should trigger an RxEvent; but not
        # until one cycle of "bus turnaround" has passed.
        yield self.ulpi.dir.eq(1)
        yield self.ulpi.data.i.eq(0x12)
        yield
        self.assertEqual((yield self.dut.last_rx_command), 0x00)
        yield self.ulpi.data.i.eq(0b00011110)
        yield from self.advance_cycles(2)
        self.assertEqual((yield self.dut.last_rx_command), 0b00011110)
        # Validate that we're decoding this RxCommand correctly.
        self.assertEqual((yield self.dut.line_state), 0b10)
        self.assertEqual((yield self.dut.vbus_valid), 1)
        self.assertEqual((yield self.dut.rx_active), 1)
        self.assertEqual((yield self.dut.rx_error), 0)
        self.assertEqual((yield self.dut.host_disconnect), 0)
class ULPIControlTranslator(Elaboratable):
    """ Gateware that translates UTMI-style control inputs (xcvr_select, op_mode,
    pulldowns, VBUS controls, ...) into ULPI register writes, issued through a
    shared ULPIRegisterWindow whenever a register image changes. """
    def __init__(self, *, register_window, own_register_window=False):
        # Params:
        #   register_window     -- the ULPIRegisterWindow used to perform writes.
        #   own_register_window -- True iff this translator should register the
        #                          window as its own submodule.
        self.register_window = register_window
        self.own_register_window = own_register_window
        # Control inputs; reset values match the ULPI register defaults.
        self.xcvr_select = Signal(2, reset=0b01)
        self.term_select = Signal()
        self.op_mode = Signal(2)
        self.suspend = Signal()
        self.id_pullup = Signal()
        self.dp_pulldown = Signal(reset=1)
        self.dm_pulldown = Signal(reset=1)
        self.chrg_vbus = Signal()
        self.dischrg_vbus = Signal()
        self.use_external_vbus_indicator = Signal(reset=1)
        # Maps register address -> its request/value/done signals.
        self._register_signals = {}
    def add_composite_register(self, m, address, value, *, reset_value=0):
        """ Adds a ULPI register composed of multiple control signals; a write is
        requested whenever the desired value differs from the last-written one. """
        current_register_value = Signal(8, reset=reset_value, name=f"current_register_value_{address:02x}")
        # Internal signals that request register updates.
        write_requested = Signal(name=f"write_requested_{address:02x}")
        write_value = Signal(8, name=f"write_value_{address:02x}")
        write_done = Signal(name=f"write_done_{address:02x}")
        self._register_signals[address] = {
            'write_requested': write_requested,
            'write_value': write_value,
            'write_done': write_done
        }
        # Once a write completes, remember the value now held by the register.
        with m.If(write_done):
            m.d.ulpi += current_register_value.eq(write_value),
        # If we have a mismatch between the requested and actual register value,
        # request a write of the new value.
        m.d.comb += write_requested.eq(current_register_value != value)
        with m.If(current_register_value != value):
            m.d.ulpi += write_value.eq(value)
    def populate_ulpi_registers(self, m):
        """ Maps our control signals onto the ULPI Function Control (0x04) and
        OTG Control (0x0A) register images. """
        # Function control.
        function_control = Cat(self.xcvr_select, self.term_select, self.op_mode, Const(0), ~self.suspend, Const(0))
        self.add_composite_register(m, 0x04, function_control, reset_value=0b01000001)
        # OTG control.
        otg_control = Cat(
            self.id_pullup, self.dp_pulldown, self.dm_pulldown, self.dischrg_vbus,
            self.chrg_vbus, Const(0), Const(0), self.use_external_vbus_indicator
        )
        self.add_composite_register(m, 0x0A, otg_control, reset_value=0b00000110)
    def elaborate(self, platform):
        """ Builds the write arbitration: one pending register write is forwarded
        to the register window at a time, earlier registers taking priority. """
        m = Module()
        if self.own_register_window:
            m.submodules.reg_window = self.register_window
        # Add the registers that represent each of our signals.
        self.populate_ulpi_registers(m)
        # Generate logic to handle changes on each of our registers.
        first_element = True
        for address, signals in self._register_signals.items():
            conditional = m.If if first_element else m.Elif
            first_element = False
            # If we're requesting a write on the given register, pass that to our
            # register window.
            with conditional(signals['write_requested']):
                m.d.comb += [
                    signals['write_done'] .eq(self.register_window.done),
                    self.register_window.address .eq(address),
                    self.register_window.write_data .eq(signals['write_value']),
                    self.register_window.write_request .eq(signals['write_requested'] & ~self.register_window.done)
                ]
        # If no register accesses are active, provide default signal values.
        with m.Else():
            m.d.comb += self.register_window.write_request.eq(0)
        # Ensure our register window is never performing a read.
        m.d.comb += self.register_window.read_request.eq(0)
        return m
class ControlTranslatorTest(LunaGatewareTestCase):
    """ Simulation tests for ULPIControlTranslator's register-write arbitration. """
    ULPI_CLOCK_FREQUENCY = 60e6
    SYNC_CLOCK_FREQUENCY = None
    def instantiate_dut(self):
        # The translator owns its register window for this test.
        self.reg_window = ULPIRegisterWindow()
        return ULPIControlTranslator(register_window=self.reg_window, own_register_window=True)
    def initialize_signals(self):
        dut = self.dut
        # Initialize the control inputs to their register-default values.
        yield dut.xcvr_select.eq(1)
        yield dut.dm_pulldown.eq(1)
        yield dut.dp_pulldown.eq(1)
        yield dut.use_external_vbus_indicator.eq(0)
    @ulpi_domain_test_case
    def test_multiwrite_behavior(self):
        # With defaults applied, no write should be requested.
        yield from self.advance_cycles(10)
        self.assertEqual((yield self.reg_window.write_request), 0)
        # Change signals that span two registers.
        yield self.dut.op_mode.eq(0b11)
        yield self.dut.dp_pulldown.eq(0)
        yield self.dut.dm_pulldown.eq(0)
        yield
        yield
        # Once we've changed these, we should start trying to apply
        # the new function-control value (register 0x04) first.
        self.assertEqual((yield self.reg_window.address), 0x04)
        self.assertEqual((yield self.reg_window.write_data), 0b01011001)
        # Accept the write, and wait for it to complete.
        yield self.reg_window.ulpi_next.eq(1)
        yield from self.wait_until(self.reg_window.done, timeout=10)
        yield
        yield
        # The OTG control register (0x0A) write should follow.
        self.assertEqual((yield self.reg_window.address), 0x0A)
        self.assertEqual((yield self.reg_window.write_data), 0b00000000)
        yield self.reg_window.ulpi_next.eq(1)
        yield from self.wait_until(self.reg_window.done, timeout=10)
        yield
        yield
        # After which nothing further should be written.
        self.assertEqual((yield self.reg_window.address), 0)
        self.assertEqual((yield self.reg_window.write_data), 0)
        self.assertEqual((yield self.reg_window.write_request), 0)
class UMTITranslator(Elaboratable):
    """ Gateware that translates a ULPI interface into a simpler UMTI (UTMI-style) one. """
    # UMTI status signals translated from the ULPI bus.
    RXEVENT_STATUS_SIGNALS = [
        'line_state', 'vbus_valid', 'session_valid', 'session_end',
        'rx_error', 'host_disconnect', 'id_digital'
    ]
    # Control signals that we control through our control translator, as (name, width).
    CONTROL_SIGNALS = [
        ('xcvr_select', 2), ('term_select', 1), ('op_mode', 2), ('suspend', 1),
        ('id_pullup', 1), ('dm_pulldown', 1), ('dp_pulldown', 1), ('chrg_vbus', 1),
        ('dischrg_vbus', 1), ('use_external_vbus_indicator', 1)
    ]
def __dir__(self):
properties = list(super().__dir__())
properties.extend(self.RXEVENT_STATUS_SIGNALS)
properties.extend(self.DATA_STATUS_SIGNALS)
properties.extend(self.CONTROL_SIGNALS)
return properties
    def __init__(self, *, ulpi, use_platform_registers=True):
        """ Params:
            ulpi                   -- The ULPI bus to communicate with.
            use_platform_registers -- If True, extra register writes provided in the
                                      platform definition are applied automatically.
        """
        self.use_platform_registers = use_platform_registers
        #
        # I/O port
        #
        self.ulpi = ulpi
        self.busy = Signal()
        # Data signals.
        self.data_out = Signal(8)
        self.rx_valid = Signal()
        self.data_in = Signal(8)
        # Status signals.
        self.rx_active = Signal()
        # RxEvent-based flags, created dynamically from RXEVENT_STATUS_SIGNALS.
        for signal_name in self.RXEVENT_STATUS_SIGNALS:
            self.__dict__[signal_name] = Signal(name=signal_name)
        # Control signals, created dynamically from CONTROL_SIGNALS.
        for signal_name, size in self.CONTROL_SIGNALS:
            self.__dict__[signal_name] = Signal(size, name=signal_name)
        # Diagnostic I/O.
        self.last_rx_command = Signal(8)
        #
        # Internal
        #
        # Create a list of extra registers to be set: address -> {'value', 'default'}.
        self._extra_registers = {}
def add_extra_register(self, write_address, write_value, *, default_value=None):
# Ensure we have a default_value if we have a Signal(); as this will determine
# whether we need to update the register post-reset.
if (default_value is None) and isinstance(write_value, Signal):
raise ValueError("if write_value is a signal, default_value must be provided")
# Otherwise, we'll pick a value that ensures the write always occurs.
elif default_value is None:
default_value = ~write_value
self._extra_registers[write_address] = {'value': write_value, 'default': default_value}
def elaborate(self, platform):
m = Module()
m.submodules.register_window = register_window = ULPIRegisterWindow()
m.submodules.control_translator = control_translator = ULPIControlTranslator(register_window=register_window)
m.submodules.rxevent_decoder = rxevent_decoder = ULPIRxEventDecoder(ulpi_bus=self.ulpi)
# before continuing with elaboration.
if self.use_platform_registers and hasattr(platform, 'ulpi_extra_registers'):
for address, value in platform.ulpi_extra_registers.items():
self.add_extra_register(address, value)
# Add any extra registers provided by the user to our control translator.
for address, values in self._extra_registers.items():
control_translator.add_composite_register(m, address, values['value'], reset_value=values['default'])
# Connect our ULPI control signals to each of our subcomponents.
m.d.comb += [
# Drive the bus whenever the target PHY isn't.
self.ulpi.data.oe .eq(~self.ulpi.dir),
self.busy .eq(register_window.busy),
self.ulpi.clk .eq(ClockSignal("ulpi")),
self.ulpi.rst .eq(ResetSignal("ulpi")),
rxevent_decoder.register_operation_in_progress.eq(register_window.busy),
self.last_rx_command .eq(rxevent_decoder.last_rx_command),
register_window.ulpi_data_in .eq(self.ulpi.data.i),
register_window.ulpi_dir .eq(self.ulpi.dir),
register_window.ulpi_next .eq(self.ulpi.nxt),
self.ulpi.data.o .eq(register_window.ulpi_data_out),
self.ulpi.stp .eq(register_window.ulpi_stop),
]
for signal_name in self.RXEVENT_STATUS_SIGNALS:
signal = getattr(rxevent_decoder, signal_name)
m.d.comb += self.__dict__[signal_name].eq(signal)
for signal_name, _ in self.CONTROL_SIGNALS:
signal = getattr(control_translator, signal_name)
m.d.comb += signal.eq(self.__dict__[signal_name])
dir_rising_edge = rising_edge_detector(m, self.ulpi.dir, domain=m.d.ulpi)
dir_based_start = dir_rising_edge & self.ulpi.nxt
with m.If(~self.ulpi.dir | rxevent_decoder.rx_stop):
m.d.ulpi += self.rx_active.eq(0)
with m.Elif(dir_based_start | rxevent_decoder.rx_start):
m.d.ulpi += self.rx_active.eq(1)
# interface, as it's essentially the same as in the UMTI spec. We'll
# add a one cycle processing delay so it matches the rest of our signals.
# RxValid: equivalent to NXT whenever a Rx is active.
m.d.ulpi += [
self.data_out .eq(self.ulpi.data.i),
self.rx_valid .eq(self.ulpi.nxt & self.rx_active)
]
return m
# Allow running this test file directly to execute its unittest suite.
if __name__ == "__main__":
    unittest.main()
| true | true |
f7f4749dcf3bfc4f04d3741a736ed87925969e4b | 193 | py | Python | examples_elo_ratings/latex_cov_elo_ratings_2.py | MarcoGorelli/precise | 227d017d45f1c4b39887a85133f3d62950a1e341 | [
"MIT"
] | 40 | 2022-01-13T00:40:59.000Z | 2022-03-31T20:33:19.000Z | examples_elo_ratings/latex_cov_elo_ratings_2.py | MarcoGorelli/precise | 227d017d45f1c4b39887a85133f3d62950a1e341 | [
"MIT"
] | 14 | 2022-01-08T16:00:12.000Z | 2022-03-16T00:12:04.000Z | examples_elo_ratings/latex_cov_elo_ratings_2.py | MarcoGorelli/precise | 227d017d45f1c4b39887a85133f3d62950a1e341 | [
"MIT"
] | 9 | 2022-01-26T21:14:43.000Z | 2022-03-21T17:32:02.000Z | from precise.skatervaluation.battlelatex.tables import elo_latex_table
# Script entry point: build the LaTeX Elo-rating table for variance-estimator
# "managers" evaluated on the stocks_20_days_p2_n60 battle category.
if __name__=='__main__':
    ltx = elo_latex_table(genre='manager_var',category='stocks_20_days_p2_n60')
print(ltx) | 38.6 | 80 | 0.792746 | from precise.skatervaluation.battlelatex.tables import elo_latex_table
if __name__=='__main__':
ltx = elo_latex_table(genre='manager_var',category='stocks_20_days_p2_n60')
print(ltx) | true | true |
f7f474efa1179d1343e1690744401425b2e9ed31 | 783 | py | Python | unit_tests/python_lib/test_create_roles_cf.py | adrianmkng/watchmen | 4be15ad64a5d54d4f546ca8c139fa41fd42dd6aa | [
"Apache-2.0"
] | 190 | 2017-12-13T05:01:42.000Z | 2021-11-15T23:35:54.000Z | unit_tests/python_lib/test_create_roles_cf.py | adrianmkng/watchmen | 4be15ad64a5d54d4f546ca8c139fa41fd42dd6aa | [
"Apache-2.0"
] | 2 | 2018-08-31T04:53:03.000Z | 2018-11-14T00:13:49.000Z | unit_tests/python_lib/test_create_roles_cf.py | adrianmkng/watchmen | 4be15ad64a5d54d4f546ca8c139fa41fd42dd6aa | [
"Apache-2.0"
] | 22 | 2017-12-13T04:36:46.000Z | 2021-07-29T07:37:41.000Z | # Copyright 2017 Insurance Australia Group Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from mock import patch
import python_lib.create_roles_cf as roles_cf
@patch("python_lib.get_accounts.get_accounts")
def test_main(mock_get_accounts):
    """main() should finish without a return value while account lookups are mocked out."""
    result = roles_cf.main()
    assert result is None
| 37.285714 | 74 | 0.780332 |
from mock import patch
import python_lib.create_roles_cf as roles_cf
@patch("python_lib.get_accounts.get_accounts")
def test_main(mock_get_accounts):
assert roles_cf.main() is None
| true | true |
f7f4752faa807dd34b3fca8daacd73ca74bacc7b | 2,486 | py | Python | linear.py | jasonrobwebster/sampling-importance-resampling-example | 250e54815f73ccf071a4dad8d62a2bd7ec38c0c2 | [
"MIT"
] | null | null | null | linear.py | jasonrobwebster/sampling-importance-resampling-example | 250e54815f73ccf071a4dad8d62a2bd7ec38c0c2 | [
"MIT"
] | null | null | null | linear.py | jasonrobwebster/sampling-importance-resampling-example | 250e54815f73ccf071a4dad8d62a2bd7ec38c0c2 | [
"MIT"
] | null | null | null | import numpy as np
from scipy.special import softmax
import seaborn as sns
import matplotlib.pyplot as plt
# Sampling-importance-resampling (SIR) demo for Bayesian linear regression:
# draw parameters from broad uniform priors, weight them by the Gaussian
# likelihood of synthetic data, and resample to approximate the posterior.
if __name__ == '__main__':
    # Synthetic data generation: y = m x + c + N(0, sig^2) noise.
    data_size = 100
    true_grad = 3
    true_intercept = 1
    true_sig = 1
    x = np.linspace(0, 10, data_size)
    # y = m x + c
    y_obs = true_grad * x + true_intercept + np.random.normal(loc=0, scale=true_sig, size=data_size)
    M = 2000000
    m = M // 20 # M/m is usually around 20
    print(f'Generating {M} initial samples, and {m} re-samples')
    # sample M params from initial prior
    grad_prior = np.random.uniform(-10, 10, M) # m ~ U(-10, 10)
    intercept_prior = np.random.uniform(-10, 10, M) # c ~ U(-10, 10)
    sig_prior = np.random.uniform(0.1, 20, M) # sig ~ U(0.1, 20)
    # calculate importance weights, assuming that we model y ~ N(mu, sig)
    # log-likelihood of the whole dataset per sample:
    # log L = -N*log(sig*sqrt(2*pi)) - sum_i (y_i - (m*x_i + c))^2 / (2*sig^2);
    # softmax of the log-likelihoods gives normalized importance weights
    # without underflow.
    exponent = 1 / (2 * sig_prior ** 2) \
        * np.sum([(y_obs[i] - (grad_prior * x[i] + intercept_prior)) ** 2 for i in range(data_size)], axis=0)
    log_weights = - data_size * np.log(sig_prior * np.sqrt(2 * np.pi)) - exponent
    weights = softmax(log_weights)
    # resample params using the above weights to get posterior
    # (each parameter is resampled independently, so joint correlations are
    # only preserved approximately)
    grad_posterior = np.random.choice(grad_prior, m, p=weights)
    intercept_posterior = np.random.choice(intercept_prior, m, p=weights)
    sig_posterior = np.random.choice(sig_prior, m, p=weights)
    # report summary stats
    print(f'True gradient: {true_grad}')
    print(f'True intercept: {true_intercept}')
    print(f'True sigma: {true_sig}')
    print(f'Gradient posterior: mean={np.mean(grad_posterior):.3} - sd={np.std(grad_posterior):.3}')
    print(f'Intercept posterior: mean={np.mean(intercept_posterior):.3} - sd={np.std(intercept_posterior):.3}')
    print(f'Sigma posterior: mean={np.mean(sig_posterior):.3} - sd={np.std(sig_posterior):.3}')
    # plot the new samples
    fig, axes = plt.subplots(1, 4, figsize=(12, 3))
    axes[0].set_title('Data')
    axes[1].set_title('Gradient Posterior')
    axes[2].set_title('Intercept Posterior')
    axes[3].set_title('Sigma Posterior')
    axes[0].plot(x, y_obs, 'x')
    sns.distplot(grad_posterior, ax=axes[1])
    sns.distplot(intercept_posterior, ax=axes[2])
    sns.distplot(sig_posterior, ax=axes[3])
    plt.show()
    # Joint (gradient, intercept) posterior density.
    fig, ax = plt.subplots(figsize=(5, 5))
    ax.set_xlabel('Gradient')
    ax.set_ylabel('Intercept')
    ax.set_title('Joint distribution p(m, c)')
    sns.kdeplot(grad_posterior, intercept_posterior, shade=True, ax=ax)
    plt.show()
| 37.666667 | 116 | 0.65889 | import numpy as np
from scipy.special import softmax
import seaborn as sns
import matplotlib.pyplot as plt
if __name__ == '__main__':
data_size = 100
true_grad = 3
true_intercept = 1
true_sig = 1
x = np.linspace(0, 10, data_size)
y_obs = true_grad * x + true_intercept + np.random.normal(loc=0, scale=true_sig, size=data_size)
M = 2000000
m = M // 20
print(f'Generating {M} initial samples, and {m} re-samples')
grad_prior = np.random.uniform(-10, 10, M)
intercept_prior = np.random.uniform(-10, 10, M)
sig_prior = np.random.uniform(0.1, 20, M)
exponent = 1 / (2 * sig_prior ** 2) \
* np.sum([(y_obs[i] - (grad_prior * x[i] + intercept_prior)) ** 2 for i in range(data_size)], axis=0)
log_weights = - data_size * np.log(sig_prior * np.sqrt(2 * np.pi)) - exponent
weights = softmax(log_weights)
grad_posterior = np.random.choice(grad_prior, m, p=weights)
intercept_posterior = np.random.choice(intercept_prior, m, p=weights)
sig_posterior = np.random.choice(sig_prior, m, p=weights)
print(f'True gradient: {true_grad}')
print(f'True intercept: {true_intercept}')
print(f'True sigma: {true_sig}')
print(f'Gradient posterior: mean={np.mean(grad_posterior):.3} - sd={np.std(grad_posterior):.3}')
print(f'Intercept posterior: mean={np.mean(intercept_posterior):.3} - sd={np.std(intercept_posterior):.3}')
print(f'Sigma posterior: mean={np.mean(sig_posterior):.3} - sd={np.std(sig_posterior):.3}')
fig, axes = plt.subplots(1, 4, figsize=(12, 3))
axes[0].set_title('Data')
axes[1].set_title('Gradient Posterior')
axes[2].set_title('Intercept Posterior')
axes[3].set_title('Sigma Posterior')
axes[0].plot(x, y_obs, 'x')
sns.distplot(grad_posterior, ax=axes[1])
sns.distplot(intercept_posterior, ax=axes[2])
sns.distplot(sig_posterior, ax=axes[3])
plt.show()
fig, ax = plt.subplots(figsize=(5, 5))
ax.set_xlabel('Gradient')
ax.set_ylabel('Intercept')
ax.set_title('Joint distribution p(m, c)')
sns.kdeplot(grad_posterior, intercept_posterior, shade=True, ax=ax)
plt.show()
| true | true |
f7f475ab6e1f740c4c2b4435ac10911c22345637 | 1,637 | py | Python | Plateau/preprocess_image.py | RichardLeeK/CNM | a3c15cb0a0373d6ad03c5a815a7e020f90ab8522 | [
"Apache-2.0"
] | null | null | null | Plateau/preprocess_image.py | RichardLeeK/CNM | a3c15cb0a0373d6ad03c5a815a7e020f90ab8522 | [
"Apache-2.0"
] | null | null | null | Plateau/preprocess_image.py | RichardLeeK/CNM | a3c15cb0a0373d6ad03c5a815a7e020f90ab8522 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from env import Env
def fill(image,x_idx,y_idx,bound,value):
  """Conditionally paint one pixel of a 2-D image, in place.

  Sets ``image[x_idx][y_idx] = value`` only when the index lies inside the
  image and the current intensity there is below ``bound`` (so brighter
  pixels — e.g. the 255 trace drawn by transform_img — are never
  overwritten). Out-of-range indices are silently ignored.

  The image is modified in place and returned for convenience.

  Generalization: the original hard-coded 900x110 bounds (transform_img's
  defaults); the limits now come from ``image.shape``, so any 2-D ndarray
  size works, and callers with the default size see identical behavior.
  """
  max_x, max_y = image.shape[0], image.shape[1]
  if (x_idx<0) or (x_idx>=max_x):
    return image
  elif (y_idx<0) or (y_idx>=max_y):
    return image
  elif image[x_idx][y_idx]>=bound:
    return image
  else:
    image[x_idx][y_idx]=value
    return image
def fill_edge(image,x_idx,y_idx,value,bound,dist=1):
  """Paint the eight neighbours at Chebyshev distance ``dist`` around
  (x_idx, y_idx), using the same clipping/overwrite rules as fill().
  The centre pixel itself is left untouched."""
  neighbour_offsets = (
    (-dist, -dist), (-dist, 0), (-dist, dist),
    (0, -dist),                 (0, dist),
    (dist, -dist),  (dist, 0),  (dist, dist),
  )
  for dx, dy in neighbour_offsets:
    fill(image, x_idx + dx, y_idx + dy, bound, value)
def transform_img(data,window=900,y_range=110,step=60):
  """Render an ICP time series as a stack of grayscale trace images.

  ``data[1]`` is the ICP sample sequence. A sliding window of ``window``
  samples (advanced by ``step``) is rasterized into a (window, y_range)
  uint8 image: the trace pixel for each time step is set to 255 and its
  eight neighbours to 128 via fill_edge(). Each image is transposed so the
  returned array has shape (num_windows, y_range, window).

  Fix: the original only checked ``y_idx < y_range``, so an ICP value <= 0
  produced a negative index that wrapped around (numpy negative indexing)
  and painted the opposite edge of the image; such samples are now skipped,
  matching the intent of the (previously commented-out) 0 < icp filter.
  """
  icps=np.int64(data[1])
  image_set=[]
  start_time=0
  while start_time<(len(icps)-window):
    image=np.zeros((window,y_range), dtype=np.uint8)
    for time_idx in range(0,window):
      time=start_time+time_idx
      y_idx=icps[time]-1
      # Guard both ends of the range to avoid negative-index wraparound.
      if 0<=y_idx<y_range:
        image[time_idx][y_idx]=255
        fill_edge(image,time_idx,y_idx,value=128,bound=255,dist=1)
    image_set.append(image.T)
    start_time=start_time+step
  return np.array(image_set)
def transform_imgdict(dataset,window=900,y_range=110,step=60):
  """Map each dataset index to its stack of trace images.

  Runs transform_img() on every record of ``dataset`` (parameters are
  forwarded unchanged) and collects the results in a dict keyed by the
  record's position.
  """
  imgdict=dict()
  for i in range(len(dataset)):
    imgset=transform_img(dataset[i],window=window,y_range=y_range,step=step)
    imgdict[i]=imgset
return imgdict | 32.74 | 80 | 0.661576 | import numpy as np
from env import Env
def fill(image,x_idx,y_idx,bound,value):
if (x_idx<0) or (x_idx>=900):
return image
elif (y_idx<0) or (y_idx>=110):
return image
elif image[x_idx][y_idx]>=bound:
return image
else:
image[x_idx][y_idx]=value
return image
def fill_edge(image,x_idx,y_idx,value,bound,dist=1):
fill(image,x_idx-dist,y_idx,bound,value)
fill(image,x_idx-dist,y_idx-dist,bound,value)
fill(image,x_idx-dist,y_idx+dist,bound,value)
fill(image,x_idx+dist,y_idx,bound,value)
fill(image,x_idx+dist,y_idx-dist,bound,value)
fill(image,x_idx+dist,y_idx+dist,bound,value)
fill(image,x_idx,y_idx-dist,bound,value)
fill(image,x_idx,y_idx+dist,bound,value)
def transform_img(data,window=900,y_range=110,step=60):
icps=np.int64(data[1])
image_set=[]
start_time=0
while start_time<(len(icps)-window):
image=np.zeros((window,y_range), dtype=np.uint8)
for time_idx in range(0,window):
time=start_time+time_idx
y_idx=icps[time]-1
if y_idx<y_range:
image[time_idx][y_idx]=255
fill_edge(image,time_idx,y_idx,value=128,bound=255,dist=1)
image_set.append(image.T)
start_time=start_time+step
return np.array(image_set)
def transform_imgdict(dataset,window=900,y_range=110,step=60):
imgdict=dict()
for i in range(len(dataset)):
imgset=transform_img(dataset[i],window=window,y_range=y_range,step=step)
imgdict[i]=imgset
return imgdict | true | true |
f7f47717f8bea61e7631353678087d94dea1474a | 9,039 | py | Python | test/test_conv_encoding.py | NVlabs/sionna | 488e6c3ff6ff2b3313d0ca0f94e4247b8dd6ff35 | [
"Apache-2.0"
] | 163 | 2022-03-22T19:47:47.000Z | 2022-03-31T23:56:45.000Z | test/test_conv_encoding.py | Maryammhsnv/sionna | 527d0f7866b379afffad34a6bef7ed3bf6f33ad2 | [
"Apache-2.0"
] | 2 | 2022-03-24T12:43:07.000Z | 2022-03-29T07:17:16.000Z | test/test_conv_encoding.py | Maryammhsnv/sionna | 527d0f7866b379afffad34a6bef7ed3bf6f33ad2 | [
"Apache-2.0"
] | 19 | 2022-03-23T02:31:22.000Z | 2022-03-30T06:35:12.000Z | #
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
try:
import sionna
except ImportError as e:
import sys
sys.path.append("../")
import unittest
import numpy as np
import tensorflow as tf
# Restrict TensorFlow to a single GPU (if any are present) and enable
# memory growth so TF does not grab all GPU memory up front.
gpus = tf.config.list_physical_devices('GPU')
print('Number of GPUs available :', len(gpus))
if gpus:
    gpu_num = 0 # Number of the GPU to be used
    try:
        tf.config.set_visible_devices(gpus[gpu_num], 'GPU')
        print('Only GPU number', gpu_num, 'used.')
        tf.config.experimental.set_memory_growth(gpus[gpu_num], True)
    except RuntimeError as e:
        # Visible devices must be set before GPUs are initialized; report
        # and continue with defaults if that has already happened.
        print(e)
from sionna.fec.conv import ConvEncoder
from sionna.utils import BinarySource
class TestConvEncoding(unittest.TestCase):
    """Unit tests for sionna's ConvEncoder (convolutional FEC encoder):
    output shapes, parameter validation, reference codewords, batching,
    dtype flexibility, Keras integration and tf.function/XLA support."""
    def test_output_dim(self):
        r"""Test with allzero codeword that output dims are correct (=n) and output also equals all-zero."""
        bs = 10
        coderates = [1/2, 1/3]
        ks = [10, 20, 50, 100]
        for rate in coderates:
            for k in ks:
                n = int(k/rate) # calculate coderate
                enc = ConvEncoder(rate=rate, constraint_length=5)
                u = np.zeros([bs, k])
                c = enc(u).numpy()
                self.assertTrue(c.shape[-1]==n)
                # also check that all-zero input yields all-zero output
                c_hat = np.zeros([bs, n])
                self.assertTrue(np.array_equal(c, c_hat))
                # test that output dim can change (in eager mode)
                k = k+1 # increase length
                n = int(k/rate) # calculate coderate
                u = np.zeros([bs, k])
                c = enc(u).numpy()
                self.assertTrue(c.shape[-1]==n)
                # also check that all-zero input yields all-zero output
                c_hat = np.zeros([bs, n])
                self.assertTrue(np.array_equal(c, c_hat))
    def test_invalid_inputs(self):
        r"""Test with invalid rate values and invalid constraint lengths as input.
        Only rates [1/2, 1/3] and constraint lengths [3, 4, 5, 6, 7, 8] are accepted currently."""
        rate_invalid = [0.2, 0.45, 0.01]
        rate_valid = [1/3, 1/2]
        constraint_length_invalid = [2, 9, 0]
        constraint_length_valid = [3, 4, 5, 6, 7, 8]
        # valid rate with invalid constraint length must raise
        for rate in rate_valid:
            for mu in constraint_length_invalid:
                with self.assertRaises(AssertionError):
                    enc = ConvEncoder(rate=rate, constraint_length=mu)
        # invalid rate with valid constraint length must raise
        for rate in rate_invalid:
            for mu in constraint_length_valid:
                with self.assertRaises(AssertionError):
                    enc = ConvEncoder(rate=rate, constraint_length= mu)
        # a malformed generator-polynomial matrix must also raise
        gmat = [['101', '111', '000'], ['000', '010', '011']]
        with self.assertRaises(AssertionError):
            enc = ConvEncoder(gen_poly=gmat)
    def test_polynomial_input(self):
        r"""Test that different formats of input polynomials are accepted and raises exceptions when the generator polynomials fail assertions."""
        bs = 10
        k = 100
        rate = 1/2
        n = int(k/rate) # calculate coderate
        u = np.zeros([bs, k])
        # both a list of strings and an ndarray of strings are accepted
        g1 = ['101', '111']
        g2 = np.array(g1)
        g = [g1, g2]
        for gen_poly in g:
            enc = ConvEncoder(gen_poly=gen_poly)
            c = enc(u).numpy()
            self.assertTrue(c.shape[-1]==n)
            # also check that all-zero input yields all-zero output
            c_hat = np.zeros([bs, n])
            self.assertTrue(np.array_equal(c, c_hat))
        # helper: construction must fail with exactly the expected message
        def util_check_assertion_err(gen_poly_, msg_):
            with self.assertRaises(AssertionError) as exception_context:
                enc = ConvEncoder(gen_poly=gen_poly_)
            self.assertEqual(str(exception_context.exception), msg_)
        gs = [
            ['1001', '111'],
            ['1001', 111],
            ('1211', '1101')]
        msg_s = [
            "Each polynomial must be of same length.",
            "Each polynomial must be a string.",
            "Each Polynomial must be a string of 0/1 s."
        ]
        for idx, g in enumerate(gs):
            util_check_assertion_err(g,msg_s[idx])
    def test_keras(self):
        """Test that Keras model can be compiled (+supports dynamic shapes)."""
        bs = 10
        k = 100
        source = BinarySource()
        inputs = tf.keras.Input(shape=(k), dtype=tf.float32)
        x = ConvEncoder(rate=0.5, constraint_length=4)(inputs)
        model = tf.keras.Model(inputs=inputs, outputs=x)
        b = source([bs, k])
        model(b)
        # call twice to see that bs can change
        b2 = source([bs+1, k])
        model(b2)
        model.summary()
        # also verify the encoder itself accepts changing input lengths
        source = BinarySource()
        enc = ConvEncoder(rate=0.5, constraint_length=8)
        u = source([1, 32])
        x = enc(u)
        print(x.shape)
        u = source([2, 30])
        x = enc(u)
        print(x.shape)
    def test_multi_dimensional(self):
        """Test against arbitrary shapes
        """
        k = 120
        n = 240 # rate must be 1/2 or 1/3
        source = BinarySource()
        enc = ConvEncoder(rate=k/n, constraint_length=5)
        b = source([100, k])
        b_res = tf.reshape(b, [4, 5, 5, k])
        # encode 2D Tensor
        c = enc(b).numpy()
        # encode 4D Tensor
        c_res = enc(b_res).numpy()
        # test that shape was preserved
        self.assertTrue(c_res.shape[:-1]==b_res.shape[:-1])
        # and reshape to 2D shape
        c_res = tf.reshape(c_res, [100,n])
        # both version should yield same result
        self.assertTrue(np.array_equal(c, c_res))
    def test_ref_implementation(self):
        r"""Test against pre-encoded codewords from reference implementation.
        """
        # NOTE: requires the fixture files under codes/conv/ to be present.
        ref_path = 'codes/conv/'
        gs = [
            ['101', '111'],
            ['1101', '1111'],
            ['101', '111', '111'],
            ['101', '111', '111', '111']]
        gen_strs = [
            'conv_rate_half_57_',
            'conv_rate_half_6474_',
            'conv_rate_onethird_577_',
            'conv_rate_onefourth_5777_']
        rs=[1/2, 1/2, 1/3, 1/4]
        mus = [3, 4, 3, 3]
        for idx, gen_poly in enumerate(gs):
            enc = ConvEncoder(gen_poly=gen_poly)
            gen_str = gen_strs[idx]
            u = np.load(ref_path + gen_str + 'ref_u.npy')
            cref = np.load(ref_path + gen_str + 'ref_x.npy')
            c = enc(u).numpy()
            self.assertTrue(np.array_equal(c, cref))
            # for the rates expressible via (rate, constraint_length), check
            # that constructor form produces the same codewords
            if idx in [0, 2]:
                enc = ConvEncoder(rate=rs[idx], constraint_length=mus[idx])
                c = enc(u).numpy()
                self.assertTrue(np.array_equal(c, cref))
    def test_batch(self):
        """Test that all samples in batch yield same output (for same input).
        """
        bs = 100
        k = 120
        source = BinarySource()
        enc = ConvEncoder(rate=0.5, constraint_length=7)
        b = source([1, 15, k])
        b_rep = tf.tile(b, [bs, 1, 1])
        # and run tf version (to be tested)
        c = enc(b_rep).numpy()
        for i in range(bs):
            self.assertTrue(np.array_equal(c[0,:,:], c[i,:,:]))
    def test_dtypes_flexible(self):
        """Test that encoder supports variable dtypes and
        yields same result."""
        dt_supported = (tf.float16, tf.float32, tf.float64, tf.int8,
                        tf.int32, tf.int64, tf.uint8, tf.uint16, tf.uint32)
        bs = 10
        k = 32
        source = BinarySource()
        enc_ref = ConvEncoder(rate=0.5,
                              constraint_length=7,
                              output_dtype=tf.float32)
        u = source([bs, k])
        c_ref = enc_ref(u)
        for dt in dt_supported:
            enc = ConvEncoder(rate=0.5,
                              constraint_length=7,
                              output_dtype=dt)
            u_dt = tf.cast(u, dt)
            c = enc(u_dt)
            c_32 = tf.cast(c, tf.float32)
            self.assertTrue(np.array_equal(c_ref.numpy(), c_32.numpy()))
    def test_tf_fun(self):
        """Test that tf.function decorator works and XLA is supported"""
        @tf.function
        def run_graph(u):
            return enc(u)
        @tf.function(jit_compile=True)
        def run_graph_xla(u):
            return enc(u)
        bs = 10
        k = 100
        source = BinarySource()
        enc = ConvEncoder(rate=0.5, constraint_length=7)
        # test that for arbitrary input only 0,1 values are outputed
        u = source([bs, k])
        x = run_graph(u).numpy()
        # execute the graph twice
        x = run_graph(u).numpy()
        # and change batch_size
        u = source([bs+1, k])
        x = run_graph(u).numpy()
        #check XLA
        x = run_graph_xla(u).numpy()
        u = source([bs, k])
        x = run_graph_xla(u).numpy()
| 32.397849 | 146 | 0.538666 |
try:
import sionna
except ImportError as e:
import sys
sys.path.append("../")
import unittest
import numpy as np
import tensorflow as tf
gpus = tf.config.list_physical_devices('GPU')
print('Number of GPUs available :', len(gpus))
if gpus:
gpu_num = 0
try:
tf.config.set_visible_devices(gpus[gpu_num], 'GPU')
print('Only GPU number', gpu_num, 'used.')
tf.config.experimental.set_memory_growth(gpus[gpu_num], True)
except RuntimeError as e:
print(e)
from sionna.fec.conv import ConvEncoder
from sionna.utils import BinarySource
class TestConvEncoding(unittest.TestCase):
def test_output_dim(self):
bs = 10
coderates = [1/2, 1/3]
ks = [10, 20, 50, 100]
for rate in coderates:
for k in ks:
n = int(k/rate)
enc = ConvEncoder(rate=rate, constraint_length=5)
u = np.zeros([bs, k])
c = enc(u).numpy()
self.assertTrue(c.shape[-1]==n)
c_hat = np.zeros([bs, n])
self.assertTrue(np.array_equal(c, c_hat))
k = k+1
n = int(k/rate)
u = np.zeros([bs, k])
c = enc(u).numpy()
self.assertTrue(c.shape[-1]==n)
c_hat = np.zeros([bs, n])
self.assertTrue(np.array_equal(c, c_hat))
def test_invalid_inputs(self):
rate_invalid = [0.2, 0.45, 0.01]
rate_valid = [1/3, 1/2]
constraint_length_invalid = [2, 9, 0]
constraint_length_valid = [3, 4, 5, 6, 7, 8]
for rate in rate_valid:
for mu in constraint_length_invalid:
with self.assertRaises(AssertionError):
enc = ConvEncoder(rate=rate, constraint_length=mu)
for rate in rate_invalid:
for mu in constraint_length_valid:
with self.assertRaises(AssertionError):
enc = ConvEncoder(rate=rate, constraint_length= mu)
gmat = [['101', '111', '000'], ['000', '010', '011']]
with self.assertRaises(AssertionError):
enc = ConvEncoder(gen_poly=gmat)
def test_polynomial_input(self):
bs = 10
k = 100
rate = 1/2
n = int(k/rate)
u = np.zeros([bs, k])
g1 = ['101', '111']
g2 = np.array(g1)
g = [g1, g2]
for gen_poly in g:
enc = ConvEncoder(gen_poly=gen_poly)
c = enc(u).numpy()
self.assertTrue(c.shape[-1]==n)
c_hat = np.zeros([bs, n])
self.assertTrue(np.array_equal(c, c_hat))
def util_check_assertion_err(gen_poly_, msg_):
with self.assertRaises(AssertionError) as exception_context:
enc = ConvEncoder(gen_poly=gen_poly_)
self.assertEqual(str(exception_context.exception), msg_)
gs = [
['1001', '111'],
['1001', 111],
('1211', '1101')]
msg_s = [
"Each polynomial must be of same length.",
"Each polynomial must be a string.",
"Each Polynomial must be a string of 0/1 s."
]
for idx, g in enumerate(gs):
util_check_assertion_err(g,msg_s[idx])
def test_keras(self):
bs = 10
k = 100
source = BinarySource()
inputs = tf.keras.Input(shape=(k), dtype=tf.float32)
x = ConvEncoder(rate=0.5, constraint_length=4)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=x)
b = source([bs, k])
model(b)
b2 = source([bs+1, k])
model(b2)
model.summary()
source = BinarySource()
enc = ConvEncoder(rate=0.5, constraint_length=8)
u = source([1, 32])
x = enc(u)
print(x.shape)
u = source([2, 30])
x = enc(u)
print(x.shape)
def test_multi_dimensional(self):
k = 120
n = 240
source = BinarySource()
enc = ConvEncoder(rate=k/n, constraint_length=5)
b = source([100, k])
b_res = tf.reshape(b, [4, 5, 5, k])
c = enc(b).numpy()
c_res = enc(b_res).numpy()
self.assertTrue(c_res.shape[:-1]==b_res.shape[:-1])
c_res = tf.reshape(c_res, [100,n])
self.assertTrue(np.array_equal(c, c_res))
def test_ref_implementation(self):
ref_path = 'codes/conv/'
gs = [
['101', '111'],
['1101', '1111'],
['101', '111', '111'],
['101', '111', '111', '111']]
gen_strs = [
'conv_rate_half_57_',
'conv_rate_half_6474_',
'conv_rate_onethird_577_',
'conv_rate_onefourth_5777_']
rs=[1/2, 1/2, 1/3, 1/4]
mus = [3, 4, 3, 3]
for idx, gen_poly in enumerate(gs):
enc = ConvEncoder(gen_poly=gen_poly)
gen_str = gen_strs[idx]
u = np.load(ref_path + gen_str + 'ref_u.npy')
cref = np.load(ref_path + gen_str + 'ref_x.npy')
c = enc(u).numpy()
self.assertTrue(np.array_equal(c, cref))
if idx in [0, 2]:
enc = ConvEncoder(rate=rs[idx], constraint_length=mus[idx])
c = enc(u).numpy()
self.assertTrue(np.array_equal(c, cref))
def test_batch(self):
bs = 100
k = 120
source = BinarySource()
enc = ConvEncoder(rate=0.5, constraint_length=7)
b = source([1, 15, k])
b_rep = tf.tile(b, [bs, 1, 1])
c = enc(b_rep).numpy()
for i in range(bs):
self.assertTrue(np.array_equal(c[0,:,:], c[i,:,:]))
def test_dtypes_flexible(self):
dt_supported = (tf.float16, tf.float32, tf.float64, tf.int8,
tf.int32, tf.int64, tf.uint8, tf.uint16, tf.uint32)
bs = 10
k = 32
source = BinarySource()
enc_ref = ConvEncoder(rate=0.5,
constraint_length=7,
output_dtype=tf.float32)
u = source([bs, k])
c_ref = enc_ref(u)
for dt in dt_supported:
enc = ConvEncoder(rate=0.5,
constraint_length=7,
output_dtype=dt)
u_dt = tf.cast(u, dt)
c = enc(u_dt)
c_32 = tf.cast(c, tf.float32)
self.assertTrue(np.array_equal(c_ref.numpy(), c_32.numpy()))
def test_tf_fun(self):
@tf.function
def run_graph(u):
return enc(u)
@tf.function(jit_compile=True)
def run_graph_xla(u):
return enc(u)
bs = 10
k = 100
source = BinarySource()
enc = ConvEncoder(rate=0.5, constraint_length=7)
u = source([bs, k])
x = run_graph(u).numpy()
x = run_graph(u).numpy()
u = source([bs+1, k])
x = run_graph(u).numpy()
x = run_graph_xla(u).numpy()
u = source([bs, k])
x = run_graph_xla(u).numpy()
| true | true |
f7f477391091346bfb78da2e8f38930aa03f7abf | 2,715 | py | Python | sky.py | cthulahoops/vrc3d | bc9b2e7c6cd4bfccb90385e8d33d893686bae9cf | [
"MIT"
] | 2 | 2021-06-28T16:04:25.000Z | 2021-06-30T05:03:37.000Z | sky.py | cthulahoops/vrc3d | bc9b2e7c6cd4bfccb90385e8d33d893686bae9cf | [
"MIT"
] | null | null | null | sky.py | cthulahoops/vrc3d | bc9b2e7c6cd4bfccb90385e8d33d893686bae9cf | [
"MIT"
] | 1 | 2021-06-29T16:45:56.000Z | 2021-06-29T16:45:56.000Z | from math import cos, sin, radians
from collections import namedtuple
from skyfield import api
from textures import Texture
from scene import Scene, Quad
from shader import Shader
from vector import Vector
from matrix import Matrix
# Observer location ("Bridge St").
# NOTE(review): LATITUDE = 39.6913 looks inconsistent with the BROOKLYN name
# (Brooklyn, NY is at roughly latitude 40.69) — confirm whether this is a typo.
# Bridge St
LONGITUDE = -73.985
LATITUDE = 39.6913
# Skyfield timescale and the DE421 planetary ephemeris.
TS = api.load.timescale()
PLANETS = api.load("de421.bsp")
SUN, EARTH, MOON = PLANETS["sun"], PLANETS["earth"], PLANETS["moon"]
# Topocentric observer: Earth's center offset to the site's lat/lon.
BROOKLYN = EARTH + api.wgs84.latlon(LATITUDE, LONGITUDE)
def to_cartesian(alt, az, _distance=None):
    """Convert an (altitude, azimuth) pair of skyfield Angles to a unit
    direction Vector (x east-ish, y up, z toward -north); the optional
    distance argument is accepted for call-site compatibility but ignored."""
    alt_rad = alt.radians
    az_rad = az.radians
    horizontal = cos(alt_rad)
    return Vector(sin(az_rad) * horizontal, sin(alt_rad), -cos(az_rad) * horizontal)
# Lightweight records: an (altitude, azimuth) pair, and the bundle of
# per-timestamp astronomical quantities produced by astronomy().
AltAz = namedtuple("AltAz", ("alt", "az"))
Astronomy = namedtuple("Astronomy", ("sun_altaz", "sun_position", "moon_matrix", "moon_position", "celestial_matrix"))
def astronomy(utctime):
    """Compute the sky state for a naive-UTC datetime.

    Returns an Astronomy tuple with: the sun's alt/az and unit direction,
    the moon's unit direction and a 2-D rotation matrix for its alt/az,
    and a celestial rotation matrix (declination tilt composed with the
    sidereal rotation) for orienting the star map.
    """
    # Tag the incoming naive datetime as UTC for skyfield.
    t = TS.from_datetime(utctime.replace(tzinfo=api.utc))
    observer = BROOKLYN.at(t)
    # Tilt by the site's co-latitude, then spin by local sidereal angle
    # (longitude plus Greenwich sidereal time converted from hours to degrees).
    declination_matrix = Matrix.rotate(radians(90 - LATITUDE), Vector(1.0, 0.0, 0.0))
    rotation_matrix = Matrix.rotate(radians(LONGITUDE + (360 * t.gmst / 24)), Vector(0.0, 1.0, 0.0))
    celestrial_matrix = declination_matrix @ rotation_matrix
    # Apparent moon position as seen from the observer.
    (moon_alt, moon_az, _) = observer.observe(MOON).apparent().altaz()
    moon_matrix = Matrix.rotate_2d(moon_az.radians, moon_alt.radians)
    moon_position = to_cartesian(moon_alt, moon_az)
    # Apparent sun position.
    (sun_alt, sun_az, _) = observer.observe(SUN).apparent().altaz()
    sun_altaz = AltAz(sun_alt, sun_az)
    sun_position = to_cartesian(sun_alt, sun_az)
    return Astronomy(sun_altaz, sun_position, moon_matrix, moon_position, celestrial_matrix)
class Sky:
    """Renders the sky dome (stars, moon, sun/atmosphere) as a single
    full-screen quad driven by the "sky" shader."""
    def __init__(self, show_grid, show_atmosphere):
        # Texture units: star map and moon image, fed to the shader below.
        self.starmap_texture = Texture("starmap")
        self.moon_texture = Texture("moon")
        # Debug/feature toggles forwarded to the shader each frame.
        self.show_grid = show_grid
        self.show_atmosphere = show_atmosphere
        self.shader = Shader("sky")
        # A single quad is enough — the shader ray-casts the sky per pixel.
        self.scene = Scene(max_vertices=13)
        self.scene.add_entity(1, Quad())
    def draw(self, camera, astro):
        """Draw the sky for the given camera and Astronomy state."""
        self.shader.use()
        # Camera orientation/projection (no translation: the sky is at infinity).
        self.shader["rotation_matrix"] = camera.rotate
        self.shader["projection_matrix"] = camera.project
        # Bind star map to texture unit 0 and moon to unit 1.
        self.starmap_texture.activate(0)
        self.shader["stars_array_sampler"] = 0
        self.moon_texture.activate(1)
        self.shader["moon_array_sampler"] = 1
        # Per-frame astronomical inputs computed by astronomy().
        self.shader["celestial_matrix"] = astro.celestial_matrix
        self.shader["sun_position"] = astro.sun_position
        self.shader["moon_position"] = astro.moon_position
        self.shader["moon_matrix"] = astro.moon_matrix
        self.shader["show_grid"] = self.show_grid
        self.shader["show_atmosphere"] = self.show_atmosphere
        self.scene.draw()
| 33.109756 | 118 | 0.692818 | from math import cos, sin, radians
from collections import namedtuple
from skyfield import api
from textures import Texture
from scene import Scene, Quad
from shader import Shader
from vector import Vector
from matrix import Matrix
LONGITUDE = -73.985
LATITUDE = 39.6913
TS = api.load.timescale()
PLANETS = api.load("de421.bsp")
SUN, EARTH, MOON = PLANETS["sun"], PLANETS["earth"], PLANETS["moon"]
BROOKLYN = EARTH + api.wgs84.latlon(LATITUDE, LONGITUDE)
def to_cartesian(alt, az, _distance=None):
x = sin(az.radians) * cos(alt.radians)
z = -cos(az.radians) * cos(alt.radians)
y = sin(alt.radians)
return Vector(x, y, z)
AltAz = namedtuple("AltAz", ("alt", "az"))
Astronomy = namedtuple("Astronomy", ("sun_altaz", "sun_position", "moon_matrix", "moon_position", "celestial_matrix"))
def astronomy(utctime):
t = TS.from_datetime(utctime.replace(tzinfo=api.utc))
observer = BROOKLYN.at(t)
declination_matrix = Matrix.rotate(radians(90 - LATITUDE), Vector(1.0, 0.0, 0.0))
rotation_matrix = Matrix.rotate(radians(LONGITUDE + (360 * t.gmst / 24)), Vector(0.0, 1.0, 0.0))
celestrial_matrix = declination_matrix @ rotation_matrix
(moon_alt, moon_az, _) = observer.observe(MOON).apparent().altaz()
moon_matrix = Matrix.rotate_2d(moon_az.radians, moon_alt.radians)
moon_position = to_cartesian(moon_alt, moon_az)
(sun_alt, sun_az, _) = observer.observe(SUN).apparent().altaz()
sun_altaz = AltAz(sun_alt, sun_az)
sun_position = to_cartesian(sun_alt, sun_az)
return Astronomy(sun_altaz, sun_position, moon_matrix, moon_position, celestrial_matrix)
class Sky:
def __init__(self, show_grid, show_atmosphere):
self.starmap_texture = Texture("starmap")
self.moon_texture = Texture("moon")
self.show_grid = show_grid
self.show_atmosphere = show_atmosphere
self.shader = Shader("sky")
self.scene = Scene(max_vertices=13)
self.scene.add_entity(1, Quad())
def draw(self, camera, astro):
self.shader.use()
self.shader["rotation_matrix"] = camera.rotate
self.shader["projection_matrix"] = camera.project
self.starmap_texture.activate(0)
self.shader["stars_array_sampler"] = 0
self.moon_texture.activate(1)
self.shader["moon_array_sampler"] = 1
self.shader["celestial_matrix"] = astro.celestial_matrix
self.shader["sun_position"] = astro.sun_position
self.shader["moon_position"] = astro.moon_position
self.shader["moon_matrix"] = astro.moon_matrix
self.shader["show_grid"] = self.show_grid
self.shader["show_atmosphere"] = self.show_atmosphere
self.scene.draw()
| true | true |
f7f47929860f53cf30c2375dc6743b8f548a3e00 | 663 | py | Python | catalog/migrations/0002_bookinstance_borrower.py | vlms/django_local_library | 755775f8bfafa08440453247d21a42ec72c48eab | [
"MIT"
] | null | null | null | catalog/migrations/0002_bookinstance_borrower.py | vlms/django_local_library | 755775f8bfafa08440453247d21a42ec72c48eab | [
"MIT"
] | null | null | null | catalog/migrations/0002_bookinstance_borrower.py | vlms/django_local_library | 755775f8bfafa08440453247d21a42ec72c48eab | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-19 14:31
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a nullable ``borrower`` foreign key from BookInstance to the user model."""
    # Depends on the initial catalog schema and on whichever (swappable)
    # user model the project has configured via AUTH_USER_MODEL.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('catalog', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='bookinstance',
            name='borrower',
            # SET_NULL: if the borrowing user is deleted, keep the book
            # instance and just clear the borrower reference.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
        ),
    ]
| 27.625 | 134 | 0.68175 |
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('catalog', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='bookinstance',
name='borrower',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
),
]
| true | true |
f7f47991f4fa640cc0eca0031ccfa55625f84abc | 4,510 | py | Python | example/ner/few-shot/run.py | hphphp123321/DeepKE | 94b39a20db0d848ccea81ea56fef4587ac31e2bc | [
"MIT"
] | 1 | 2021-11-10T07:57:11.000Z | 2021-11-10T07:57:11.000Z | example/ner/few-shot/run.py | 807953261/DeepKE | f7efd3fc87d3bf88783a41efc3c09dca7a986013 | [
"MIT"
] | 1 | 2021-11-05T04:25:25.000Z | 2021-11-05T04:25:25.000Z | example/ner/few-shot/run.py | 807953261/DeepKE | f7efd3fc87d3bf88783a41efc3c09dca7a986013 | [
"MIT"
] | null | null | null | import os
import hydra
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]='1'
import logging
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../")))
from hydra import utils
from torch.utils.data import DataLoader
from deepke.name_entity_re.few_shot.models.model import PromptBartModel, PromptGeneratorModel
from deepke.name_entity_re.few_shot.module.datasets import ConllNERProcessor, ConllNERDataset
from deepke.name_entity_re.few_shot.module.train import Trainer
from deepke.name_entity_re.few_shot.module.metrics import Seq2SeqSpanMetric
from deepke.name_entity_re.few_shot.utils.util import get_loss, set_seed
from deepke.name_entity_re.few_shot.module.mapping_type import mit_movie_mapping, mit_restaurant_mapping, atis_mapping
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
from tensorboardX import SummaryWriter
# TensorBoard writer shared with the Trainer below.
writer = SummaryWriter(log_dir='logs')
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt = '%m/%d/%Y %H:%M:%S',
                    level = logging.INFO)
logger = logging.getLogger(__name__)
# Per-dataset registries keyed by cfg.dataset_name; all four datasets share
# the same CoNLL-format Dataset/Processor implementations.
DATASET_CLASS = {
    'conll2003': ConllNERDataset,
    'mit-movie': ConllNERDataset,
    'mit-restaurant': ConllNERDataset,
    'atis': ConllNERDataset
}
DATA_PROCESS = {
    'conll2003': ConllNERProcessor,
    'mit-movie': ConllNERProcessor,
    'mit-restaurant': ConllNERProcessor,
    'atis': ConllNERProcessor
}
# Relative file paths (resolved against the original cwd in main()); the
# few-shot datasets use k-shot training splits.
DATA_PATH = {
    'conll2003': {'train': 'data/conll2003/train.txt',
                  'dev': 'data/conll2003/dev.txt',
                  'test': 'data/conll2003/test.txt'},
    'mit-movie': {'train': 'data/mit-movie/20-shot-train.txt',
                  'dev': 'data/mit-movie/test.txt'},
    'mit-restaurant': {'train': 'data/mit-restaurant/10-shot-train.txt',
                       'dev': 'data/mit-restaurant/test.txt'},
    'atis': {'train': 'data/atis/20-shot-train.txt',
             'dev': 'data/atis/test.txt'}
}
# Entity-label -> prompt-token mapping used by the BART prompt model.
MAPPING = {
    'conll2003': {'loc': '<<location>>',
                  'per': '<<person>>',
                  'org': '<<organization>>',
                  'misc': '<<others>>'},
    'mit-movie': mit_movie_mapping,
    'mit-restaurant': mit_restaurant_mapping,
    'atis': atis_mapping
}
@hydra.main(config_path="conf/config.yaml")
def main(cfg):
    """Train a few-shot prompt-BART NER model.

    ``cfg`` is the Hydra config loaded from conf/config.yaml.  Hydra changes
    the working directory at launch, so the original cwd is recovered and
    stored on ``cfg.cwd`` to resolve the relative dataset paths.
    """
    cwd = utils.get_original_cwd()  # Hydra chdirs into an output dir; recover launch dir
    cfg.cwd = cwd
    print(cfg)
    # Resolve the dataset's relative file paths against the launch directory.
    data_path = DATA_PATH[cfg.dataset_name]
    for mode, path in data_path.items():
        data_path[mode] = os.path.join(cfg.cwd, path)
    dataset_class, data_process = DATASET_CLASS[cfg.dataset_name], DATA_PROCESS[cfg.dataset_name]
    mapping = MAPPING[cfg.dataset_name]
    set_seed(cfg.seed)  # set seed, default is 1
    if cfg.save_path is not None:  # make save_path dir
        cfg.save_path = os.path.join(cfg.save_path, cfg.dataset_name+"_"+str(cfg.batch_size)+"_"+str(cfg.learning_rate)+cfg.notes)
        # exist_ok=True already tolerates an existing directory, so the
        # previous os.path.exists() guard was redundant.
        os.makedirs(cfg.save_path, exist_ok=True)
    # Tokenization / label-to-prompt-token mapping shared by both splits.
    process = data_process(data_path=data_path, mapping=mapping, bart_name=cfg.bart_name, learn_weights=cfg.learn_weights)
    train_dataset = dataset_class(data_processor=process, mode='train')
    train_dataloader = DataLoader(train_dataset, collate_fn=train_dataset.collate_fn, batch_size=cfg.batch_size, num_workers=4)
    dev_dataset = dataset_class(data_processor=process, mode='dev')
    dev_dataloader = DataLoader(dev_dataset, collate_fn=dev_dataset.collate_fn, batch_size=cfg.batch_size, num_workers=4)
    label_ids = list(process.mapping2id.values())
    # Seq2seq generator wrapping the prompt model; token ids 0/1 are the
    # BART bos/eos (eos doubles as pad here).
    prompt_model = PromptBartModel(tokenizer=process.tokenizer, label_ids=label_ids, args=cfg)
    model = PromptGeneratorModel(prompt_model=prompt_model, bos_token_id=0,
                                 eos_token_id=1,
                                 max_length=cfg.tgt_max_len, max_len_a=cfg.src_seq_ratio,num_beams=cfg.num_beams, do_sample=False,
                                 repetition_penalty=1, length_penalty=cfg.length_penalty, pad_token_id=1,
                                 restricter=None)
    metrics = Seq2SeqSpanMetric(eos_token_id=1, num_labels=len(label_ids), target_type='word')
    loss = get_loss
    trainer = Trainer(train_data=train_dataloader, dev_data=dev_dataloader, test_data=None, model=model, args=cfg, logger=logger, loss=loss,
                      metrics=metrics, writer=writer)
    trainer.train()
    writer.close()
# Allow running this module directly as a training script.
if __name__ == "__main__":
    main()
| 40.630631 | 140 | 0.688248 | import os
import hydra
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]='1'
import logging
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../")))
from hydra import utils
from torch.utils.data import DataLoader
from deepke.name_entity_re.few_shot.models.model import PromptBartModel, PromptGeneratorModel
from deepke.name_entity_re.few_shot.module.datasets import ConllNERProcessor, ConllNERDataset
from deepke.name_entity_re.few_shot.module.train import Trainer
from deepke.name_entity_re.few_shot.module.metrics import Seq2SeqSpanMetric
from deepke.name_entity_re.few_shot.utils.util import get_loss, set_seed
from deepke.name_entity_re.few_shot.module.mapping_type import mit_movie_mapping, mit_restaurant_mapping, atis_mapping
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
from tensorboardX import SummaryWriter
writer = SummaryWriter(log_dir='logs')
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
DATASET_CLASS = {
'conll2003': ConllNERDataset,
'mit-movie': ConllNERDataset,
'mit-restaurant': ConllNERDataset,
'atis': ConllNERDataset
}
DATA_PROCESS = {
'conll2003': ConllNERProcessor,
'mit-movie': ConllNERProcessor,
'mit-restaurant': ConllNERProcessor,
'atis': ConllNERProcessor
}
DATA_PATH = {
'conll2003': {'train': 'data/conll2003/train.txt',
'dev': 'data/conll2003/dev.txt',
'test': 'data/conll2003/test.txt'},
'mit-movie': {'train': 'data/mit-movie/20-shot-train.txt',
'dev': 'data/mit-movie/test.txt'},
'mit-restaurant': {'train': 'data/mit-restaurant/10-shot-train.txt',
'dev': 'data/mit-restaurant/test.txt'},
'atis': {'train': 'data/atis/20-shot-train.txt',
'dev': 'data/atis/test.txt'}
}
MAPPING = {
'conll2003': {'loc': '<<location>>',
'per': '<<person>>',
'org': '<<organization>>',
'misc': '<<others>>'},
'mit-movie': mit_movie_mapping,
'mit-restaurant': mit_restaurant_mapping,
'atis': atis_mapping
}
@hydra.main(config_path="conf/config.yaml")
def main(cfg):
cwd = utils.get_original_cwd()
cfg.cwd = cwd
print(cfg)
data_path = DATA_PATH[cfg.dataset_name]
for mode, path in data_path.items():
data_path[mode] = os.path.join(cfg.cwd, path)
dataset_class, data_process = DATASET_CLASS[cfg.dataset_name], DATA_PROCESS[cfg.dataset_name]
mapping = MAPPING[cfg.dataset_name]
set_seed(cfg.seed)
if cfg.save_path is not None:
cfg.save_path = os.path.join(cfg.save_path, cfg.dataset_name+"_"+str(cfg.batch_size)+"_"+str(cfg.learning_rate)+cfg.notes)
if not os.path.exists(cfg.save_path):
os.makedirs(cfg.save_path, exist_ok=True)
process = data_process(data_path=data_path, mapping=mapping, bart_name=cfg.bart_name, learn_weights=cfg.learn_weights)
train_dataset = dataset_class(data_processor=process, mode='train')
train_dataloader = DataLoader(train_dataset, collate_fn=train_dataset.collate_fn, batch_size=cfg.batch_size, num_workers=4)
dev_dataset = dataset_class(data_processor=process, mode='dev')
dev_dataloader = DataLoader(dev_dataset, collate_fn=dev_dataset.collate_fn, batch_size=cfg.batch_size, num_workers=4)
label_ids = list(process.mapping2id.values())
prompt_model = PromptBartModel(tokenizer=process.tokenizer, label_ids=label_ids, args=cfg)
model = PromptGeneratorModel(prompt_model=prompt_model, bos_token_id=0,
eos_token_id=1,
max_length=cfg.tgt_max_len, max_len_a=cfg.src_seq_ratio,num_beams=cfg.num_beams, do_sample=False,
repetition_penalty=1, length_penalty=cfg.length_penalty, pad_token_id=1,
restricter=None)
metrics = Seq2SeqSpanMetric(eos_token_id=1, num_labels=len(label_ids), target_type='word')
loss = get_loss
trainer = Trainer(train_data=train_dataloader, dev_data=dev_dataloader, test_data=None, model=model, args=cfg, logger=logger, loss=loss,
metrics=metrics, writer=writer)
trainer.train()
writer.close()
if __name__ == "__main__":
main()
| true | true |
f7f479aad3d4b6a023500f79e3d8289e8e4cd552 | 1,910 | py | Python | code/model_zoo/basset.py | p-koo/exponential_activations | 7e48054b64a565364439c45932338a09eb2eb4d3 | [
"MIT"
] | 1 | 2021-09-18T04:09:07.000Z | 2021-09-18T04:09:07.000Z | code/model_zoo/basset.py | koo-lab/exponential_activations | 9032a360c1abb0f07b824e3ce6d20707efe306fd | [
"MIT"
] | null | null | null | code/model_zoo/basset.py | koo-lab/exponential_activations | 9032a360c1abb0f07b824e3ce6d20707efe306fd | [
"MIT"
] | 4 | 2020-08-03T02:08:42.000Z | 2021-10-01T18:46:47.000Z | from tensorflow import keras
from tfomics import layers, utils
def model(activation='relu'):
    """Build a Basset-style CNN for one-hot DNA input of shape (600, 4).

    Only the first convolutional layer uses the configurable ``activation``
    (resolved via ``utils.activation_fn``); the deeper layers are fixed to
    ReLU.  The head is a 164-unit sigmoid multi-task output.
    """
    seq_input = keras.layers.Input(shape=(600, 4))
    first_activation = utils.activation_fn(activation)
    # Conv block 1: wide motif-scanning filters with the configurable activation.
    net = layers.conv_layer(
        seq_input,
        num_filters=300,
        kernel_size=19,
        padding='same',
        activation=first_activation,
        dropout=0.2,
        l2=1e-6,
        bn=True,
    )
    net = keras.layers.MaxPool1D(pool_size=3)(net)
    # Conv block 2 (fixed ReLU).
    net = layers.conv_layer(
        net,
        num_filters=200,
        kernel_size=11,
        padding='valid',
        activation='relu',
        dropout=0.2,
        l2=1e-6,
        bn=True,
    )
    net = keras.layers.MaxPool1D(pool_size=4)(net)
    # Conv block 3 (fixed ReLU).
    net = layers.conv_layer(
        net,
        num_filters=200,
        kernel_size=7,
        padding='valid',
        activation='relu',
        dropout=0.2,
        l2=1e-6,
        bn=True,
    )
    net = keras.layers.MaxPool1D(pool_size=4)(net)
    # Two fully-connected blocks, then the multi-task sigmoid head.
    net = keras.layers.Flatten()(net)
    net = layers.dense_layer(net, num_units=1000, activation='relu', dropout=0.5, l2=1e-6, bn=True)
    net = layers.dense_layer(net, num_units=1000, activation='relu', dropout=0.5, l2=1e-6, bn=True)
    logits = keras.layers.Dense(164, activation='linear', use_bias=True)(net)
    outputs = keras.layers.Activation('sigmoid')(logits)
    return keras.Model(inputs=seq_input, outputs=outputs)
| 31.833333 | 97 | 0.469634 | from tensorflow import keras
from tfomics import layers, utils
def model(activation='relu'):
inputs = keras.layers.Input(shape=(600,4))
activation = utils.activation_fn(activation)
nn = layers.conv_layer(inputs,
num_filters=300,
kernel_size=19,
padding='same',
activation=activation,
dropout=0.2,
l2=1e-6,
bn=True)
nn = keras.layers.MaxPool1D(pool_size=3)(nn)
nn = layers.conv_layer(nn,
num_filters=200,
kernel_size=11,
padding='valid',
activation='relu',
dropout=0.2,
l2=1e-6,
bn=True)
nn = keras.layers.MaxPool1D(pool_size=4)(nn)
nn = layers.conv_layer(nn,
num_filters=200,
kernel_size=7,
padding='valid',
activation='relu',
dropout=0.2,
l2=1e-6,
bn=True)
nn = keras.layers.MaxPool1D(pool_size=4)(nn)
nn = keras.layers.Flatten()(nn)
nn = layers.dense_layer(nn, num_units=1000, activation='relu', dropout=0.5, l2=1e-6, bn=True)
nn = layers.dense_layer(nn, num_units=1000, activation='relu', dropout=0.5, l2=1e-6, bn=True)
logits = keras.layers.Dense(164, activation='linear', use_bias=True)(nn)
outputs = keras.layers.Activation('sigmoid')(logits)
model = keras.Model(inputs=inputs, outputs=outputs)
return model
| true | true |
f7f47a92fd7c1d74ca751cff56758e0579e38e65 | 825 | py | Python | agroop/agroop.py | arcsecond-io/agroop | 8c0db4ffa37d9f45985394e52bb5f06eb4bebe6a | [
"MIT"
] | null | null | null | agroop/agroop.py | arcsecond-io/agroop | 8c0db4ffa37d9f45985394e52bb5f06eb4bebe6a | [
"MIT"
] | null | null | null | agroop/agroop.py | arcsecond-io/agroop | 8c0db4ffa37d9f45985394e52bb5f06eb4bebe6a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import click
import json
from astropy.coordinates import SkyCoord
from agroop.options import State
# Prefix prepended to every verbose click.echo() message in this module.
ECHO_PREFIX = u' • '
# NOTE(review): __all__ contains only the empty string, so star-imports from
# this module export nothing useful; presumably this should list
# parse_coord_string -- confirm intent before changing.
__all__ = [""]
def parse_coord_string(s, state):
    """Resolve *s* to an ICRS SkyCoord.

    First try to parse *s* as literal coordinates in degrees (commas are
    treated as whitespace).  If that fails with ValueError, fall back to
    looking *s* up as an object name via the Arcsecond API.

    state.verbose enables progress messages on stdout.
    """
    try:
        if state.verbose:
            click.echo(ECHO_PREFIX + 'Parsing REF coordinates input "{}"...'.format(s))
        return SkyCoord(s.replace(',', ' '), frame='icrs', unit='deg')
    except ValueError:
        if state.verbose:
            # Bug fix: this branch runs precisely when parsing *failed*, but the
            # message previously said "can be parsed".
            click.echo(ECHO_PREFIX + 'REF coordinates cannot be parsed. Looking for an object with name "{}"...'.format(s))
        # Imported lazily so the CLI does not pay for the API client unless needed.
        from arcsecond import ArcsecondAPI
        obj = ArcsecondAPI(ArcsecondAPI.ENDPOINT_OBJECTS).read(s)
        coords = obj.get('ICRS_coordinates')
        return SkyCoord(coords.get('right_ascension'), coords.get('declination'), frame='icrs', unit='deg')
| 31.730769 | 120 | 0.652121 |
import click
import json
from astropy.coordinates import SkyCoord
from agroop.options import State
ECHO_PREFIX = u' • '
__all__ = [""]
def parse_coord_string(s, state):
try:
if state.verbose:
click.echo(ECHO_PREFIX + 'Parsing REF coordinates input "{}"...'.format(s))
return SkyCoord(s.replace(',', ' '), frame='icrs', unit='deg')
except ValueError:
if state.verbose:
click.echo(ECHO_PREFIX + 'REF coordinates can be parsed. Looking for an object with name "{}"...'.format(s))
from arcsecond import ArcsecondAPI
obj = ArcsecondAPI(ArcsecondAPI.ENDPOINT_OBJECTS).read(s)
coords = obj.get('ICRS_coordinates')
return SkyCoord(coords.get('right_ascension'), coords.get('declination'), frame='icrs', unit='deg')
| true | true |
f7f47ad8f1967d539e5880a2674e047fef43b42d | 132,890 | py | Python | zerver/tests/test_markdown.py | usnp/zulip | 594f2d4086f2d50d7caffc9c9693cc0ac920e047 | [
"Apache-2.0"
] | null | null | null | zerver/tests/test_markdown.py | usnp/zulip | 594f2d4086f2d50d7caffc9c9693cc0ac920e047 | [
"Apache-2.0"
] | null | null | null | zerver/tests/test_markdown.py | usnp/zulip | 594f2d4086f2d50d7caffc9c9693cc0ac920e047 | [
"Apache-2.0"
] | null | null | null | import copy
import os
import re
from textwrap import dedent
from typing import Any, Dict, List, Optional, Set, Tuple, cast
from unittest import mock
import orjson
from django.conf import settings
from django.test import override_settings
from markdown import Markdown
from zerver.lib.actions import (
change_user_is_active,
do_add_alert_words,
do_change_user_setting,
do_create_realm,
do_remove_realm_emoji,
do_set_realm_property,
)
from zerver.lib.alert_words import get_alert_word_automaton
from zerver.lib.camo import get_camo_url
from zerver.lib.create_user import create_user
from zerver.lib.emoji import get_emoji_url
from zerver.lib.exceptions import JsonableError, MarkdownRenderingException
from zerver.lib.markdown import (
MarkdownListPreprocessor,
MessageRenderingResult,
clear_state_for_testing,
content_has_emoji_syntax,
fetch_tweet_data,
get_tweet_id,
image_preview_enabled,
markdown_convert,
maybe_update_markdown_engines,
possible_linked_stream_names,
topic_links,
url_embed_preview_enabled,
url_to_a,
)
from zerver.lib.markdown.fenced_code import FencedBlockPreprocessor
from zerver.lib.mdiff import diff_strings
from zerver.lib.mention import (
MentionData,
get_possible_mentions_info,
possible_mentions,
possible_user_group_mentions,
)
from zerver.lib.message import render_markdown
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.tex import render_tex
from zerver.lib.user_groups import create_user_group
from zerver.models import (
Message,
RealmEmoji,
RealmFilter,
Stream,
UserGroup,
UserMessage,
UserProfile,
flush_linkifiers,
flush_per_request_caches,
get_client,
get_realm,
get_stream,
linkifiers_for_realm,
realm_in_local_linkifiers_cache,
)
class SimulatedFencedBlockPreprocessor(FencedBlockPreprocessor):
    """Test double that renders fenced code deterministically.

    Code blocks become ``<lang>:<code>`` and placeholders are wrapped in
    ``**`` markers, so tests can assert on plain strings instead of HTML.
    """

    def format_code(self, lang: Optional[str], code: str) -> str:
        # Fake "highlighting": just prefix the (possibly empty) language tag.
        return f"{lang or ''}:{code}"

    def placeholder(self, s: str) -> str:
        return "**{}**".format(s.strip("\n"))
class FencedBlockPreprocessorTest(ZulipTestCase):
    """Line-level tests for the fenced-block preprocessor.

    Inputs and expected outputs are lists of source lines; the processor
    rewrites ``~~~ quote`` fences into ``>`` blockquote lines and (via the
    simulated subclass) ``\x60\x60\x60`` code fences into formatted placeholders.
    """
    def test_simple_quoting(self) -> None:
        """An unterminated quote fence is closed and its body prefixed with '> '."""
        processor = FencedBlockPreprocessor(Markdown())
        markdown_input = [
            "~~~ quote",
            "hi",
            "bye",
            "",
            "",
        ]
        expected = [
            "",
            "> hi",
            "> bye",
            "> ",
            "> ",
            "",
            "",
        ]
        lines = processor.run(markdown_input)
        self.assertEqual(lines, expected)
    def test_serial_quoting(self) -> None:
        """Two back-to-back quote fences each become their own blockquote."""
        processor = FencedBlockPreprocessor(Markdown())
        markdown_input = [
            "~~~ quote",
            "hi",
            "~~~",
            "",
            "~~~ quote",
            "bye",
            "",
            "",
        ]
        expected = [
            "",
            "> hi",
            "",
            "",
            "",
            "> bye",
            "> ",
            "> ",
            "",
            "",
        ]
        lines = processor.run(markdown_input)
        self.assertEqual(lines, expected)
    def test_serial_code(self) -> None:
        """Consecutive code fences keep their language tags, including odd ones (c#, none)."""
        processor = SimulatedFencedBlockPreprocessor(Markdown())
        # Note that we only have one newline between the two code blocks
        markdown_input = [
            "``` .py",
            "hello()",
            "```",
            "",
            "```vb.net",
            "goodbye()",
            "```",
            "",
            "```c#",
            "weirdchar()",
            "```",
            "",
            "```",
            "no-highlight()",
            "```",
            "",
        ]
        expected = [
            "",
            "**py:hello()**",
            "",
            "",
            "",
            "**vb.net:goodbye()**",
            "",
            "",
            "",
            "**c#:weirdchar()**",
            "",
            "",
            "",
            "**:no-highlight()**",
            "",
            "",
        ]
        lines = processor.run(markdown_input)
        self.assertEqual(lines, expected)
    def test_nested_code(self) -> None:
        """A code fence inside a quote fence is both formatted and quote-prefixed."""
        processor = SimulatedFencedBlockPreprocessor(Markdown())
        markdown_input = [
            "~~~ quote",
            "hi",
            "``` .py",
            "hello()",
            "```",
            "",
            "",
        ]
        expected = [
            "",
            "> hi",
            "> ",
            "> **py:hello()**",
            "> ",
            "> ",
            "> ",
            "",
            "",
        ]
        lines = processor.run(markdown_input)
        self.assertEqual(lines, expected)
def markdown_convert_wrapper(content: str) -> str:
    """Render ``content`` in the "zulip" realm and return just the HTML string."""
    rendering_result = markdown_convert(
        content=content,
        message_realm=get_realm("zulip"),
    )
    return rendering_result.rendered_content
class MarkdownMiscTest(ZulipTestCase):
    """Tests for Markdown helpers that aren't full message rendering."""
    def test_diffs_work_as_expected(self) -> None:
        """diff_strings produces the expected ANSI-colored word-level diff."""
        str1 = "<p>The quick brown fox jumps over the lazy dog. Animal stories are fun, yeah</p>"
        str2 = "<p>The fast fox jumps over the lazy dogs and cats. Animal stories are fun</p>"
        # \u001b[...m sequences are ANSI colors: blue +/- markers, yellow
        # changes, red deletions, green insertions.
        expected_diff = "\u001b[34m-\u001b[0m <p>The \u001b[33mquick brown\u001b[0m fox jumps over the lazy dog. Animal stories are fun\u001b[31m, yeah\u001b[0m</p>\n\u001b[34m+\u001b[0m <p>The \u001b[33mfast\u001b[0m fox jumps over the lazy dog\u001b[32ms and cats\u001b[0m. Animal stories are fun</p>\n"
        self.assertEqual(diff_strings(str1, str2), expected_diff)
    def test_get_possible_mentions_info(self) -> None:
        """Lookup by full name skips deactivated users but keeps all active duplicates."""
        realm = get_realm("zulip")
        def make_user(email: str, full_name: str) -> UserProfile:
            return create_user(
                email=email,
                password="whatever",
                realm=realm,
                full_name=full_name,
            )
        # Four users share the name "Fred Flintstone"; fred1 and fred3 are
        # deactivated, so only fred2 and fred4 should be returned.
        fred1 = make_user("fred1@example.com", "Fred Flintstone")
        change_user_is_active(fred1, False)
        fred2 = make_user("fred2@example.com", "Fred Flintstone")
        fred3 = make_user("fred3@example.com", "Fred Flintstone")
        change_user_is_active(fred3, False)
        fred4 = make_user("fred4@example.com", "Fred Flintstone")
        # "Not A User" should silently produce no rows.
        lst = get_possible_mentions_info(
            realm.id, {"Fred Flintstone", "Cordelia, LEAR's daughter", "Not A User"}
        )
        set_of_names = set(map(lambda x: x["full_name"].lower(), lst))
        self.assertEqual(set_of_names, {"fred flintstone", "cordelia, lear's daughter"})
        by_id = {row["id"]: row for row in lst}
        self.assertEqual(
            by_id.get(fred2.id),
            dict(
                email=fred2.email,
                full_name="Fred Flintstone",
                id=fred2.id,
            ),
        )
        self.assertEqual(
            by_id.get(fred4.id),
            dict(
                email=fred4.email,
                full_name="Fred Flintstone",
                id=fred4.id,
            ),
        )
    def test_mention_data(self) -> None:
        """MentionData extracts user ids, supports case-insensitive name lookup, and detects wildcards."""
        realm = get_realm("zulip")
        hamlet = self.example_user("hamlet")
        cordelia = self.example_user("cordelia")
        content = "@**King Hamlet** @**Cordelia, lear's daughter**"
        mention_data = MentionData(realm.id, content)
        self.assertEqual(mention_data.get_user_ids(), {hamlet.id, cordelia.id})
        self.assertEqual(
            mention_data.get_user_by_id(hamlet.id),
            dict(
                email=hamlet.email,
                full_name=hamlet.full_name,
                id=hamlet.id,
            ),
        )
        # Name lookup is case-insensitive.
        user = mention_data.get_user_by_name("king hamLET")
        assert user is not None
        self.assertEqual(user["email"], hamlet.email)
        self.assertFalse(mention_data.message_has_wildcards())
        # Adding @**all** flips the wildcard flag.
        content = "@**King Hamlet** @**Cordelia, lear's daughter** @**all**"
        mention_data = MentionData(realm.id, content)
        self.assertTrue(mention_data.message_has_wildcards())
    def test_invalid_katex_path(self) -> None:
        """A missing KaTeX install logs an error instead of crashing rendering."""
        with self.settings(DEPLOY_ROOT="/nonexistent"):
            with self.assertLogs(level="ERROR") as m:
                render_tex("random text")
            self.assertEqual(m.output, ["ERROR:root:Cannot find KaTeX for latex rendering!"])
class MarkdownListPreprocessorTest(ZulipTestCase):
    # We test that the preprocessor inserts blank lines at correct places.
    # We use <> to indicate that we need to insert a blank line here.
    def split_message(self, msg: str) -> Tuple[List[str], List[str]]:
        """Return (input_lines, expected_lines) from a message annotated with <> markers."""
        # Input drops the markers entirely; expected treats each marker as an
        # extra blank line the preprocessor is supposed to insert.
        original = msg.replace("<>", "").split("\n")
        expected = re.split(r"\n|<>", msg)
        return original, expected
    def test_basic_list(self) -> None:
        """A blank line is inserted before a list that directly follows text."""
        preprocessor = MarkdownListPreprocessor()
        original, expected = self.split_message("List without a gap\n<>* One\n* Two")
        self.assertEqual(preprocessor.run(original), expected)
    def test_list_after_quotes(self) -> None:
        """List fix-up still applies after an earlier quote fence has closed."""
        preprocessor = MarkdownListPreprocessor()
        original, expected = self.split_message(
            "```quote\nSomething\n```\n\nList without a gap\n<>* One\n* Two"
        )
        self.assertEqual(preprocessor.run(original), expected)
    def test_list_in_code(self) -> None:
        """No blank line is inserted for list-like text inside a code fence."""
        preprocessor = MarkdownListPreprocessor()
        original, expected = self.split_message("```\nList without a gap\n* One\n* Two\n```")
        self.assertEqual(preprocessor.run(original), expected)
    def test_complex_nesting_with_different_fences(self) -> None:
        """Quote fences get fixed; a nested ~~~ code fence (and a quote inside it) do not."""
        preprocessor = MarkdownListPreprocessor()
        msg = """```quote
In quote. We should convert a list here:<>
* one
* two
~~~
This is a nested code fence, do not make changes here:
* one
* two
````quote
Quote in code fence. Should not convert:
* one
* two
````
~~~
Back in the quote. We should convert:<>
* one
* two
```
Outside. Should convert:<>
* one
* two
"""
        original, expected = self.split_message(msg)
        self.assertEqual(preprocessor.run(original), expected)
    def test_complex_nesting_with_same_fence(self) -> None:
        """Same as above, but the nested code fence reuses the ``` delimiter."""
        preprocessor = MarkdownListPreprocessor()
        msg = """```quote
In quote. We should convert a list here:<>
* one
* two
```python
This is a nested code fence, do not make changes here:
* one
* two
```quote
Quote in code fence. Should not convert:
* one
* two
```
```
Back in the quote. We should convert:<>
* one
* two
```
Outside. Should convert:<>
* one
* two
"""
        original, expected = self.split_message(msg)
        self.assertEqual(preprocessor.run(original), expected)
class MarkdownTest(ZulipTestCase):
    def setUp(self) -> None:
        """Reset module-level Markdown engine/linkifier caches between tests."""
        super().setUp()
        clear_state_for_testing()
def assertEqual(self, first: Any, second: Any, msg: str = "") -> None:
if isinstance(first, str) and isinstance(second, str):
if first != second:
raise AssertionError(
"Actual and expected outputs do not match; showing diff.\n"
+ diff_strings(first, second)
+ msg
)
else:
super().assertEqual(first, second)
def load_markdown_tests(self) -> Tuple[Dict[str, Any], List[List[str]]]:
test_fixtures = {}
with open(
os.path.join(os.path.dirname(__file__), "fixtures/markdown_test_cases.json"), "rb"
) as f:
data = orjson.loads(f.read())
for test in data["regular_tests"]:
test_fixtures[test["name"]] = test
return test_fixtures, data["linkify_tests"]
def test_markdown_no_ignores(self) -> None:
# We do not want any ignored tests to be committed and merged.
format_tests, linkify_tests = self.load_markdown_tests()
for name, test in format_tests.items():
message = f'Test "{name}" shouldn\'t be ignored.'
is_ignored = test.get("ignore", False)
self.assertFalse(is_ignored, message)
    def test_markdown_fixtures(self) -> None:
        """Run every JSON fixture through the renderer and check its expected HTML."""
        format_tests, linkify_tests = self.load_markdown_tests()
        # The full set of keys a fixture entry is allowed to have.
        valid_keys = {
            "name",
            "input",
            "expected_output",
            "backend_only_rendering",
            "marked_expected_output",
            "text_content",
            "translate_emoticons",
            "ignore",
        }
        for name, test in format_tests.items():
            with self.subTest(markdown_test_case=name):
                # Check that there aren't any unexpected keys as those are often typos
                self.assert_length(set(test.keys()) - valid_keys, 0)
                # Ignore tests if specified
                if test.get("ignore", False):
                    continue # nocoverage
                if test.get("translate_emoticons", False):
                    # Create a userprofile and send message with it.
                    user_profile = self.example_user("othello")
                    do_change_user_setting(user_profile, "translate_emoticons", True)
                    msg = Message(sender=user_profile, sending_client=get_client("test"))
                    rendering_result = render_markdown(msg, test["input"])
                    converted = rendering_result.rendered_content
                else:
                    converted = markdown_convert_wrapper(test["input"])
                self.assertEqual(converted, test["expected_output"])
        def replaced(payload: str, url: str, phrase: str = "") -> str:
            # Build the expected <a> tag for a linkified URL: http(s) URLs are
            # used verbatim, bare emails get mailto:, anything else http://.
            if url[:4] == "http":
                href = url
            elif "@" in url:
                href = "mailto:" + url
            else:
                href = "http://" + url
            return payload % (f'<a href="{href}">{url}</a>',)
        # Disable URL preview caching so linkify output is deterministic.
        with mock.patch(
            "zerver.lib.url_preview.preview.link_embed_data_from_cache", return_value=None
        ):
            for inline_url, reference, url in linkify_tests:
                try:
                    match = replaced(reference, url, phrase=inline_url)
                except TypeError:
                    # reference contains no %s placeholder; compare it as-is.
                    match = reference
                converted = markdown_convert_wrapper(inline_url)
                self.assertEqual(match, converted)
    def test_inline_file(self) -> None:
        """file:// URLs are linkified only when ENABLE_FILE_LINKS is on."""
        msg = "Check out this file file:///Volumes/myserver/Users/Shared/pi.py"
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            '<p>Check out this file <a href="file:///Volumes/myserver/Users/Shared/pi.py">file:///Volumes/myserver/Users/Shared/pi.py</a></p>',
        )
        # With file links disabled, the same message renders as plain text.
        # Clear cached engines first so the new setting takes effect.
        clear_state_for_testing()
        with self.settings(ENABLE_FILE_LINKS=False):
            realm = do_create_realm(string_id="file_links_test", name="file_links_test")
            maybe_update_markdown_engines(realm.id, False)
            self.assertEqual(
                markdown_convert(msg, message_realm=realm).rendered_content,
                "<p>Check out this file file:///Volumes/myserver/Users/Shared/pi.py</p>",
            )
def test_inline_bitcoin(self) -> None:
msg = "To bitcoin:1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa or not to bitcoin"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
'<p>To <a href="bitcoin:1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa">bitcoin:1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa</a> or not to bitcoin</p>',
)
    def test_inline_youtube(self) -> None:
        """YouTube links get an inline thumbnail preview (served through Camo)."""
        # Video URL embedded in surrounding text.
        msg = "Check out the debate: http://www.youtube.com/watch?v=hx1mjT73xYE"
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            f"""<p>Check out the debate: <a href="http://www.youtube.com/watch?v=hx1mjT73xYE">http://www.youtube.com/watch?v=hx1mjT73xYE</a></p>\n<div class="youtube-video message_inline_image"><a data-id="hx1mjT73xYE" href="http://www.youtube.com/watch?v=hx1mjT73xYE"><img src="{get_camo_url("https://i.ytimg.com/vi/hx1mjT73xYE/default.jpg")}"></a></div>""",
        )
        # Bare watch URL.
        msg = "http://www.youtube.com/watch?v=hx1mjT73xYE"
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            f"""<p><a href="http://www.youtube.com/watch?v=hx1mjT73xYE">http://www.youtube.com/watch?v=hx1mjT73xYE</a></p>\n<div class="youtube-video message_inline_image"><a data-id="hx1mjT73xYE" href="http://www.youtube.com/watch?v=hx1mjT73xYE"><img src="{get_camo_url("https://i.ytimg.com/vi/hx1mjT73xYE/default.jpg")}"></a></div>""",
        )
        # Short youtu.be form.
        msg = "https://youtu.be/hx1mjT73xYE"
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            f"""<p><a href="https://youtu.be/hx1mjT73xYE">https://youtu.be/hx1mjT73xYE</a></p>\n<div class="youtube-video message_inline_image"><a data-id="hx1mjT73xYE" href="https://youtu.be/hx1mjT73xYE"><img src="{get_camo_url("https://i.ytimg.com/vi/hx1mjT73xYE/default.jpg")}"></a></div>""",
        )
        # Playlist URL with no specific video: no preview.
        msg = "https://www.youtube.com/playlist?list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo"
        not_converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            not_converted,
            '<p><a href="https://www.youtube.com/playlist?list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo">https://www.youtube.com/playlist?list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo</a></p>',
        )
        # Playlist URL that also names a video: previews that video.
        msg = (
            "https://www.youtube.com/playlist?v=O5nskjZ_GoI&list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo"
        )
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            f"""<p><a href="https://www.youtube.com/playlist?v=O5nskjZ_GoI&list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo">https://www.youtube.com/playlist?v=O5nskjZ_GoI&list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo</a></p>\n<div class="youtube-video message_inline_image"><a data-id="O5nskjZ_GoI" href="https://www.youtube.com/playlist?v=O5nskjZ_GoI&list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo"><img src="{get_camo_url("https://i.ytimg.com/vi/O5nskjZ_GoI/default.jpg")}"></a></div>""",
        )
        # watch_videos URL: previews the first video id listed.
        msg = "http://www.youtube.com/watch_videos?video_ids=nOJgD4fcZhI,i96UO8-GFvw"
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            f"""<p><a href="http://www.youtube.com/watch_videos?video_ids=nOJgD4fcZhI,i96UO8-GFvw">http://www.youtube.com/watch_videos?video_ids=nOJgD4fcZhI,i96UO8-GFvw</a></p>\n<div class="youtube-video message_inline_image"><a data-id="nOJgD4fcZhI" href="http://www.youtube.com/watch_videos?video_ids=nOJgD4fcZhI,i96UO8-GFvw"><img src="{get_camo_url("https://i.ytimg.com/vi/nOJgD4fcZhI/default.jpg")}"></a></div>""",
        )
@override_settings(INLINE_URL_EMBED_PREVIEW=False)
def test_inline_vimeo(self) -> None:
msg = "Check out the debate: https://vimeo.com/246979354"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
'<p>Check out the debate: <a href="https://vimeo.com/246979354">https://vimeo.com/246979354</a></p>',
)
msg = "https://vimeo.com/246979354"
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
'<p><a href="https://vimeo.com/246979354">https://vimeo.com/246979354</a></p>',
)
    @override_settings(INLINE_IMAGE_PREVIEW=True)
    def test_inline_image_thumbnail_url(self) -> None:
        """Uploaded and external images go through /thumbnail; static assets do not."""
        # Uploaded file: thumbnailed via the realm-scoped user_uploads path.
        realm = get_realm("zephyr")
        msg = "[foobar](/user_uploads/{realm_id}/50/w2G6ok9kr8AMCQCTNAUOFMln/IMG_0677.JPG)"
        msg = msg.format(realm_id=realm.id)
        thumbnail_img = '<img data-src-fullsize="/thumbnail?url=user_uploads%2F{realm_id}%2F50%2Fw2G6ok9kr8AMCQCTNAUOFMln%2FIMG_0677.JPG&size=full" src="/thumbnail?url=user_uploads%2F{realm_id}%2F50%2Fw2G6ok9kr8AMCQCTNAUOFMln%2FIMG_0677.JPG&size=thumbnail"><'
        thumbnail_img = thumbnail_img.format(realm_id=realm.id)
        converted = markdown_convert_wrapper(msg)
        self.assertIn(thumbnail_img, converted)
        # External https image URL: also thumbnailed.
        msg = "https://www.google.com/images/srpr/logo4w.png"
        thumbnail_img = '<img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&size=full" src="/thumbnail?url=https%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&size=thumbnail">'
        converted = markdown_convert_wrapper(msg)
        self.assertIn(thumbnail_img, converted)
        # Schemeless URL: http:// is assumed before thumbnailing.
        msg = "www.google.com/images/srpr/logo4w.png"
        thumbnail_img = '<img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&size=full" src="/thumbnail?url=http%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&size=thumbnail">'
        converted = markdown_convert_wrapper(msg)
        self.assertIn(thumbnail_img, converted)
        # With thumbnailing disabled, external images are proxied through Camo instead.
        msg = "https://www.google.com/images/srpr/logo4w.png"
        thumbnail_img = f"""<div class="message_inline_image"><a href="https://www.google.com/images/srpr/logo4w.png"><img src="{get_camo_url("https://www.google.com/images/srpr/logo4w.png")}"></a></div>"""
        with self.settings(THUMBNAIL_IMAGES=False):
            converted = markdown_convert_wrapper(msg)
            self.assertIn(thumbnail_img, converted)
        # Any URL which is not an external link and doesn't start with
        # /user_uploads/ is not thumbnailed
        msg = "[foobar](/static/images/cute/turtle.png)"
        thumbnail_img = '<div class="message_inline_image"><a href="/static/images/cute/turtle.png" title="foobar"><img src="/static/images/cute/turtle.png"></a></div>'
        converted = markdown_convert_wrapper(msg)
        self.assertIn(thumbnail_img, converted)
        msg = "[foobar](/user_avatars/{realm_id}/emoji/images/50.png)"
        msg = msg.format(realm_id=realm.id)
        thumbnail_img = '<div class="message_inline_image"><a href="/user_avatars/{realm_id}/emoji/images/50.png" title="foobar"><img src="/user_avatars/{realm_id}/emoji/images/50.png"></a></div>'
        thumbnail_img = thumbnail_img.format(realm_id=realm.id)
        converted = markdown_convert_wrapper(msg)
        self.assertIn(thumbnail_img, converted)
@override_settings(INLINE_IMAGE_PREVIEW=True)
def test_inline_image_preview(self) -> None:
with_preview = '<div class="message_inline_image"><a href="http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fcdn.wallpapersafari.com%2F13%2F6%2F16eVjx.jpeg&size=full" src="/thumbnail?url=http%3A%2F%2Fcdn.wallpapersafari.com%2F13%2F6%2F16eVjx.jpeg&size=thumbnail"></a></div>'
without_preview = '<p><a href="http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg">http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg</a></p>'
content = "http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg"
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted.rendered_content, with_preview)
realm = msg.get_realm()
setattr(realm, "inline_image_preview", False)
realm.save()
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted.rendered_content, without_preview)
@override_settings(THUMBNAIL_IMAGES=False, EXTERNAL_URI_SCHEME="https://")
def test_external_image_preview_use_camo(self) -> None:
content = "https://example.com/thing.jpeg"
thumbnail_img = f"""<div class="message_inline_image"><a href="{content}"><img src="{get_camo_url(content)}"></a></div>"""
converted = markdown_convert_wrapper(content)
self.assertIn(converted, thumbnail_img)
@override_settings(THUMBNAIL_IMAGES=False, EXTERNAL_URI_SCHEME="https://")
def test_static_image_preview_skip_camo(self) -> None:
content = f"{ settings.STATIC_URL }/thing.jpeg"
thumbnail_img = f"""<div class="message_inline_image"><a href="{content}"><img src="{content}"></a></div>"""
converted = markdown_convert_wrapper(content)
self.assertIn(converted, thumbnail_img)
@override_settings(THUMBNAIL_IMAGES=False, EXTERNAL_URI_SCHEME="https://")
def test_realm_image_preview_skip_camo(self) -> None:
content = f"https://zulip.{ settings.EXTERNAL_HOST }/thing.jpeg"
converted = markdown_convert_wrapper(content)
self.assertNotIn(converted, get_camo_url(content))
@override_settings(THUMBNAIL_IMAGES=False, EXTERNAL_URI_SCHEME="https://")
def test_cross_realm_image_preview_use_camo(self) -> None:
content = f"https://otherrealm.{ settings.EXTERNAL_HOST }/thing.jpeg"
thumbnail_img = f"""<div class="message_inline_image"><a href="{ content }"><img src="{ get_camo_url(content) }"></a></div>"""
converted = markdown_convert_wrapper(content)
self.assertIn(converted, thumbnail_img)
@override_settings(INLINE_IMAGE_PREVIEW=True)
def test_inline_image_quoted_blocks(self) -> None:
content = "http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg"
expected = '<div class="message_inline_image"><a href="http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fcdn.wallpapersafari.com%2F13%2F6%2F16eVjx.jpeg&size=full" src="/thumbnail?url=http%3A%2F%2Fcdn.wallpapersafari.com%2F13%2F6%2F16eVjx.jpeg&size=thumbnail"></a></div>'
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted.rendered_content, expected)
content = ">http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg\n\nAwesome!"
expected = '<blockquote>\n<p><a href="http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg">http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg</a></p>\n</blockquote>\n<p>Awesome!</p>'
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted.rendered_content, expected)
content = ">* http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg\n\nAwesome!"
expected = '<blockquote>\n<ul>\n<li><a href="http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg">http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg</a></li>\n</ul>\n</blockquote>\n<p>Awesome!</p>'
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted.rendered_content, expected)
    @override_settings(INLINE_IMAGE_PREVIEW=True)
    def test_inline_image_preview_order(self) -> None:
        """Previews are emitted in source order and are placed after the
        block (paragraph, quote, or list item) that contains the link."""
        realm = get_realm("zulip")
        # Case 1: three bare image URLs in one paragraph — the three preview
        # divs follow the paragraph, in the same order as the links.
        content = "http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg\nhttp://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg\nhttp://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg"
        expected = '<p><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg">http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg</a><br>\n<a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg">http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg</a><br>\n<a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg">http://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg</a></p>\n<div class="message_inline_image"><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_01.jpg&size=full" src="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_01.jpg&size=thumbnail"></a></div><div class="message_inline_image"><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_02.jpg&size=full" src="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_02.jpg&size=thumbnail"></a></div><div class="message_inline_image"><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_03.jpg&size=full" src="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_03.jpg&size=thumbnail"></a></div>'
        sender_user_profile = self.example_user("othello")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
        converted = render_markdown(msg, content)
        self.assertEqual(converted.rendered_content, expected)
        # Case 2: mixed contexts — a top-level link (preview), a quoted link
        # (no preview), and list items (preview placed inside each <li>).
        content = "http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg\n\n>http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg\n\n* http://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg\n* https://www.google.com/images/srpr/logo4w.png"
        expected = '<div class="message_inline_image"><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_01.jpg&size=full" src="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_01.jpg&size=thumbnail"></a></div><blockquote>\n<p><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg">http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg</a></p>\n</blockquote>\n<ul>\n<li><div class="message_inline_image"><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_03.jpg&size=full" src="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_03.jpg&size=thumbnail"></a></div></li>\n<li><div class="message_inline_image"><a href="https://www.google.com/images/srpr/logo4w.png"><img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&size=full" src="/thumbnail?url=https%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&size=thumbnail"></a></div></li>\n</ul>'
        sender_user_profile = self.example_user("othello")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
        converted = render_markdown(msg, content)
        self.assertEqual(converted.rendered_content, expected)
        # Case 3: multiple named user-upload links — each preview is emitted
        # right after the paragraph containing its link.
        content = "Test 1\n[21136101110_1dde1c1a7e_o.jpg](/user_uploads/{realm_id}/6d/F1PX6u16JA2P-nK45PyxHIYZ/21136101110_1dde1c1a7e_o.jpg) \n\nNext image\n[IMG_20161116_023910.jpg](/user_uploads/{realm_id}/69/sh7L06e7uH7NaX6d5WFfVYQp/IMG_20161116_023910.jpg) \n\nAnother screenshot\n[Screenshot-from-2016-06-01-16-22-42.png](/user_uploads/{realm_id}/70/_aZmIEWaN1iUaxwkDjkO7bpj/Screenshot-from-2016-06-01-16-22-42.png)"
        content = content.format(realm_id=realm.id)
        expected = '<p>Test 1<br>\n<a href="/user_uploads/{realm_id}/6d/F1PX6u16JA2P-nK45PyxHIYZ/21136101110_1dde1c1a7e_o.jpg">21136101110_1dde1c1a7e_o.jpg</a> </p>\n<div class="message_inline_image"><a href="/user_uploads/{realm_id}/6d/F1PX6u16JA2P-nK45PyxHIYZ/21136101110_1dde1c1a7e_o.jpg" title="21136101110_1dde1c1a7e_o.jpg"><img data-src-fullsize="/thumbnail?url=user_uploads%2F{realm_id}%2F6d%2FF1PX6u16JA2P-nK45PyxHIYZ%2F21136101110_1dde1c1a7e_o.jpg&size=full" src="/thumbnail?url=user_uploads%2F{realm_id}%2F6d%2FF1PX6u16JA2P-nK45PyxHIYZ%2F21136101110_1dde1c1a7e_o.jpg&size=thumbnail"></a></div><p>Next image<br>\n<a href="/user_uploads/{realm_id}/69/sh7L06e7uH7NaX6d5WFfVYQp/IMG_20161116_023910.jpg">IMG_20161116_023910.jpg</a> </p>\n<div class="message_inline_image"><a href="/user_uploads/{realm_id}/69/sh7L06e7uH7NaX6d5WFfVYQp/IMG_20161116_023910.jpg" title="IMG_20161116_023910.jpg"><img data-src-fullsize="/thumbnail?url=user_uploads%2F{realm_id}%2F69%2Fsh7L06e7uH7NaX6d5WFfVYQp%2FIMG_20161116_023910.jpg&size=full" src="/thumbnail?url=user_uploads%2F{realm_id}%2F69%2Fsh7L06e7uH7NaX6d5WFfVYQp%2FIMG_20161116_023910.jpg&size=thumbnail"></a></div><p>Another screenshot<br>\n<a href="/user_uploads/{realm_id}/70/_aZmIEWaN1iUaxwkDjkO7bpj/Screenshot-from-2016-06-01-16-22-42.png">Screenshot-from-2016-06-01-16-22-42.png</a></p>\n<div class="message_inline_image"><a href="/user_uploads/{realm_id}/70/_aZmIEWaN1iUaxwkDjkO7bpj/Screenshot-from-2016-06-01-16-22-42.png" title="Screenshot-from-2016-06-01-16-22-42.png"><img data-src-fullsize="/thumbnail?url=user_uploads%2F{realm_id}%2F70%2F_aZmIEWaN1iUaxwkDjkO7bpj%2FScreenshot-from-2016-06-01-16-22-42.png&size=full" src="/thumbnail?url=user_uploads%2F{realm_id}%2F70%2F_aZmIEWaN1iUaxwkDjkO7bpj%2FScreenshot-from-2016-06-01-16-22-42.png&size=thumbnail"></a></div>'
        expected = expected.format(realm_id=realm.id)
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
        converted = render_markdown(msg, content)
        self.assertEqual(converted.rendered_content, expected)
@override_settings(INLINE_IMAGE_PREVIEW=True)
def test_corrected_image_source(self) -> None:
# testing only Wikipedia because linx.li URLs can be expected to expire
content = "https://en.wikipedia.org/wiki/File:Wright_of_Derby,_The_Orrery.jpg"
expected = '<div class="message_inline_image"><a href="https://en.wikipedia.org/wiki/Special:FilePath/File:Wright_of_Derby,_The_Orrery.jpg"><img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fen.wikipedia.org%2Fwiki%2FSpecial%3AFilePath%2FFile%3AWright_of_Derby%2C_The_Orrery.jpg&size=full" src="/thumbnail?url=https%3A%2F%2Fen.wikipedia.org%2Fwiki%2FSpecial%3AFilePath%2FFile%3AWright_of_Derby%2C_The_Orrery.jpg&size=thumbnail"></a></div>'
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted.rendered_content, expected)
@override_settings(INLINE_IMAGE_PREVIEW=False)
def test_image_preview_enabled(self) -> None:
ret = image_preview_enabled()
self.assertFalse(ret)
settings.INLINE_IMAGE_PREVIEW = True
sender_user_profile = self.example_user("othello")
message = Message(sender=sender_user_profile, sending_client=get_client("test"))
realm = message.get_realm()
ret = image_preview_enabled()
self.assertTrue(ret)
ret = image_preview_enabled(no_previews=True)
self.assertFalse(ret)
ret = image_preview_enabled(message, realm)
self.assertTrue(ret)
ret = image_preview_enabled(message)
self.assertTrue(ret)
ret = image_preview_enabled(message, realm, no_previews=True)
self.assertFalse(ret)
ret = image_preview_enabled(message, no_previews=True)
self.assertFalse(ret)
@override_settings(INLINE_URL_EMBED_PREVIEW=False)
def test_url_embed_preview_enabled(self) -> None:
sender_user_profile = self.example_user("othello")
message = copy.deepcopy(
Message(sender=sender_user_profile, sending_client=get_client("test"))
)
realm = message.get_realm()
realm.inline_url_embed_preview = True # off by default
realm.save(update_fields=["inline_url_embed_preview"])
ret = url_embed_preview_enabled()
self.assertFalse(ret)
settings.INLINE_URL_EMBED_PREVIEW = True
ret = url_embed_preview_enabled()
self.assertTrue(ret)
ret = image_preview_enabled(no_previews=True)
self.assertFalse(ret)
ret = url_embed_preview_enabled(message, realm)
self.assertTrue(ret)
ret = url_embed_preview_enabled(message)
self.assertTrue(ret)
ret = url_embed_preview_enabled(message, no_previews=True)
self.assertFalse(ret)
    def test_inline_dropbox(self) -> None:
        """Dropbox share links render previews from the (mocked) Open Graph
        data: file links get an image preview, folder links get a ref card."""
        msg = "Look at how hilarious our old office was: https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG"
        image_info = {
            "image": "https://photos-4.dropbox.com/t/2/AABIre1oReJgPYuc_53iv0IHq1vUzRaDg2rrCfTpiWMccQ/12/129/jpeg/1024x1024/2/_/0/4/IMG_0923.JPG/CIEBIAEgAiAHKAIoBw/ymdijjcg67hv2ta/AABz2uuED1ox3vpWWvMpBxu6a/IMG_0923.JPG",
            "desc": "Shared with Dropbox",
            "title": "IMG_0923.JPG",
        }
        with mock.patch("zerver.lib.markdown.fetch_open_graph_image", return_value=image_info):
            converted = markdown_convert_wrapper(msg)
        # File link: image preview pointing at the ?raw=1 URL via Camo.
        self.assertEqual(
            converted,
            f"""<p>Look at how hilarious our old office was: <a href="https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG">https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG</a></p>\n<div class="message_inline_image"><a href="https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG" title="IMG_0923.JPG"><img src="{get_camo_url("https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG?raw=1")}"></a></div>""",
        )
        msg = "Look at my hilarious drawing folder: https://www.dropbox.com/sh/cm39k9e04z7fhim/AAAII5NK-9daee3FcF41anEua?dl="
        image_info = {
            "image": "https://cf.dropboxstatic.com/static/images/icons128/folder_dropbox.png",
            "desc": "Shared with Dropbox",
            "title": "Saves",
        }
        with mock.patch("zerver.lib.markdown.fetch_open_graph_image", return_value=image_info):
            converted = markdown_convert_wrapper(msg)
        # Folder link: "message_inline_ref" card with title/description
        # instead of an image preview.
        self.assertEqual(
            converted,
            f"""<p>Look at my hilarious drawing folder: <a href="https://www.dropbox.com/sh/cm39k9e04z7fhim/AAAII5NK-9daee3FcF41anEua?dl=">https://www.dropbox.com/sh/cm39k9e04z7fhim/AAAII5NK-9daee3FcF41anEua?dl=</a></p>\n<div class="message_inline_ref"><a href="https://www.dropbox.com/sh/cm39k9e04z7fhim/AAAII5NK-9daee3FcF41anEua?dl=" title="Saves"><img src="{get_camo_url("https://cf.dropboxstatic.com/static/images/icons128/folder_dropbox.png")}"></a><div><div class="message_inline_image_title">Saves</div><desc class="message_inline_image_desc"></desc></div></div>""",
        )
    def test_inline_dropbox_preview(self) -> None:
        """Dropbox photo-album links render an image preview from the
        (mocked) Open Graph image, proxied through Camo."""
        # Test photo album previews
        msg = "https://www.dropbox.com/sc/tditp9nitko60n5/03rEiZldy5"
        image_info = {
            "image": "https://photos-6.dropbox.com/t/2/AAAlawaeD61TyNewO5vVi-DGf2ZeuayfyHFdNTNzpGq-QA/12/271544745/jpeg/1024x1024/2/_/0/5/baby-piglet.jpg/CKnjvYEBIAIgBygCKAc/tditp9nitko60n5/AADX03VAIrQlTl28CtujDcMla/0",
            "desc": "Shared with Dropbox",
            "title": "1 photo",
        }
        with mock.patch("zerver.lib.markdown.fetch_open_graph_image", return_value=image_info):
            converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            f"""<p><a href="https://www.dropbox.com/sc/tditp9nitko60n5/03rEiZldy5">https://www.dropbox.com/sc/tditp9nitko60n5/03rEiZldy5</a></p>\n<div class="message_inline_image"><a href="https://www.dropbox.com/sc/tditp9nitko60n5/03rEiZldy5" title="1 photo"><img src="{get_camo_url("https://photos-6.dropbox.com/t/2/AAAlawaeD61TyNewO5vVi-DGf2ZeuayfyHFdNTNzpGq-QA/12/271544745/jpeg/1024x1024/2/_/0/5/baby-piglet.jpg/CKnjvYEBIAIgBygCKAc/tditp9nitko60n5/AADX03VAIrQlTl28CtujDcMla/0")}"></a></div>""",
        )
    def test_inline_dropbox_negative(self) -> None:
        """A Dropbox-hosted image URL with no Open Graph data falls back to
        the ordinary thumbnail preview instead of a Dropbox-specific card."""
        # Make sure we're not overzealous in our conversion:
        msg = "Look at the new dropbox logo: https://www.dropbox.com/static/images/home_logo.png"
        with mock.patch("zerver.lib.markdown.fetch_open_graph_image", return_value=None):
            converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            '<p>Look at the new dropbox logo: <a href="https://www.dropbox.com/static/images/home_logo.png">https://www.dropbox.com/static/images/home_logo.png</a></p>\n<div class="message_inline_image"><a href="https://www.dropbox.com/static/images/home_logo.png"><img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fwww.dropbox.com%2Fstatic%2Fimages%2Fhome_logo.png&size=full" src="/thumbnail?url=https%3A%2F%2Fwww.dropbox.com%2Fstatic%2Fimages%2Fhome_logo.png&size=thumbnail"></a></div>',
        )
def test_inline_dropbox_bad(self) -> None:
# Don't fail on bad dropbox links
msg = "https://zulip-test.dropbox.com/photos/cl/ROmr9K1XYtmpneM"
with mock.patch("zerver.lib.markdown.fetch_open_graph_image", return_value=None):
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
'<p><a href="https://zulip-test.dropbox.com/photos/cl/ROmr9K1XYtmpneM">https://zulip-test.dropbox.com/photos/cl/ROmr9K1XYtmpneM</a></p>',
)
    def test_inline_github_preview(self) -> None:
        """GitHub blob links are previewed via raw.githubusercontent.com;
        other GitHub-hosted images are thumbnailed at their own URL."""
        # Test photo album previews
        msg = "Test: https://github.com/zulip/zulip/blob/main/static/images/logo/zulip-icon-128x128.png"
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            '<p>Test: <a href="https://github.com/zulip/zulip/blob/main/static/images/logo/zulip-icon-128x128.png">https://github.com/zulip/zulip/blob/main/static/images/logo/zulip-icon-128x128.png</a></p>\n<div class="message_inline_image"><a href="https://github.com/zulip/zulip/blob/main/static/images/logo/zulip-icon-128x128.png"><img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fraw.githubusercontent.com%2Fzulip%2Fzulip%2Fmain%2Fstatic%2Fimages%2Flogo%2Fzulip-icon-128x128.png&size=full" src="/thumbnail?url=https%3A%2F%2Fraw.githubusercontent.com%2Fzulip%2Fzulip%2Fmain%2Fstatic%2Fimages%2Flogo%2Fzulip-icon-128x128.png&size=thumbnail"></a></div>',
        )
        # Non-blob GitHub image URL: thumbnailed directly, no raw.* rewrite.
        msg = "Test: https://developer.github.com/assets/images/hero-circuit-bg.png"
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            '<p>Test: <a href="https://developer.github.com/assets/images/hero-circuit-bg.png">https://developer.github.com/assets/images/hero-circuit-bg.png</a></p>\n<div class="message_inline_image"><a href="https://developer.github.com/assets/images/hero-circuit-bg.png"><img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fdeveloper.github.com%2Fassets%2Fimages%2Fhero-circuit-bg.png&size=full" src="/thumbnail?url=https%3A%2F%2Fdeveloper.github.com%2Fassets%2Fimages%2Fhero-circuit-bg.png&size=thumbnail"></a></div>',
        )
    def test_inline_youtube_preview(self) -> None:
        """YouTube links produce a thumbnail preview (via Camo), in spoiler
        blocks, in named links, and for multiple links in one message."""
        # Test YouTube URLs in spoilers
        msg = """\n```spoiler Check out this PyCon video\nhttps://www.youtube.com/watch?v=0c46YHS3RY8\n```"""
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            f"""<div class="spoiler-block"><div class="spoiler-header">\n<p>Check out this PyCon video</p>\n</div><div class="spoiler-content" aria-hidden="true">\n<p><a href="https://www.youtube.com/watch?v=0c46YHS3RY8">https://www.youtube.com/watch?v=0c46YHS3RY8</a></p>\n<div class="youtube-video message_inline_image"><a data-id="0c46YHS3RY8" href="https://www.youtube.com/watch?v=0c46YHS3RY8"><img src="{get_camo_url("https://i.ytimg.com/vi/0c46YHS3RY8/default.jpg")}"></a></div></div></div>""",
        )
        # Test YouTube URLs in normal messages.
        msg = "[YouTube link](https://www.youtube.com/watch?v=0c46YHS3RY8)"
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            f"""<p><a href="https://www.youtube.com/watch?v=0c46YHS3RY8">YouTube link</a></p>\n<div class="youtube-video message_inline_image"><a data-id="0c46YHS3RY8" href="https://www.youtube.com/watch?v=0c46YHS3RY8"><img src="{get_camo_url("https://i.ytimg.com/vi/0c46YHS3RY8/default.jpg")}"></a></div>""",
        )
        # Two links separated by text: each preview follows its own paragraph.
        msg = "https://www.youtube.com/watch?v=0c46YHS3RY8\n\nSample text\n\nhttps://www.youtube.com/watch?v=lXFO2ULktEI"
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            f"""<p><a href="https://www.youtube.com/watch?v=0c46YHS3RY8">https://www.youtube.com/watch?v=0c46YHS3RY8</a></p>\n<div class="youtube-video message_inline_image"><a data-id="0c46YHS3RY8" href="https://www.youtube.com/watch?v=0c46YHS3RY8"><img src="{get_camo_url("https://i.ytimg.com/vi/0c46YHS3RY8/default.jpg")}"></a></div><p>Sample text</p>\n<p><a href="https://www.youtube.com/watch?v=lXFO2ULktEI">https://www.youtube.com/watch?v=lXFO2ULktEI</a></p>\n<div class="youtube-video message_inline_image"><a data-id="lXFO2ULktEI" href="https://www.youtube.com/watch?v=lXFO2ULktEI"><img src="{get_camo_url("https://i.ytimg.com/vi/lXFO2ULktEI/default.jpg")}"></a></div>""",
        )
def test_twitter_id_extraction(self) -> None:
self.assertEqual(
get_tweet_id("http://twitter.com/#!/VizzQuotes/status/409030735191097344"),
"409030735191097344",
)
self.assertEqual(
get_tweet_id("http://twitter.com/VizzQuotes/status/409030735191097344"),
"409030735191097344",
)
self.assertEqual(
get_tweet_id("http://twitter.com/VizzQuotes/statuses/409030735191097344"),
"409030735191097344",
)
self.assertEqual(get_tweet_id("https://twitter.com/wdaher/status/1017581858"), "1017581858")
self.assertEqual(
get_tweet_id("https://twitter.com/wdaher/status/1017581858/"), "1017581858"
)
self.assertEqual(
get_tweet_id("https://twitter.com/windyoona/status/410766290349879296/photo/1"),
"410766290349879296",
)
self.assertEqual(
get_tweet_id("https://twitter.com/windyoona/status/410766290349879296/"),
"410766290349879296",
)
    def test_inline_interesting_links(self) -> None:
        """Twitter status links render inline tweet previews (mocked to the
        same tweet): only valid status URLs get previews, repeats are
        previewed once, at most 3 previews per message, and previews are
        interleaved with the paragraphs that contain the links."""
        # Helper: a plain autolinked anchor for a URL.
        def make_link(url: str) -> str:
            return f'<a href="{url}">{url}</a>'
        normal_tweet_html = (
            '<a href="https://twitter.com/Twitter"'
            ">@Twitter</a> "
            "meets @seepicturely at #tcdisrupt cc."
            '<a href="https://twitter.com/boscomonkey"'
            ">@boscomonkey</a> "
            '<a href="https://twitter.com/episod"'
            ">@episod</a> "
            '<a href="http://t.co/6J2EgYM"'
            ">http://instagr.am/p/MuW67/</a>"
        )
        mention_in_link_tweet_html = """<a href="http://t.co/@foo">http://foo.com</a>"""
        media_tweet_html = (
            '<a href="http://t.co/xo7pAhK6n3">'
            "http://twitter.com/NEVNBoston/status/421654515616849920/photo/1</a>"
        )
        emoji_in_tweet_html = """Zulip is <span aria-label=\"100\" class="emoji emoji-1f4af" role=\"img\" title="100">:100:</span>% open-source!"""
        # Helper: the full expected tweet-preview HTML for a given URL.
        def make_inline_twitter_preview(url: str, tweet_html: str, image_html: str = "") -> str:
            ## As of right now, all previews are mocked to be the exact same tweet
            return (
                '<div class="inline-preview-twitter">'
                '<div class="twitter-tweet">'
                f'<a href="{url}">'
                '<img class="twitter-avatar"'
                ' src="https://external-content.zulipcdn.net/external_content/1f7cd2436976d410eab8189ebceda87ae0b34ead/687474703a2f2f7062732e7477696d672e63'
                "6f6d2f70726f66696c655f696d616765732f313338303931323137332f53637265656e5f73686f745f323031312d30362d30335f61745f372e33352e33"
                '365f504d5f6e6f726d616c2e706e67">'
                "</a>"
                f"<p>{tweet_html}</p>"
                "<span>- Eoin McMillan (@imeoin)</span>"
                f"{image_html}"
                "</div>"
                "</div>"
            )
        # Non-status Twitter URLs: rendered as plain links, no preview.
        msg = "http://www.twitter.com"
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(converted, "<p>{}</p>".format(make_link("http://www.twitter.com")))
        msg = "http://www.twitter.com/wdaher/"
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(converted, "<p>{}</p>".format(make_link("http://www.twitter.com/wdaher/")))
        msg = "http://www.twitter.com/wdaher/status/3"
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted, "<p>{}</p>".format(make_link("http://www.twitter.com/wdaher/status/3"))
        )
        # id too long
        msg = "http://www.twitter.com/wdaher/status/2879779692873154569"
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            "<p>{}</p>".format(
                make_link("http://www.twitter.com/wdaher/status/2879779692873154569")
            ),
        )
        # id too large (i.e. tweet doesn't exist)
        msg = "http://www.twitter.com/wdaher/status/999999999999999999"
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            "<p>{}</p>".format(
                make_link("http://www.twitter.com/wdaher/status/999999999999999999")
            ),
        )
        # Valid status URLs (www/https/bare host): link plus inline preview.
        msg = "http://www.twitter.com/wdaher/status/287977969287315456"
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            "<p>{}</p>\n{}".format(
                make_link("http://www.twitter.com/wdaher/status/287977969287315456"),
                make_inline_twitter_preview(
                    "http://www.twitter.com/wdaher/status/287977969287315456", normal_tweet_html
                ),
            ),
        )
        msg = "https://www.twitter.com/wdaher/status/287977969287315456"
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            "<p>{}</p>\n{}".format(
                make_link("https://www.twitter.com/wdaher/status/287977969287315456"),
                make_inline_twitter_preview(
                    "https://www.twitter.com/wdaher/status/287977969287315456", normal_tweet_html
                ),
            ),
        )
        msg = "http://twitter.com/wdaher/status/287977969287315456"
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            "<p>{}</p>\n{}".format(
                make_link("http://twitter.com/wdaher/status/287977969287315456"),
                make_inline_twitter_preview(
                    "http://twitter.com/wdaher/status/287977969287315456", normal_tweet_html
                ),
            ),
        )
        # Repeated links will only be converted once
        msg = (
            "http://twitter.com/wdaher/status/287977969287315456 "
            "http://twitter.com/wdaher/status/287977969287315457 "
            "http://twitter.com/wdaher/status/287977969287315457 "
            "http://twitter.com/wdaher/status/287977969287315457"
        )
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            "<p>{} {} {} {}</p>\n{}{}".format(
                make_link("http://twitter.com/wdaher/status/287977969287315456"),
                make_link("http://twitter.com/wdaher/status/287977969287315457"),
                make_link("http://twitter.com/wdaher/status/287977969287315457"),
                make_link("http://twitter.com/wdaher/status/287977969287315457"),
                make_inline_twitter_preview(
                    "http://twitter.com/wdaher/status/287977969287315456", normal_tweet_html
                ),
                make_inline_twitter_preview(
                    "http://twitter.com/wdaher/status/287977969287315457", normal_tweet_html
                ),
            ),
        )
        # A max of 3 will be converted
        msg = (
            "http://twitter.com/wdaher/status/287977969287315456 "
            "http://twitter.com/wdaher/status/287977969287315457 "
            "https://twitter.com/wdaher/status/287977969287315456 "
            "http://twitter.com/wdaher/status/287977969287315460"
        )
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            "<p>{} {} {} {}</p>\n{}{}{}".format(
                make_link("http://twitter.com/wdaher/status/287977969287315456"),
                make_link("http://twitter.com/wdaher/status/287977969287315457"),
                make_link("https://twitter.com/wdaher/status/287977969287315456"),
                make_link("http://twitter.com/wdaher/status/287977969287315460"),
                make_inline_twitter_preview(
                    "http://twitter.com/wdaher/status/287977969287315456", normal_tweet_html
                ),
                make_inline_twitter_preview(
                    "http://twitter.com/wdaher/status/287977969287315457", normal_tweet_html
                ),
                make_inline_twitter_preview(
                    "https://twitter.com/wdaher/status/287977969287315456", normal_tweet_html
                ),
            ),
        )
        # Test smart in-place inlining behavior:
        msg = (
            "Paragraph 1: http://twitter.com/wdaher/status/287977969287315456\n\n"
            "Paragraph 2\n\n"
            "Paragraph 3: http://twitter.com/wdaher/status/287977969287315457"
        )
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            "<p>Paragraph 1: {}</p>\n{}<p>Paragraph 2</p>\n<p>Paragraph 3: {}</p>\n{}".format(
                make_link("http://twitter.com/wdaher/status/287977969287315456"),
                make_inline_twitter_preview(
                    "http://twitter.com/wdaher/status/287977969287315456", normal_tweet_html
                ),
                make_link("http://twitter.com/wdaher/status/287977969287315457"),
                make_inline_twitter_preview(
                    "http://twitter.com/wdaher/status/287977969287315457", normal_tweet_html
                ),
            ),
        )
        # Tweet has a mention in a URL, only the URL is linked
        msg = "http://twitter.com/wdaher/status/287977969287315458"
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            "<p>{}</p>\n{}".format(
                make_link("http://twitter.com/wdaher/status/287977969287315458"),
                make_inline_twitter_preview(
                    "http://twitter.com/wdaher/status/287977969287315458",
                    mention_in_link_tweet_html,
                ),
            ),
        )
        # Tweet with an image
        msg = "http://twitter.com/wdaher/status/287977969287315459"
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            "<p>{}</p>\n{}".format(
                make_link("http://twitter.com/wdaher/status/287977969287315459"),
                make_inline_twitter_preview(
                    "http://twitter.com/wdaher/status/287977969287315459",
                    media_tweet_html,
                    (
                        '<div class="twitter-image">'
                        '<a href="http://t.co/xo7pAhK6n3">'
                        f"""<img src="{get_camo_url("https://pbs.twimg.com/media/BdoEjD4IEAIq86Z.jpg:small")}">"""
                        "</a>"
                        "</div>"
                    ),
                ),
            ),
        )
        # Tweet containing a Unicode emoji, rendered as an emoji span.
        msg = "http://twitter.com/wdaher/status/287977969287315460"
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            "<p>{}</p>\n{}".format(
                make_link("http://twitter.com/wdaher/status/287977969287315460"),
                make_inline_twitter_preview(
                    "http://twitter.com/wdaher/status/287977969287315460", emoji_in_tweet_html
                ),
            ),
        )
        # Test Twitter previews in spoiler tags.
        msg = "```spoiler secret tweet\nTweet: http://twitter.com/wdaher/status/287977969287315456\n```"
        converted = markdown_convert_wrapper(msg)
        rendered_spoiler = '<div class="spoiler-block"><div class="spoiler-header">\n<p>secret tweet</p>\n</div><div class="spoiler-content" aria-hidden="true">\n<p>Tweet: {}</p>\n{}</div></div>'
        self.assertEqual(
            converted,
            rendered_spoiler.format(
                make_link("http://twitter.com/wdaher/status/287977969287315456"),
                make_inline_twitter_preview(
                    "http://twitter.com/wdaher/status/287977969287315456", normal_tweet_html
                ),
            ),
        )
def test_fetch_tweet_data_settings_validation(self) -> None:
with self.settings(TEST_SUITE=False, TWITTER_CONSUMER_KEY=None):
self.assertIs(None, fetch_tweet_data("287977969287315459"))
def test_content_has_emoji(self) -> None:
self.assertFalse(content_has_emoji_syntax("boring"))
self.assertFalse(content_has_emoji_syntax("hello: world"))
self.assertFalse(content_has_emoji_syntax(":foobar"))
self.assertFalse(content_has_emoji_syntax("::: hello :::"))
self.assertTrue(content_has_emoji_syntax("foo :whatever:"))
self.assertTrue(content_has_emoji_syntax("\n:whatever:"))
self.assertTrue(content_has_emoji_syntax(":smile: ::::::"))
def test_realm_emoji(self) -> None:
def emoji_img(name: str, file_name: str, realm_id: int) -> str:
return '<img alt="{}" class="emoji" src="{}" title="{}">'.format(
name, get_emoji_url(file_name, realm_id), name[1:-1].replace("_", " ")
)
realm = get_realm("zulip")
# Needs to mock an actual message because that's how Markdown obtains the realm
msg = Message(sender=self.example_user("hamlet"))
converted = markdown_convert(":green_tick:", message_realm=realm, message=msg)
realm_emoji = RealmEmoji.objects.filter(
realm=realm, name="green_tick", deactivated=False
).get()
self.assertEqual(
converted.rendered_content,
"<p>{}</p>".format(emoji_img(":green_tick:", realm_emoji.file_name, realm.id)),
)
# Deactivate realm emoji.
do_remove_realm_emoji(realm, "green_tick")
converted = markdown_convert(":green_tick:", message_realm=realm, message=msg)
self.assertEqual(converted.rendered_content, "<p>:green_tick:</p>")
def test_deactivated_realm_emoji(self) -> None:
# Deactivate realm emoji.
realm = get_realm("zulip")
do_remove_realm_emoji(realm, "green_tick")
msg = Message(sender=self.example_user("hamlet"))
converted = markdown_convert(":green_tick:", message_realm=realm, message=msg)
self.assertEqual(converted.rendered_content, "<p>:green_tick:</p>")
def test_unicode_emoji(self) -> None:
msg = "\u2615" # ☕
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
'<p><span aria-label="coffee" class="emoji emoji-2615" role="img" title="coffee">:coffee:</span></p>',
)
msg = "\u2615\u2615" # ☕☕
converted = markdown_convert_wrapper(msg)
self.assertEqual(
converted,
'<p><span aria-label="coffee" class="emoji emoji-2615" role="img" title="coffee">:coffee:</span><span aria-label="coffee" class="emoji emoji-2615" role="img" title="coffee">:coffee:</span></p>',
)
def test_no_translate_emoticons_if_off(self) -> None:
    """With translate_emoticons disabled, ":)" is rendered verbatim."""
    othello = self.example_user("othello")
    do_change_user_setting(othello, "translate_emoticons", False)

    message = Message(sender=othello, sending_client=get_client("test"))
    result = render_markdown(message, ":)")
    self.assertEqual(result.rendered_content, "<p>:)</p>")
def test_same_markup(self) -> None:
    """A Unicode emoji and its :name: syntax must render to identical HTML."""
    msg = "\u2615"  # ☕
    unicode_converted = markdown_convert_wrapper(msg)
    msg = ":coffee:"  # the emoji-syntax spelling of ☕
    converted = markdown_convert_wrapper(msg)
    self.assertEqual(converted, unicode_converted)
def test_links_in_topic_name(self) -> None:
    """topic_links extracts URLs (with or without a scheme) from topic names."""
    realm = get_realm("zulip")
    msg = Message(sender=self.example_user("othello"))

    msg.set_topic_name("https://google.com/hello-world")
    converted_topic = topic_links(realm.id, msg.topic_name())
    self.assertEqual(
        converted_topic,
        [{"url": "https://google.com/hello-world", "text": "https://google.com/hello-world"}],
    )

    msg.set_topic_name("http://google.com/hello-world")
    converted_topic = topic_links(realm.id, msg.topic_name())
    self.assertEqual(
        converted_topic,
        [{"url": "http://google.com/hello-world", "text": "http://google.com/hello-world"}],
    )

    # Schemeless google.com/... is linkified with https:// prepended to the
    # URL, while the display text keeps the original spelling.
    msg.set_topic_name("Without scheme google.com/hello-world")
    converted_topic = topic_links(realm.id, msg.topic_name())
    self.assertEqual(
        converted_topic,
        [{"url": "https://google.com/hello-world", "text": "google.com/hello-world"}],
    )

    # random.words is not linkified — apparently not recognized as a real
    # domain (NOTE(review): presumably TLD-based detection; confirm in linkifier code).
    msg.set_topic_name("Without scheme random.words/hello-world")
    converted_topic = topic_links(realm.id, msg.topic_name())
    self.assertEqual(converted_topic, [])

    # Multiple links in one topic are all extracted, in order of appearance.
    msg.set_topic_name(
        "Try out http://ftp.debian.org, https://google.com/ and https://google.in/."
    )
    converted_topic = topic_links(realm.id, msg.topic_name())
    self.assertEqual(
        converted_topic,
        [
            {"url": "http://ftp.debian.org", "text": "http://ftp.debian.org"},
            {"url": "https://google.com/", "text": "https://google.com/"},
            {"url": "https://google.in/", "text": "https://google.in/"},
        ],
    )

    # test order for links without scheme
    msg.set_topic_name("google.in google.com")
    converted_topic = topic_links(realm.id, msg.topic_name())
    self.assertEqual(
        converted_topic,
        [
            {"url": "https://google.in", "text": "google.in"},
            {"url": "https://google.com", "text": "google.com"},
        ],
    )
def test_realm_patterns(self) -> None:
    """End-to-end test of realm linkifiers in message content and topic names.

    Covers: the linkifier's repr, matching inside message bodies and topics,
    word-boundary edge cases, and nested/overlapping patterns.
    """
    realm = get_realm("zulip")
    url_format_string = r"https://trac.example.com/ticket/%(id)s"
    linkifier = RealmFilter(
        realm=realm, pattern=r"#(?P<id>[0-9]{2,8})", url_format_string=url_format_string
    )
    linkifier.save()
    # str() is the idiomatic spelling of linkifier.__str__().
    self.assertEqual(
        str(linkifier),
        "<RealmFilter(zulip): #(?P<id>[0-9]{2,8}) https://trac.example.com/ticket/%(id)s>",
    )

    msg = Message(sender=self.example_user("othello"))
    msg.set_topic_name("#444")

    flush_per_request_caches()

    content = "We should fix #224 and #115, but not issue#124 or #1124z or [trac #15](https://trac.example.com/ticket/16) today."
    converted = markdown_convert(content, message_realm=realm, message=msg)
    converted_topic = topic_links(realm.id, msg.topic_name())

    self.assertEqual(
        converted.rendered_content,
        '<p>We should fix <a href="https://trac.example.com/ticket/224">#224</a> and <a href="https://trac.example.com/ticket/115">#115</a>, but not issue#124 or #1124z or <a href="https://trac.example.com/ticket/16">trac #15</a> today.</p>',
    )
    self.assertEqual(
        converted_topic, [{"url": "https://trac.example.com/ticket/444", "text": "#444"}]
    )

    # Topic links include both linkifier matches and ordinary URLs.
    msg.set_topic_name("#444 https://google.com")
    converted_topic = topic_links(realm.id, msg.topic_name())
    self.assertEqual(
        converted_topic,
        [
            {"url": "https://trac.example.com/ticket/444", "text": "#444"},
            {"url": "https://google.com", "text": "https://google.com"},
        ],
    )

    RealmFilter(
        realm=realm,
        pattern=r"#(?P<id>[a-zA-Z]+-[0-9]+)",
        url_format_string=r"https://trac.example.com/ticket/%(id)s",
    ).save()
    msg = Message(sender=self.example_user("hamlet"))
    content = "#ZUL-123 was fixed and code was deployed to production, also #zul-321 was deployed to staging"
    converted = markdown_convert(content, message_realm=realm, message=msg)
    self.assertEqual(
        converted.rendered_content,
        '<p><a href="https://trac.example.com/ticket/ZUL-123">#ZUL-123</a> was fixed and code was deployed to production, also <a href="https://trac.example.com/ticket/zul-321">#zul-321</a> was deployed to staging</p>',
    )

    def assert_conversion(content: str, should_have_converted: bool = True) -> None:
        # Checks both the message-body and the topic-link extraction paths.
        converted = markdown_convert(content, message_realm=realm, message=msg).rendered_content
        converted_topic = topic_links(realm.id, content)
        if should_have_converted:
            self.assertTrue("https://trac.example.com" in converted)
            self.assert_length(converted_topic, 1)
            self.assertEqual(
                converted_topic[0],
                {"url": "https://trac.example.com/ticket/123", "text": "#123"},
            )
        else:
            self.assertTrue("https://trac.example.com" not in converted)
            self.assert_length(converted_topic, 0)

    assert_conversion("Hello #123 World")
    assert_conversion("Hello #123World", False)
    assert_conversion("Hello#123 World", False)
    assert_conversion("Hello#123World", False)

    # Ideally, these should be converted, but Markdown doesn't
    # handle word boundary detection in languages that don't use
    # whitespace for that correctly yet.
    assert_conversion("チケットは#123です", False)
    assert_conversion("チケットは #123です", False)
    assert_conversion("チケットは#123 です", False)
    assert_conversion("チケットは #123 です")

    assert_conversion("(#123)")
    assert_conversion("#123>")
    assert_conversion('"#123"')
    assert_conversion("#123@")
    assert_conversion(")#123(", False)
    assert_conversion("##123", False)

    # test nested realm patterns should avoid double matching
    RealmFilter(
        realm=realm,
        pattern=r"hello#(?P<id>[0-9]+)",
        url_format_string=r"https://trac.example.com/hello/%(id)s",
    ).save()
    converted_topic = topic_links(realm.id, "hello#123 #234")
    self.assertEqual(
        converted_topic,
        [
            {"url": "https://trac.example.com/hello/123", "text": "hello#123"},
            {"url": "https://trac.example.com/ticket/234", "text": "#234"},
        ],
    )

    # test correct order when realm pattern and normal links are both present.
    converted_topic = topic_links(realm.id, "#234 https://google.com")
    self.assertEqual(
        converted_topic,
        [
            {"url": "https://trac.example.com/ticket/234", "text": "#234"},
            {"url": "https://google.com", "text": "https://google.com"},
        ],
    )
def test_multiple_matching_realm_patterns(self) -> None:
    """Two linkifiers matching the same text: only the first applies in the
    message body, but both are reported as topic links."""
    realm = get_realm("zulip")
    url_format_string = r"https://trac.example.com/ticket/%(id)s"
    linkifier_1 = RealmFilter(
        realm=realm,
        pattern=r"(?P<id>ABC\-[0-9]+)(?![A-Z0-9-])",
        url_format_string=url_format_string,
    )
    linkifier_1.save()
    # str() is the idiomatic spelling of linkifier_1.__str__().
    self.assertEqual(
        str(linkifier_1),
        r"<RealmFilter(zulip): (?P<id>ABC\-[0-9]+)(?![A-Z0-9-])"
        " https://trac.example.com/ticket/%(id)s>",
    )

    url_format_string = r"https://other-trac.example.com/ticket/%(id)s"
    linkifier_2 = RealmFilter(
        realm=realm,
        pattern=r"(?P<id>[A-Z][A-Z0-9]*\-[0-9]+)(?![A-Z0-9-])",
        url_format_string=url_format_string,
    )
    linkifier_2.save()
    self.assertEqual(
        str(linkifier_2),
        r"<RealmFilter(zulip): (?P<id>[A-Z][A-Z0-9]*\-[0-9]+)(?![A-Z0-9-])"
        " https://other-trac.example.com/ticket/%(id)s>",
    )

    msg = Message(sender=self.example_user("othello"))
    msg.set_topic_name("ABC-123")

    flush_per_request_caches()

    content = (
        "We should fix ABC-123 or [trac ABC-123](https://trac.example.com/ticket/16) today."
    )
    converted = markdown_convert(content, message_realm=realm, message=msg)
    converted_topic = topic_links(realm.id, msg.topic_name())

    # The second linkifier (which was saved later) was ignored as the content was marked AtomicString after first conversion.
    # There was no easy way to support parsing both linkifiers and not run into an infinite loop, hence the second linkifier is ignored.
    self.assertEqual(
        converted.rendered_content,
        '<p>We should fix <a href="https://trac.example.com/ticket/ABC-123">ABC-123</a> or <a href="https://trac.example.com/ticket/16">trac ABC-123</a> today.</p>',
    )
    # Both the links should be generated in topics.
    self.assertEqual(
        converted_topic,
        [
            {"url": "https://trac.example.com/ticket/ABC-123", "text": "ABC-123"},
            {"url": "https://other-trac.example.com/ticket/ABC-123", "text": "ABC-123"},
        ],
    )
def test_flush_linkifier(self) -> None:
    """Saving a RealmFilter flushes the per-realm linkifier cache."""
    realm = get_realm("zulip")

    def flush() -> None:
        """
        flush_linkifiers is a post-save hook, so calling it
        directly for testing is kind of awkward
        """

        class Instance:
            realm_id: Optional[int] = None

        instance = Instance()
        instance.realm_id = realm.id
        # cast() lets the bare stand-in object pass where a RealmFilter is expected.
        flush_linkifiers(sender=RealmFilter, instance=cast(RealmFilter, instance))

    def save_new_linkifier() -> None:
        linkifier = RealmFilter(realm=realm, pattern=r"whatever", url_format_string="whatever")
        linkifier.save()

    # start fresh for our realm
    flush()
    self.assertFalse(realm_in_local_linkifiers_cache(realm.id))

    # call this just for side effects of populating the cache
    linkifiers_for_realm(realm.id)
    self.assertTrue(realm_in_local_linkifiers_cache(realm.id))

    # Saving a new RealmFilter should have the side effect of
    # flushing the cache.
    save_new_linkifier()
    self.assertFalse(realm_in_local_linkifiers_cache(realm.id))

    # and flush it one more time, to make sure we don't get a KeyError
    flush()
    self.assertFalse(realm_in_local_linkifiers_cache(realm.id))
def test_realm_patterns_negative(self) -> None:
    """A topic that matches no linkifier yields an empty topic-links list."""
    realm = get_realm("zulip")
    linkifier = RealmFilter(
        realm=realm,
        pattern=r"#(?P<id>[0-9]{2,8})",
        url_format_string=r"https://trac.example.com/ticket/%(id)s",
    )
    linkifier.save()

    message = Message(sender=self.example_user("othello"))
    message.set_topic_name("no match here")
    self.assertEqual(topic_links(realm.id, message.topic_name()), [])
def test_is_status_message(self) -> None:
    """Messages starting with /me are recognized as status messages."""
    user_profile = self.example_user("othello")
    msg = Message(sender=user_profile, sending_client=get_client("test"))

    # /me followed by block content (a list).
    content = "/me makes a list\n* one\n* two"
    rendering_result = render_markdown(msg, content)
    self.assertEqual(
        rendering_result.rendered_content,
        "<p>/me makes a list</p>\n<ul>\n<li>one</li>\n<li>two</li>\n</ul>",
    )
    self.assertTrue(Message.is_status_message(content, rendering_result.rendered_content))

    # Simple single-line /me message.
    content = "/me takes a walk"
    rendering_result = render_markdown(msg, content)
    self.assertEqual(
        rendering_result.rendered_content,
        "<p>/me takes a walk</p>",
    )
    self.assertTrue(Message.is_status_message(content, rendering_result.rendered_content))

    # /me with a soft line break still counts as a status message.
    content = "/me writes a second line\nline"
    rendering_result = render_markdown(msg, content)
    self.assertEqual(
        rendering_result.rendered_content,
        "<p>/me writes a second line<br>\nline</p>",
    )
    self.assertTrue(Message.is_status_message(content, rendering_result.rendered_content))
def test_alert_words(self) -> None:
    """Rendering records the sender's own id when one of their alert words appears."""
    user_profile = self.example_user("othello")
    do_add_alert_words(user_profile, ["ALERTWORD", "scaryword"])
    msg = Message(sender=user_profile, sending_client=get_client("test"))
    realm_alert_words_automaton = get_alert_word_automaton(user_profile.realm)

    def render(msg: Message, content: str) -> MessageRenderingResult:
        # Thin wrapper that threads the precomputed automaton through.
        return render_markdown(
            msg, content, realm_alert_words_automaton=realm_alert_words_automaton
        )

    content = "We have an ALERTWORD day today!"
    rendering_result = render(msg, content)
    self.assertEqual(
        rendering_result.rendered_content, "<p>We have an ALERTWORD day today!</p>"
    )
    self.assertEqual(rendering_result.user_ids_with_alert_words, {user_profile.id})

    # A word nobody registered triggers no alert-word hits.
    msg = Message(sender=user_profile, sending_client=get_client("test"))
    content = "We have a NOTHINGWORD day today!"
    rendering_result = render(msg, content)
    self.assertEqual(
        rendering_result.rendered_content, "<p>We have a NOTHINGWORD day today!</p>"
    )
    self.assertEqual(rendering_result.user_ids_with_alert_words, set())
def test_alert_words_returns_user_ids_with_alert_words(self) -> None:
    """Rendering flags exactly the users whose alert words appear in the content."""
    alert_words_for_users: Dict[str, List[str]] = {
        "hamlet": ["how"],
        "cordelia": ["this possible"],
        "iago": ["hello"],
        "prospero": ["hello"],
        "othello": ["how are you"],
        "aaron": ["hey"],
    }
    user_profiles: Dict[str, UserProfile] = {}
    for username, alert_words in alert_words_for_users.items():
        user_profile = self.example_user(username)
        # Direct assignment is the idiomatic single-key insertion
        # (was: user_profiles.update({username: user_profile})).
        user_profiles[username] = user_profile
        do_add_alert_words(user_profile, alert_words)
    sender_user_profile = self.example_user("polonius")
    msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
    realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)

    def render(msg: Message, content: str) -> MessageRenderingResult:
        return render_markdown(
            msg, content, realm_alert_words_automaton=realm_alert_words_automaton
        )

    content = "hello how is this possible how are you doing today"
    rendering_result = render(msg, content)
    expected_user_ids: Set[int] = {
        user_profiles["hamlet"].id,
        user_profiles["cordelia"].id,
        user_profiles["iago"].id,
        user_profiles["prospero"].id,
        user_profiles["othello"].id,
    }
    # All users except aaron have their alert word appear in the message content
    self.assertEqual(rendering_result.user_ids_with_alert_words, expected_user_ids)
def test_alert_words_returns_user_ids_with_alert_words_1(self) -> None:
    """Alert words are matched across a multi-line message body."""
    alert_words_for_users: Dict[str, List[str]] = {
        "hamlet": ["provisioning", "Prod deployment"],
        "cordelia": ["test", "Prod"],
        "iago": ["prod"],
        "prospero": ["deployment"],
        "othello": ["last"],
    }
    user_profiles: Dict[str, UserProfile] = {}
    for username, alert_words in alert_words_for_users.items():
        user_profile = self.example_user(username)
        # Direct assignment is the idiomatic single-key insertion.
        user_profiles[username] = user_profile
        do_add_alert_words(user_profile, alert_words)
    sender_user_profile = self.example_user("polonius")
    msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
    realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)

    def render(msg: Message, content: str) -> MessageRenderingResult:
        return render_markdown(
            msg, content, realm_alert_words_automaton=realm_alert_words_automaton
        )

    content = """Hello, everyone. Prod deployment has been completed
And this is a new line
to test out how Markdown convert this into something line ending split array
and this is a new line
last"""
    rendering_result = render(msg, content)
    expected_user_ids: Set[int] = {
        user_profiles["hamlet"].id,
        user_profiles["cordelia"].id,
        user_profiles["iago"].id,
        user_profiles["prospero"].id,
        user_profiles["othello"].id,
    }
    # All users have their alert word appear in the message content
    self.assertEqual(rendering_result.user_ids_with_alert_words, expected_user_ids)
def test_alert_words_returns_user_ids_with_alert_words_in_french(self) -> None:
    """Alert-word matching works with accented (non-ASCII) characters."""
    alert_words_for_users: Dict[str, List[str]] = {
        "hamlet": ["réglementaire", "une politique", "une merveille"],
        "cordelia": ["énormément", "Prod"],
        "iago": ["prod"],
        "prospero": ["deployment"],
        "othello": ["last"],
    }
    user_profiles: Dict[str, UserProfile] = {}
    for username, alert_words in alert_words_for_users.items():
        user_profile = self.example_user(username)
        # Direct assignment is the idiomatic single-key insertion.
        user_profiles[username] = user_profile
        do_add_alert_words(user_profile, alert_words)
    sender_user_profile = self.example_user("polonius")
    msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
    realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)

    def render(msg: Message, content: str) -> MessageRenderingResult:
        return render_markdown(
            msg, content, realm_alert_words_automaton=realm_alert_words_automaton
        )

    content = """This is to test out alert words work in languages with accented characters too
bonjour est (énormément) ce a quoi ressemble le français
et j'espère qu'il n'y n' réglementaire a pas de mots d'alerte dans ce texte français
"""
    rendering_result = render(msg, content)
    expected_user_ids: Set[int] = {user_profiles["hamlet"].id, user_profiles["cordelia"].id}
    # Only hamlet and cordelia have their alert-words appear in the message content
    self.assertEqual(rendering_result.user_ids_with_alert_words, expected_user_ids)
def test_alert_words_returns_empty_user_ids_with_alert_words(self) -> None:
    """With every user's alert-word list empty, no user ids are flagged."""
    alert_words_for_users: Dict[str, List[str]] = {
        "hamlet": [],
        "cordelia": [],
        "iago": [],
        "prospero": [],
        "othello": [],
        "aaron": [],
    }
    user_profiles: Dict[str, UserProfile] = {}
    for username, alert_words in alert_words_for_users.items():
        user_profile = self.example_user(username)
        # Direct assignment is the idiomatic single-key insertion.
        user_profiles[username] = user_profile
        do_add_alert_words(user_profile, alert_words)
    sender_user_profile = self.example_user("polonius")
    # Bug fix: the sender must be polonius, as in the sibling tests; the
    # original accidentally used the loop's leftover `user_profile` variable.
    msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
    realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)

    def render(msg: Message, content: str) -> MessageRenderingResult:
        return render_markdown(
            msg, content, realm_alert_words_automaton=realm_alert_words_automaton
        )

    content = """hello how is this possible how are you doing today
This is to test that the no user_ids who have alrert wourldword is participating
in sending of the message
"""
    rendering_result = render(msg, content)
    expected_user_ids: Set[int] = set()
    # None of the users have their alert-words appear in the message content
    self.assertEqual(rendering_result.user_ids_with_alert_words, expected_user_ids)
def get_mock_alert_words(self, num_words: int, word_length: int) -> List[str]:
    """Return num_words copies of an alert word made of word_length 'x' characters."""
    return ["x" * word_length for _ in range(num_words)]
def test_alert_words_with_empty_alert_words(self) -> None:
    """No user has any alert word: rendering flags no user ids at all."""
    alert_words_for_users: Dict[str, List[str]] = {
        "hamlet": [],
        "cordelia": [],
        "iago": [],
        "othello": [],
    }
    user_profiles: Dict[str, UserProfile] = {}
    for username, alert_words in alert_words_for_users.items():
        user_profile = self.example_user(username)
        # Direct assignment is the idiomatic single-key insertion.
        user_profiles[username] = user_profile
        do_add_alert_words(user_profile, alert_words)
    sender_user_profile = self.example_user("polonius")
    msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
    realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)

    def render(msg: Message, content: str) -> MessageRenderingResult:
        return render_markdown(
            msg, content, realm_alert_words_automaton=realm_alert_words_automaton
        )

    content = """This is to test a empty alert words i.e. no user has any alert-words set"""
    rendering_result = render(msg, content)
    expected_user_ids: Set[int] = set()
    self.assertEqual(rendering_result.user_ids_with_alert_words, expected_user_ids)
# NOTE(review): method name has a typo ("retuns"); renaming would change the
# public test ID, so it is left as-is.
def test_alert_words_retuns_user_ids_with_alert_words_with_huge_alert_words(self) -> None:
    """Matching stays correct when some users have very large alert-word lists."""
    alert_words_for_users: Dict[str, List[str]] = {
        "hamlet": ["issue124"],
        "cordelia": self.get_mock_alert_words(500, 10),
        "iago": self.get_mock_alert_words(500, 10),
        "othello": self.get_mock_alert_words(500, 10),
    }
    user_profiles: Dict[str, UserProfile] = {}
    for username, alert_words in alert_words_for_users.items():
        user_profile = self.example_user(username)
        # Direct assignment is the idiomatic single-key insertion.
        user_profiles[username] = user_profile
        do_add_alert_words(user_profile, alert_words)
    sender_user_profile = self.example_user("polonius")
    msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
    realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)

    def render(msg: Message, content: str) -> MessageRenderingResult:
        return render_markdown(
            msg, content, realm_alert_words_automaton=realm_alert_words_automaton
        )

    content = """The code above will print 10 random values of numbers between 1 and 100.
The second line, for x in range(10), determines how many values will be printed (when you use
range(x), the number that you use in place of x will be the amount of values that you'll have
printed. if you want 20 values, use range(20). use range(5) if you only want 5 values returned,
etc.). I was talking abou the issue124 on github. Then the third line: print random.randint(1,101) will automatically select a random integer
between 1 and 100 for you. The process is fairly simple
"""
    rendering_result = render(msg, content)
    expected_user_ids: Set[int] = {user_profiles["hamlet"].id}
    # Only hamlet has alert-word 'issue124' present in the message content
    self.assertEqual(rendering_result.user_ids_with_alert_words, expected_user_ids)
def test_default_code_block_language(self) -> None:
    """The realm's default_code_block_language applies only to unlabeled fences."""
    realm = get_realm("zulip")
    self.assertEqual(realm.default_code_block_language, None)
    text = "```{}\nconsole.log('Hello World');\n```\n"

    # Render without default language
    msg_with_js = markdown_convert_wrapper(text.format("js"))
    msg_with_python = markdown_convert_wrapper(text.format("python"))
    msg_without_language = markdown_convert_wrapper(text.format(""))
    msg_with_quote = markdown_convert_wrapper(text.format("quote"))
    msg_with_math = markdown_convert_wrapper(text.format("math"))
    msg_with_none = markdown_convert_wrapper(text.format("none"))

    # Render with default=javascript
    do_set_realm_property(realm, "default_code_block_language", "javascript", acting_user=None)
    msg_without_language_default_js = markdown_convert_wrapper(text.format(""))
    msg_with_python_default_js = markdown_convert_wrapper(text.format("python"))

    # Render with default=python
    do_set_realm_property(realm, "default_code_block_language", "python", acting_user=None)
    msg_without_language_default_py = markdown_convert_wrapper(text.format(""))
    msg_with_none_default_py = markdown_convert_wrapper(text.format("none"))

    # Render with default=quote
    do_set_realm_property(realm, "default_code_block_language", "quote", acting_user=None)
    msg_without_language_default_quote = markdown_convert_wrapper(text.format(""))

    # Render with default=math
    do_set_realm_property(realm, "default_code_block_language", "math", acting_user=None)
    msg_without_language_default_math = markdown_convert_wrapper(text.format(""))

    # Render without default language
    do_set_realm_property(realm, "default_code_block_language", None, acting_user=None)
    msg_without_language_final = markdown_convert_wrapper(text.format(""))

    # An unlabeled fence renders exactly like one labeled with the default;
    # an explicit label (including "none") always wins over the default.
    self.assertTrue(msg_with_js == msg_without_language_default_js)
    self.assertTrue(
        msg_with_python == msg_with_python_default_js == msg_without_language_default_py
    )
    self.assertTrue(msg_with_quote == msg_without_language_default_quote)
    self.assertTrue(msg_with_math == msg_without_language_default_math)
    self.assertTrue(msg_without_language == msg_without_language_final)
    self.assertTrue(msg_with_none == msg_with_none_default_py)

    # Test checking inside nested quotes
    nested_text = "````quote\n\n{}\n\n{}````".format(text.format("js"), text.format(""))
    do_set_realm_property(realm, "default_code_block_language", "javascript", acting_user=None)
    rendered = markdown_convert_wrapper(nested_text)
    with_language, without_language = re.findall(r"<pre>(.*?)$", rendered, re.MULTILINE)
    self.assertTrue(with_language == without_language)

    do_set_realm_property(realm, "default_code_block_language", None, acting_user=None)
    rendered = markdown_convert_wrapper(nested_text)
    with_language, without_language = re.findall(r"<pre>(.*?)$", rendered, re.MULTILINE)
    self.assertFalse(with_language == without_language)
def test_mention_wildcard(self) -> None:
    """@**all** renders as a wildcard mention and sets mentions_wildcard."""
    sender = self.example_user("othello")
    message = Message(sender=sender, sending_client=get_client("test"))

    result = render_markdown(message, "@**all** test")
    self.assertEqual(
        result.rendered_content,
        '<p><span class="user-mention" data-user-id="*">@all</span> test</p>',
    )
    self.assertTrue(result.mentions_wildcard)
def test_mention_everyone(self) -> None:
    """@**everyone** renders as a wildcard mention and sets mentions_wildcard."""
    sender = self.example_user("othello")
    message = Message(sender=sender, sending_client=get_client("test"))

    result = render_markdown(message, "@**everyone** test")
    self.assertEqual(
        result.rendered_content,
        '<p><span class="user-mention" data-user-id="*">@everyone</span> test</p>',
    )
    self.assertTrue(result.mentions_wildcard)
def test_mention_stream(self) -> None:
    """@**stream** renders as a wildcard mention and sets mentions_wildcard."""
    sender = self.example_user("othello")
    message = Message(sender=sender, sending_client=get_client("test"))

    result = render_markdown(message, "@**stream** test")
    self.assertEqual(
        result.rendered_content,
        '<p><span class="user-mention" data-user-id="*">@stream</span> test</p>',
    )
    self.assertTrue(result.mentions_wildcard)
def test_mention_at_wildcard(self) -> None:
    """A bare @all (without the ** syntax) is not treated as a mention."""
    sender = self.example_user("othello")
    message = Message(sender=sender, sending_client=get_client("test"))

    result = render_markdown(message, "@all test")
    self.assertEqual(result.rendered_content, "<p>@all test</p>")
    self.assertFalse(result.mentions_wildcard)
    self.assertEqual(result.mentions_user_ids, set())
def test_mention_at_everyone(self) -> None:
    """A bare @everyone (without the ** syntax) is not treated as a mention."""
    sender = self.example_user("othello")
    message = Message(sender=sender, sending_client=get_client("test"))

    result = render_markdown(message, "@everyone test")
    self.assertEqual(result.rendered_content, "<p>@everyone test</p>")
    self.assertFalse(result.mentions_wildcard)
    self.assertEqual(result.mentions_user_ids, set())
def test_mention_word_starting_with_at_wildcard(self) -> None:
    """A word that merely starts with a wildcard (@alleycat.com) is not a mention."""
    sender = self.example_user("othello")
    message = Message(sender=sender, sending_client=get_client("test"))

    result = render_markdown(message, "test @alleycat.com test")
    self.assertEqual(result.rendered_content, "<p>test @alleycat.com test</p>")
    self.assertFalse(result.mentions_wildcard)
    self.assertEqual(result.mentions_user_ids, set())
def test_mention_at_normal_user(self) -> None:
    """A bare @username (without the ** syntax) is not treated as a mention."""
    sender = self.example_user("othello")
    message = Message(sender=sender, sending_client=get_client("test"))

    result = render_markdown(message, "@aaron test")
    self.assertEqual(result.rendered_content, "<p>@aaron test</p>")
    self.assertFalse(result.mentions_wildcard)
    self.assertEqual(result.mentions_user_ids, set())
def test_mention_single(self) -> None:
    """Both @**Full Name** and @**|user_id** syntaxes mention a single user."""
    sender_user_profile = self.example_user("othello")
    user_profile = self.example_user("hamlet")
    msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
    user_id = user_profile.id

    # Mention by full name.
    content = "@**King Hamlet**"
    rendering_result = render_markdown(msg, content)
    self.assertEqual(
        rendering_result.rendered_content,
        '<p><span class="user-mention" ' f'data-user-id="{user_id}">' "@King Hamlet</span></p>",
    )
    self.assertEqual(rendering_result.mentions_user_ids, {user_profile.id})

    # Mention by user id renders identically.
    content = f"@**|{user_id}**"
    rendering_result = render_markdown(msg, content)
    self.assertEqual(
        rendering_result.rendered_content,
        '<p><span class="user-mention" ' f'data-user-id="{user_id}">' "@King Hamlet</span></p>",
    )
    self.assertEqual(rendering_result.mentions_user_ids, {user_profile.id})
def test_mention_silent(self) -> None:
    """A silent mention (@_**name**) renders specially but notifies nobody."""
    sender_user_profile = self.example_user("othello")
    user_profile = self.example_user("hamlet")
    msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
    user_id = user_profile.id

    content = "@_**King Hamlet**"
    rendering_result = render_markdown(msg, content)
    self.assertEqual(
        rendering_result.rendered_content,
        '<p><span class="user-mention silent" '
        f'data-user-id="{user_id}">'
        "King Hamlet</span></p>",
    )
    # Silent mentions must not add the user to mentions_user_ids.
    self.assertEqual(rendering_result.mentions_user_ids, set())
def test_silent_wildcard_mention(self) -> None:
    """Silent wildcard mentions render but never set mentions_wildcard."""
    sender = self.example_user("othello")
    message = Message(sender=sender, sending_client=get_client("test"))

    for wildcard in ("all", "everyone", "stream"):
        result = render_markdown(message, f"@_**{wildcard}**")
        self.assertEqual(
            result.rendered_content,
            f'<p><span class="user-mention silent" data-user-id="*">{wildcard}</span></p>',
        )
        self.assertFalse(result.mentions_wildcard)
def test_mention_invalid_followed_by_valid(self) -> None:
    """An invalid mention falls back to bold text; a later valid one still works."""
    sender_user_profile = self.example_user("othello")
    user_profile = self.example_user("hamlet")
    msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
    user_id = user_profile.id

    content = "@**Invalid user** and @**King Hamlet**"
    rendering_result = render_markdown(msg, content)
    self.assertEqual(
        rendering_result.rendered_content,
        '<p>@<strong>Invalid user</strong> and <span class="user-mention" '
        f'data-user-id="{user_id}">'
        "@King Hamlet</span></p>",
    )
    # Only the valid mention contributes a user id.
    self.assertEqual(rendering_result.mentions_user_ids, {user_profile.id})
def test_invalid_mention_not_uses_valid_mention_data(self) -> None:
    """A name/id-mismatched mention cannot reuse another mention's fetched data."""
    sender_user_profile = self.example_user("othello")
    hamlet = self.example_user("hamlet")
    msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
    # Even though King Hamlet will be present in mention data as
    # it was fetched for the first mention, the second mention is
    # incorrect (as it uses hamlet's id) so it should not be able
    # to use that data for creating a valid mention.
    content = f"@**King Hamlet|{hamlet.id}** and @**aaron|{hamlet.id}**"
    rendering_result = render_markdown(msg, content)
    self.assertEqual(
        rendering_result.rendered_content,
        f'<p><span class="user-mention" data-user-id="{hamlet.id}">'
        f"@King Hamlet</span> and @<strong>aaron|{hamlet.id}</strong></p>",
    )
    self.assertEqual(rendering_result.mentions_user_ids, {hamlet.id})
def test_silent_mention_invalid_followed_by_valid(self) -> None:
    """Invalid silent mentions fall back to bold; valid ones render silently."""
    sender_user_profile = self.example_user("othello")
    user_profile = self.example_user("hamlet")
    msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
    user_id = user_profile.id

    # Invalid name followed by a valid silent name mention.
    content = "@_**Invalid user** and @_**King Hamlet**"
    rendering_result = render_markdown(msg, content)
    self.assertEqual(
        rendering_result.rendered_content,
        '<p>@_<strong>Invalid user</strong> and <span class="user-mention silent" '
        f'data-user-id="{user_id}">'
        "King Hamlet</span></p>",
    )
    self.assertEqual(rendering_result.mentions_user_ids, set())

    # Invalid id followed by a valid silent id mention.
    content = f"@_**|123456789** and @_**|{user_id}**"
    rendering_result = render_markdown(msg, content)
    self.assertEqual(
        rendering_result.rendered_content,
        "<p>@_<strong>|123456789</strong> and "
        '<span class="user-mention silent" '
        f'data-user-id="{user_id}">'
        "King Hamlet</span></p>",
    )
    # Silent mentions never populate mentions_user_ids.
    self.assertEqual(rendering_result.mentions_user_ids, set())
def test_possible_mentions(self) -> None:
    """possible_mentions returns (mention names, whether a wildcard appears)."""

    def assert_mentions(content: str, names: Set[str], has_wildcards: bool = False) -> None:
        self.assertEqual(possible_mentions(content), (names, has_wildcards))

    aaron = self.example_user("aaron")

    assert_mentions("", set())
    assert_mentions("boring", set())
    assert_mentions("@**all**", set(), True)
    # Mention syntax embedded inside a word is not a mention.
    assert_mentions("smush@**steve**smush", set())

    # Mixed name, |id, comma-containing name, name|id, and wildcard mentions.
    assert_mentions(
        f"Hello @**King Hamlet**, @**|{aaron.id}** and @**Cordelia, Lear's daughter**\n@**Foo van Barson|1234** @**all**",
        {"King Hamlet", f"|{aaron.id}", "Cordelia, Lear's daughter", "Foo van Barson|1234"},
        True,
    )
def test_mention_multiple(self) -> None:
    """Multiple mentions in one message each render and are all recorded."""
    sender_user_profile = self.example_user("othello")
    hamlet = self.example_user("hamlet")
    cordelia = self.example_user("cordelia")
    msg = Message(sender=sender_user_profile, sending_client=get_client("test"))

    content = "@**King Hamlet** and @**Cordelia, Lear's daughter**, check this out"
    rendering_result = render_markdown(msg, content)
    self.assertEqual(
        rendering_result.rendered_content,
        "<p>"
        '<span class="user-mention" '
        f'data-user-id="{hamlet.id}">@King Hamlet</span> and '
        '<span class="user-mention" '
        f'data-user-id="{cordelia.id}">@Cordelia, Lear\'s daughter</span>, '
        "check this out</p>",
    )
    self.assertEqual(rendering_result.mentions_user_ids, {hamlet.id, cordelia.id})
    def test_mention_in_quotes(self) -> None:
        """Mentions inside quoted text render silently and are not recorded."""
        othello = self.example_user("othello")
        hamlet = self.example_user("hamlet")
        cordelia = self.example_user("cordelia")
        msg = Message(sender=othello, sending_client=get_client("test"))
        # Quoted mentions become silent; the unquoted second line stays a real mention.
        content = "> @**King Hamlet** and @**Othello, the Moor of Venice**\n\n @**King Hamlet** and @**Cordelia, Lear's daughter**"
        rendering_result = render_markdown(msg, content)
        self.assertEqual(
            rendering_result.rendered_content,
            "<blockquote>\n<p>"
            f'<span class="user-mention silent" data-user-id="{hamlet.id}">King Hamlet</span>'
            " and "
            f'<span class="user-mention silent" data-user-id="{othello.id}">Othello, the Moor of Venice</span>'
            "</p>\n</blockquote>\n"
            "<p>"
            f'<span class="user-mention" data-user-id="{hamlet.id}">@King Hamlet</span>'
            " and "
            f'<span class="user-mention" data-user-id="{cordelia.id}">@Cordelia, Lear\'s daughter</span>'
            "</p>",
        )
        self.assertEqual(rendering_result.mentions_user_ids, {hamlet.id, cordelia.id})
        # Both fenced quote and > quote should be identical for both silent and regular syntax.
        expected = (
            "<blockquote>\n<p>"
            f'<span class="user-mention silent" data-user-id="{hamlet.id}">King Hamlet</span>'
            "</p>\n</blockquote>"
        )
        content = "```quote\n@**King Hamlet**\n```"
        rendering_result = render_markdown(msg, content)
        self.assertEqual(rendering_result.rendered_content, expected)
        self.assertEqual(rendering_result.mentions_user_ids, set())
        content = "> @**King Hamlet**"
        rendering_result = render_markdown(msg, content)
        self.assertEqual(rendering_result.rendered_content, expected)
        self.assertEqual(rendering_result.mentions_user_ids, set())
        content = "```quote\n@_**King Hamlet**\n```"
        rendering_result = render_markdown(msg, content)
        self.assertEqual(rendering_result.rendered_content, expected)
        self.assertEqual(rendering_result.mentions_user_ids, set())
        content = "> @_**King Hamlet**"
        rendering_result = render_markdown(msg, content)
        self.assertEqual(rendering_result.rendered_content, expected)
        self.assertEqual(rendering_result.mentions_user_ids, set())
    def test_wildcard_mention_in_quotes(self) -> None:
        """Wildcard mentions inside quotes render silently and set no wildcard flag."""
        user_profile = self.example_user("othello")
        message = Message(sender=user_profile, sending_client=get_client("test"))

        def assert_silent_mention(content: str, wildcard: str) -> None:
            expected = (
                "<blockquote>\n<p>"
                f'<span class="user-mention silent" data-user-id="*">{wildcard}</span>'
                "</p>\n</blockquote>"
            )
            rendering_result = render_markdown(message, content)
            self.assertEqual(rendering_result.rendered_content, expected)
            self.assertFalse(rendering_result.mentions_wildcard)

        wildcards = ["all", "everyone", "stream"]
        for wildcard in wildcards:
            # Both > quotes and ```quote fences, regular and silent syntax.
            assert_silent_mention(f"> @**{wildcard}**", wildcard)
            assert_silent_mention(f"> @_**{wildcard}**", wildcard)
            assert_silent_mention(f"```quote\n@**{wildcard}**\n```", wildcard)
            assert_silent_mention(f"```quote\n@_**{wildcard}**\n```", wildcard)
    def test_mention_duplicate_full_name(self) -> None:
        """Users sharing a full name are disambiguated via @**name|id** syntax."""
        realm = get_realm("zulip")

        def make_user(email: str, full_name: str) -> UserProfile:
            return create_user(
                email=email,
                password="whatever",
                realm=realm,
                full_name=full_name,
            )

        sender_user_profile = self.example_user("othello")
        # Two distinct users with the identical full name "Mark Twin".
        twin1 = make_user("twin1@example.com", "Mark Twin")
        twin2 = make_user("twin2@example.com", "Mark Twin")
        cordelia = self.example_user("cordelia")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
        content = f"@**Mark Twin|{twin1.id}**, @**Mark Twin|{twin2.id}** and @**Cordelia, Lear's daughter**, hi."
        rendering_result = render_markdown(msg, content)
        self.assertEqual(
            rendering_result.rendered_content,
            "<p>"
            '<span class="user-mention" '
            f'data-user-id="{twin1.id}">@Mark Twin</span>, '
            '<span class="user-mention" '
            f'data-user-id="{twin2.id}">@Mark Twin</span> and '
            '<span class="user-mention" '
            f'data-user-id="{cordelia.id}">@Cordelia, Lear\'s daughter</span>, '
            "hi.</p>",
        )
        self.assertEqual(rendering_result.mentions_user_ids, {twin1.id, twin2.id, cordelia.id})
def test_mention_invalid(self) -> None:
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "Hey @**Nonexistent User**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content, "<p>Hey @<strong>Nonexistent User</strong></p>"
)
self.assertEqual(rendering_result.mentions_user_ids, set())
def test_user_mention_atomic_string(self) -> None:
sender_user_profile = self.example_user("othello")
realm = get_realm("zulip")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
# Create a linkifier.
url_format_string = r"https://trac.example.com/ticket/%(id)s"
linkifier = RealmFilter(
realm=realm, pattern=r"#(?P<id>[0-9]{2,8})", url_format_string=url_format_string
)
linkifier.save()
self.assertEqual(
linkifier.__str__(),
"<RealmFilter(zulip): #(?P<id>[0-9]{2,8}) https://trac.example.com/ticket/%(id)s>",
)
# Create a user that potentially interferes with the pattern.
test_user = create_user(
email="atomic@example.com",
password="whatever",
realm=realm,
full_name="Atomic #123",
)
content = "@**Atomic #123**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p><span class="user-mention" '
f'data-user-id="{test_user.id}">'
"@Atomic #123</span></p>",
)
self.assertEqual(rendering_result.mentions_user_ids, {test_user.id})
content = "@_**Atomic #123**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p><span class="user-mention silent" '
f'data-user-id="{test_user.id}">'
"Atomic #123</span></p>",
)
self.assertEqual(rendering_result.mentions_user_ids, set())
def create_user_group_for_test(self, user_group_name: str) -> UserGroup:
othello = self.example_user("othello")
return create_user_group(user_group_name, [othello], get_realm("zulip"))
    def test_user_group_mention_single(self) -> None:
        """A user mention and a group mention in one message both render and record."""
        sender_user_profile = self.example_user("othello")
        user_profile = self.example_user("hamlet")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
        user_id = user_profile.id
        user_group = self.create_user_group_for_test("support")
        content = "@**King Hamlet** @*support*"
        rendering_result = render_markdown(msg, content)
        self.assertEqual(
            rendering_result.rendered_content,
            '<p><span class="user-mention" '
            f'data-user-id="{user_id}">'
            "@King Hamlet</span> "
            '<span class="user-group-mention" '
            f'data-user-group-id="{user_group.id}">'
            "@support</span></p>",
        )
        self.assertEqual(rendering_result.mentions_user_ids, {user_profile.id})
        self.assertEqual(rendering_result.mentions_user_group_ids, {user_group.id})
    def test_invalid_user_group_followed_by_valid_mention_single(self) -> None:
        """An invalid group mention renders as emphasis without breaking later mentions."""
        sender_user_profile = self.example_user("othello")
        user_profile = self.example_user("hamlet")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
        user_id = user_profile.id
        user_group = self.create_user_group_for_test("support")
        content = "@**King Hamlet** @*Invalid user group* @*support*"
        rendering_result = render_markdown(msg, content)
        self.assertEqual(
            rendering_result.rendered_content,
            '<p><span class="user-mention" '
            f'data-user-id="{user_id}">'
            "@King Hamlet</span> "
            "@<em>Invalid user group</em> "
            '<span class="user-group-mention" '
            f'data-user-group-id="{user_group.id}">'
            "@support</span></p>",
        )
        self.assertEqual(rendering_result.mentions_user_ids, {user_profile.id})
        self.assertEqual(rendering_result.mentions_user_group_ids, {user_group.id})
def test_user_group_mention_atomic_string(self) -> None:
sender_user_profile = self.example_user("othello")
realm = get_realm("zulip")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
user_profile = self.example_user("hamlet")
# Create a linkifier.
url_format_string = r"https://trac.example.com/ticket/%(id)s"
linkifier = RealmFilter(
realm=realm, pattern=r"#(?P<id>[0-9]{2,8})", url_format_string=url_format_string
)
linkifier.save()
self.assertEqual(
linkifier.__str__(),
"<RealmFilter(zulip): #(?P<id>[0-9]{2,8}) https://trac.example.com/ticket/%(id)s>",
)
# Create a user-group that potentially interferes with the pattern.
user_id = user_profile.id
user_group = self.create_user_group_for_test("support #123")
content = "@**King Hamlet** @*support #123*"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p><span class="user-mention" '
f'data-user-id="{user_id}">'
"@King Hamlet</span> "
'<span class="user-group-mention" '
f'data-user-group-id="{user_group.id}">'
"@support #123</span></p>",
)
self.assertEqual(rendering_result.mentions_user_ids, {user_profile.id})
self.assertEqual(rendering_result.mentions_user_group_ids, {user_group.id})
    def test_possible_user_group_mentions(self) -> None:
        """possible_user_group_mentions() extracts @*group* names only."""

        def assert_mentions(content: str, names: Set[str]) -> None:
            self.assertEqual(possible_user_group_mentions(content), names)

        assert_mentions("", set())
        assert_mentions("boring", set())
        # User mentions and wildcards are not group mentions.
        assert_mentions("@**all**", set())
        # Group syntax embedded inside a word must not match.
        assert_mentions("smush@*steve*smush", set())
        assert_mentions(
            "@*support* Hello @**King Hamlet** and @**Cordelia, Lear's daughter**\n"
            "@**Foo van Barson** @**all**",
            {"support"},
        )
        assert_mentions(
            "Attention @*support*, @*frontend* and @*backend*\ngroups.",
            {"support", "frontend", "backend"},
        )
    def test_user_group_mention_multiple(self) -> None:
        """Multiple group mentions in one message all render and are all recorded."""
        sender_user_profile = self.example_user("othello")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
        support = self.create_user_group_for_test("support")
        backend = self.create_user_group_for_test("backend")
        content = "@*support* and @*backend*, check this out"
        rendering_result = render_markdown(msg, content)
        self.assertEqual(
            rendering_result.rendered_content,
            "<p>"
            '<span class="user-group-mention" '
            f'data-user-group-id="{support.id}">'
            "@support</span> "
            "and "
            '<span class="user-group-mention" '
            f'data-user-group-id="{backend.id}">'
            "@backend</span>, "
            "check this out"
            "</p>",
        )
        self.assertEqual(rendering_result.mentions_user_group_ids, {support.id, backend.id})
    def test_user_group_mention_edit(self) -> None:
        """Editing a message recomputes the 'mentioned' flag for group mentions."""
        sender_user_profile = self.example_user("hamlet")
        user_profile = self.example_user("othello")
        self.create_user_group_for_test("support")
        self.login("hamlet")
        msg_id = self.send_stream_message(
            sender_user_profile, "Denmark", topic_name="editing", content="test"
        )

        def update_message_and_check_flag(content: str, mentioned: bool) -> None:
            # Edit the message via the API, then inspect Othello's UserMessage flags.
            result = self.client_patch(
                "/json/messages/" + str(msg_id),
                {
                    "message_id": msg_id,
                    "content": content,
                },
            )
            self.assert_json_success(result)
            um = UserMessage.objects.get(
                user_profile_id=user_profile.id,
                message_id=msg_id,
            )
            if mentioned:
                self.assertIn("mentioned", um.flags_list())
            else:
                self.assertNotIn("mentioned", um.flags_list())

        update_message_and_check_flag("@*support*", True)
        update_message_and_check_flag("@*support-invalid* edited", False)
        update_message_and_check_flag("@*support* edited", True)
        update_message_and_check_flag("edited", False)
        update_message_and_check_flag("@*support*", True)
        # A silent group mention (@_*) does not set the flag.
        update_message_and_check_flag("@_*support*", False)
def test_user_group_mention_invalid(self) -> None:
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "Hey @*Nonexistent group*"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content, "<p>Hey @<em>Nonexistent group</em></p>"
)
self.assertEqual(rendering_result.mentions_user_group_ids, set())
    def test_user_group_silent_mention(self) -> None:
        """@_*group* renders a silent span and records no group mention."""
        sender_user_profile = self.example_user("othello")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
        support = self.create_user_group_for_test("support")
        content = "We'll add you to @_*support* user group."
        rendering_result = render_markdown(msg, content)
        self.assertEqual(
            rendering_result.rendered_content,
            "<p>We'll add you to "
            f'<span class="user-group-mention silent" data-user-group-id="{support.id}">support</span>'
            " user group.</p>",
        )
        self.assertEqual(rendering_result.mentions_user_group_ids, set())
    def test_user_group_mention_in_quotes(self) -> None:
        """Group mentions inside quoted text render silently."""
        user_profile = self.example_user("othello")
        message = Message(sender=user_profile, sending_client=get_client("test"))
        backend = self.create_user_group_for_test("backend")

        def assert_silent_mention(content: str) -> None:
            expected = (
                "<blockquote>\n<p>"
                f'<span class="user-group-mention silent" data-user-group-id="{backend.id}">backend</span>'
                "</p>\n</blockquote>"
            )
            rendering_result = render_markdown(message, content)
            self.assertEqual(rendering_result.rendered_content, expected)
            self.assertEqual(rendering_result.mentions_user_group_ids, set())

        # Both > quotes and ```quote fences, regular and silent syntax.
        assert_silent_mention("> @*backend*")
        assert_silent_mention("> @_*backend*")
        assert_silent_mention("```quote\n@*backend*\n```")
        assert_silent_mention("```quote\n@_*backend*\n```")
    def test_stream_single(self) -> None:
        """#**stream** renders as a narrow link to the stream."""
        denmark = get_stream("Denmark", get_realm("zulip"))
        sender_user_profile = self.example_user("othello")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
        content = "#**Denmark**"
        self.assertEqual(
            render_markdown(msg, content).rendered_content,
            '<p><a class="stream" data-stream-id="{d.id}" href="/#narrow/stream/{d.id}-Denmark">#{d.name}</a></p>'.format(
                d=denmark,
            ),
        )
    def test_invalid_stream_followed_by_valid_mention(self) -> None:
        """An invalid stream link renders as bold; later valid links still work."""
        denmark = get_stream("Denmark", get_realm("zulip"))
        sender_user_profile = self.example_user("othello")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
        content = "#**Invalid** and #**Denmark**"
        self.assertEqual(
            render_markdown(msg, content).rendered_content,
            '<p>#<strong>Invalid</strong> and <a class="stream" data-stream-id="{d.id}" href="/#narrow/stream/{d.id}-Denmark">#{d.name}</a></p>'.format(
                d=denmark,
            ),
        )
    def test_stream_multiple(self) -> None:
        """Multiple stream links in one message each render as narrow links."""
        sender_user_profile = self.example_user("othello")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
        realm = get_realm("zulip")
        denmark = get_stream("Denmark", realm)
        scotland = get_stream("Scotland", realm)
        content = "Look to #**Denmark** and #**Scotland**, there something"
        self.assertEqual(
            render_markdown(msg, content).rendered_content,
            "<p>Look to "
            '<a class="stream" '
            'data-stream-id="{denmark.id}" '
            'href="/#narrow/stream/{denmark.id}-Denmark">#{denmark.name}</a> and '
            '<a class="stream" '
            'data-stream-id="{scotland.id}" '
            'href="/#narrow/stream/{scotland.id}-Scotland">#{scotland.name}</a>, '
            "there something</p>".format(denmark=denmark, scotland=scotland),
        )
    def test_stream_case_sensitivity(self) -> None:
        """A stream link spelled with the stream's exact case renders as a link."""
        realm = get_realm("zulip")
        case_sens = Stream.objects.create(name="CaseSens", realm=realm)
        sender_user_profile = self.example_user("othello")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
        content = "#**CaseSens**"
        self.assertEqual(
            render_markdown(msg, content).rendered_content,
            '<p><a class="stream" data-stream-id="{s.id}" href="/#narrow/stream/{s.id}-{s.name}">#{s.name}</a></p>'.format(
                s=case_sens,
            ),
        )
    def test_stream_case_sensitivity_nonmatching(self) -> None:
        """#StreamName requires the stream be spelled with the correct case
        currently. If we change that in the future, we'll need to change this
        test."""
        realm = get_realm("zulip")
        Stream.objects.create(name="CaseSens", realm=realm)
        sender_user_profile = self.example_user("othello")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
        # Lowercase spelling does not match "CaseSens", so no link is produced.
        content = "#**casesens**"
        self.assertEqual(
            render_markdown(msg, content).rendered_content, "<p>#<strong>casesens</strong></p>"
        )
    def test_topic_single(self) -> None:
        """#**stream>topic** renders as a narrow link to that topic."""
        denmark = get_stream("Denmark", get_realm("zulip"))
        sender_user_profile = self.example_user("othello")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
        content = "#**Denmark>some topic**"
        self.assertEqual(
            render_markdown(msg, content).rendered_content,
            '<p><a class="stream-topic" data-stream-id="{d.id}" href="/#narrow/stream/{d.id}-Denmark/topic/some.20topic">#{d.name} > some topic</a></p>'.format(
                d=denmark,
            ),
        )
def test_topic_atomic_string(self) -> None:
realm = get_realm("zulip")
# Create a linkifier.
sender_user_profile = self.example_user("othello")
url_format_string = r"https://trac.example.com/ticket/%(id)s"
linkifier = RealmFilter(
realm=realm, pattern=r"#(?P<id>[0-9]{2,8})", url_format_string=url_format_string
)
linkifier.save()
self.assertEqual(
linkifier.__str__(),
"<RealmFilter(zulip): #(?P<id>[0-9]{2,8}) https://trac.example.com/ticket/%(id)s>",
)
# Create a topic link that potentially interferes with the pattern.
denmark = get_stream("Denmark", realm)
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**Denmark>#1234**"
self.assertEqual(
render_markdown(msg, content).rendered_content,
'<p><a class="stream-topic" data-stream-id="{d.id}" href="/#narrow/stream/{d.id}-Denmark/topic/.231234">#{d.name} > #1234</a></p>'.format(
d=denmark,
),
)
    def test_topic_multiple(self) -> None:
        """Multiple #**stream>topic** links in one message each render."""
        denmark = get_stream("Denmark", get_realm("zulip"))
        scotland = get_stream("Scotland", get_realm("zulip"))
        sender_user_profile = self.example_user("othello")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
        content = "This has two links: #**Denmark>some topic** and #**Scotland>other topic**."
        self.assertEqual(
            render_markdown(msg, content).rendered_content,
            "<p>This has two links: "
            '<a class="stream-topic" data-stream-id="{denmark.id}" '
            'href="/#narrow/stream/{denmark.id}-{denmark.name}/topic/some.20topic">'
            "#{denmark.name} > some topic</a>"
            " and "
            '<a class="stream-topic" data-stream-id="{scotland.id}" '
            'href="/#narrow/stream/{scotland.id}-{scotland.name}/topic/other.20topic">'
            "#{scotland.name} > other topic</a>"
            ".</p>".format(denmark=denmark, scotland=scotland),
        )
    def test_possible_stream_names(self) -> None:
        """possible_linked_stream_names() extracts #**...** names, including non-ASCII ones."""
        content = """#**test here**
        This mentions #**Denmark** too.
        #**garçon** #**천국** @**Ignore Person**
        """
        self.assertEqual(
            possible_linked_stream_names(content),
            {"test here", "Denmark", "garçon", "천국"},
        )
    def test_stream_unicode(self) -> None:
        """Non-ASCII stream names link correctly; the name is escaped in the URL fragment."""
        realm = get_realm("zulip")
        uni = Stream.objects.create(name="привет", realm=realm)
        sender_user_profile = self.example_user("othello")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
        content = "#**привет**"
        # Dot-escaped UTF-8 bytes of "привет" as used in narrow URLs.
        quoted_name = ".D0.BF.D1.80.D0.B8.D0.B2.D0.B5.D1.82"
        href = f"/#narrow/stream/{uni.id}-{quoted_name}"
        self.assertEqual(
            render_markdown(msg, content).rendered_content,
            '<p><a class="stream" data-stream-id="{s.id}" href="{href}">#{s.name}</a></p>'.format(
                s=uni,
                href=href,
            ),
        )
def test_stream_atomic_string(self) -> None:
realm = get_realm("zulip")
# Create a linkifier.
sender_user_profile = self.example_user("othello")
url_format_string = r"https://trac.example.com/ticket/%(id)s"
linkifier = RealmFilter(
realm=realm, pattern=r"#(?P<id>[0-9]{2,8})", url_format_string=url_format_string
)
linkifier.save()
self.assertEqual(
linkifier.__str__(),
"<RealmFilter(zulip): #(?P<id>[0-9]{2,8}) https://trac.example.com/ticket/%(id)s>",
)
# Create a stream that potentially interferes with the pattern.
stream = Stream.objects.create(name="Stream #1234", realm=realm)
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**Stream #1234**"
href = f"/#narrow/stream/{stream.id}-Stream-.231234"
self.assertEqual(
render_markdown(msg, content).rendered_content,
'<p><a class="stream" data-stream-id="{s.id}" href="{href}">#{s.name}</a></p>'.format(
s=stream,
href=href,
),
)
def test_stream_invalid(self) -> None:
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "There #**Nonexistentstream**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content, "<p>There #<strong>Nonexistentstream</strong></p>"
)
self.assertEqual(rendering_result.mentions_user_ids, set())
    def test_image_preview_title(self) -> None:
        """An image link's text becomes the title of the inline preview."""
        msg = "[My favorite image](https://example.com/testimage.png)"
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            "<p>"
            '<a href="https://example.com/testimage.png">My favorite image</a>'
            "</p>\n"
            '<div class="message_inline_image">'
            '<a href="https://example.com/testimage.png" title="My favorite image">'
            '<img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fexample.com%2Ftestimage.png&size=full" src="/thumbnail?url=https%3A%2F%2Fexample.com%2Ftestimage.png&size=thumbnail">'
            "</a>"
            "</div>",
        )
    def test_mit_rendering(self) -> None:
        """Test the Markdown configs for the MIT Zephyr mirroring system;
        verifies almost all inline patterns are disabled, but
        inline_interesting_links is still enabled"""
        msg = "**test**"
        realm = get_realm("zephyr")
        client = get_client("zephyr_mirror")
        message = Message(sending_client=client, sender=self.mit_user("sipbtest"))
        converted = markdown_convert(msg, message_realm=realm, message=message)
        # Bold syntax passes through untouched.
        self.assertEqual(
            converted.rendered_content,
            "<p>**test**</p>",
        )
        # List syntax passes through untouched.
        msg = "* test"
        converted = markdown_convert(msg, message_realm=realm, message=message)
        self.assertEqual(
            converted.rendered_content,
            "<p>* test</p>",
        )
        # Bare URLs are still converted into links.
        msg = "https://lists.debian.org/debian-ctte/2014/02/msg00173.html"
        converted = markdown_convert(msg, message_realm=realm, message=message)
        self.assertEqual(
            converted.rendered_content,
            '<p><a href="https://lists.debian.org/debian-ctte/2014/02/msg00173.html">https://lists.debian.org/debian-ctte/2014/02/msg00173.html</a></p>',
        )
def test_url_to_a(self) -> None:
url = "javascript://example.com/invalidURL"
converted = url_to_a(db_data=None, url=url, text=url)
self.assertEqual(
converted,
"javascript://example.com/invalidURL",
)
    def test_disabled_code_block_processor(self) -> None:
        """With email_gateway=True, indented text is not rendered as a code block."""
        msg = (
            "Hello,\n\n"
            + "    I am writing this message to test something. I am writing this message to test something."
        )
        # Normal rendering: the indented line becomes a code block.
        converted = markdown_convert_wrapper(msg)
        expected_output = (
            "<p>Hello,</p>\n"
            + '<div class="codehilite"><pre><span></span><code>I am writing this message to test something. I am writing this message to test something.\n'
            + "</code></pre></div>"
        )
        self.assertEqual(converted, expected_output)
        realm = do_create_realm(
            string_id="code_block_processor_test", name="code_block_processor_test"
        )
        maybe_update_markdown_engines(realm.id, True)
        rendering_result = markdown_convert(msg, message_realm=realm, email_gateway=True)
        # Email-gateway rendering: the same text becomes a plain paragraph.
        expected_output = (
            "<p>Hello,</p>\n"
            + "<p>I am writing this message to test something. I am writing this message to test something.</p>"
        )
        self.assertEqual(rendering_result.rendered_content, expected_output)
    def test_normal_link(self) -> None:
        """An absolute external URL renders as an ordinary link."""
        realm = get_realm("zulip")
        sender_user_profile = self.example_user("othello")
        message = Message(sender=sender_user_profile, sending_client=get_client("test"))
        msg = "http://example.com/#settings/"
        self.assertEqual(
            markdown_convert(msg, message_realm=realm, message=message).rendered_content,
            '<p><a href="http://example.com/#settings/">http://example.com/#settings/</a></p>',
        )
    def test_relative_link(self) -> None:
        """A narrow URL on the realm's own host is rewritten to a relative fragment link."""
        realm = get_realm("zulip")
        sender_user_profile = self.example_user("othello")
        message = Message(sender=sender_user_profile, sending_client=get_client("test"))
        msg = "http://zulip.testserver/#narrow/stream/999-hello"
        self.assertEqual(
            markdown_convert(msg, message_realm=realm, message=message).rendered_content,
            '<p><a href="#narrow/stream/999-hello">http://zulip.testserver/#narrow/stream/999-hello</a></p>',
        )
    def test_relative_link_streams_page(self) -> None:
        """The all-streams page URL on the realm's own host becomes a relative link."""
        realm = get_realm("zulip")
        sender_user_profile = self.example_user("othello")
        message = Message(sender=sender_user_profile, sending_client=get_client("test"))
        msg = "http://zulip.testserver/#streams/all"
        self.assertEqual(
            markdown_convert(msg, message_realm=realm, message=message).rendered_content,
            '<p><a href="#streams/all">http://zulip.testserver/#streams/all</a></p>',
        )
    def test_md_relative_link(self) -> None:
        """A Markdown link targeting the realm's own host is rewritten to a relative href."""
        realm = get_realm("zulip")
        sender_user_profile = self.example_user("othello")
        message = Message(sender=sender_user_profile, sending_client=get_client("test"))
        msg = "[hello](http://zulip.testserver/#narrow/stream/999-hello)"
        self.assertEqual(
            markdown_convert(msg, message_realm=realm, message=message).rendered_content,
            '<p><a href="#narrow/stream/999-hello">hello</a></p>',
        )
    def test_html_entity_conversion(self) -> None:
        """HTML entities are escaped in code/inline-code contexts but kept in prose."""
        msg = """\
            Test raw: Hello, ©
            Test inline code: `©`
            Test fenced code:
            ```
            ©
            ©
            ```
            Test quote:
            ~~~quote
            ©
            ~~~
            Test a list:
            * ©
            * `©`
            * ```©```
            Test an indented block:
                ©"""
        expected_output = """\
            <p>Test raw: Hello, ©<br>
            Test inline code: <code>&copy;</code></p>
            <p>Test fenced code:</p>
            <div class="codehilite"><pre><span></span><code>&copy;
            &copy;
            </code></pre></div>
            <p>Test quote:</p>
            <blockquote>
            <p>©</p>
            </blockquote>
            <p>Test a list:</p>
            <ul>
            <li>©</li>
            <li><code>&copy;</code></li>
            <li><code>&copy;</code></li>
            </ul>
            <p>Test an indented block:</p>
            <div class="codehilite"><pre><span></span><code>&copy;
            </code></pre></div>"""
        # Both literals share the same indentation, removed by dedent().
        converted = markdown_convert_wrapper(dedent(msg))
        self.assertEqual(converted, dedent(expected_output))
class MarkdownApiTests(ZulipTestCase):
    """Tests for the POST /api/v1/messages/render endpoint."""

    def test_render_message_api(self) -> None:
        """Plain Markdown is rendered to HTML for the requesting user."""
        content = "That is a **bold** statement"
        result = self.api_post(
            self.example_user("othello"),
            "/api/v1/messages/render",
            dict(content=content),
        )
        self.assert_json_success(result)
        self.assertEqual(
            result.json()["rendered"], "<p>That is a <strong>bold</strong> statement</p>"
        )

    def test_render_mention_stream_api(self) -> None:
        """Determines whether we're correctly passing the realm context"""
        content = "This mentions #**Denmark** and @**King Hamlet**."
        result = self.api_post(
            self.example_user("othello"),
            "/api/v1/messages/render",
            dict(content=content),
        )
        self.assert_json_success(result)
        user_id = self.example_user("hamlet").id
        stream_id = get_stream("Denmark", get_realm("zulip")).id
        self.assertEqual(
            result.json()["rendered"],
            f'<p>This mentions <a class="stream" data-stream-id="{stream_id}" href="/#narrow/stream/{stream_id}-Denmark">#Denmark</a> and <span class="user-mention" data-user-id="{user_id}">@King Hamlet</span>.</p>',
        )
class MarkdownErrorTests(ZulipTestCase):
    """Error handling and validation paths of the Markdown processor."""

    def test_markdown_error_handling(self) -> None:
        """A simulated internal failure surfaces as MarkdownRenderingException."""
        with self.simulated_markdown_failure():
            with self.assertRaises(MarkdownRenderingException):
                markdown_convert_wrapper("")

    def test_send_message_errors(self) -> None:
        """A rendering failure while sending a message surfaces as JsonableError."""
        message = "whatever"
        with self.simulated_markdown_failure():
            # We don't use assertRaisesRegex because it seems to not
            # handle i18n properly here on some systems.
            with self.assertRaises(JsonableError):
                self.send_stream_message(self.example_user("othello"), "Denmark", message)

    @override_settings(MAX_MESSAGE_LENGTH=10)
    def test_ultra_long_rendering(self) -> None:
        """A rendered message with an ultra-long length (> 100 * MAX_MESSAGE_LENGTH)
        throws an exception"""
        msg = "mock rendered message\n" * 10 * settings.MAX_MESSAGE_LENGTH
        # Patch the markdown timeout wrapper so "rendering" returns the huge string.
        with mock.patch("zerver.lib.markdown.timeout", return_value=msg), mock.patch(
            "zerver.lib.markdown.markdown_logger"
        ):
            with self.assertRaises(MarkdownRenderingException):
                markdown_convert_wrapper(msg)

    def test_curl_code_block_validation(self) -> None:
        """With content validators enabled, a malformed curl example raises."""
        processor = SimulatedFencedBlockPreprocessor(Markdown())
        processor.run_content_validators = True
        markdown_input = [
            "``` curl",
            "curl {{ api_url }}/v1/register",
            "    -u BOT_EMAIL_ADDRESS:BOT_API_KEY",
            '    -d "queue_id=1375801870:2942"',
            "```",
        ]
        with self.assertRaises(MarkdownRenderingException):
            processor.run(markdown_input)

    def test_curl_code_block_without_validation(self) -> None:
        """Without validation, the same curl block is formatted normally."""
        processor = SimulatedFencedBlockPreprocessor(Markdown())
        markdown_input = [
            "``` curl",
            "curl {{ api_url }}/v1/register",
            "    -u BOT_EMAIL_ADDRESS:BOT_API_KEY",
            '    -d "queue_id=1375801870:2942"',
            "```",
        ]
        expected = [
            "",
            "**curl:curl {{ api_url }}/v1/register",
            "    -u BOT_EMAIL_ADDRESS:BOT_API_KEY",
            '    -d "queue_id=1375801870:2942"**',
            "",
            "",
        ]
        result = processor.run(markdown_input)
        self.assertEqual(result, expected)
| 46.693605 | 1,845 | 0.630506 | import copy
import os
import re
from textwrap import dedent
from typing import Any, Dict, List, Optional, Set, Tuple, cast
from unittest import mock
import orjson
from django.conf import settings
from django.test import override_settings
from markdown import Markdown
from zerver.lib.actions import (
change_user_is_active,
do_add_alert_words,
do_change_user_setting,
do_create_realm,
do_remove_realm_emoji,
do_set_realm_property,
)
from zerver.lib.alert_words import get_alert_word_automaton
from zerver.lib.camo import get_camo_url
from zerver.lib.create_user import create_user
from zerver.lib.emoji import get_emoji_url
from zerver.lib.exceptions import JsonableError, MarkdownRenderingException
from zerver.lib.markdown import (
MarkdownListPreprocessor,
MessageRenderingResult,
clear_state_for_testing,
content_has_emoji_syntax,
fetch_tweet_data,
get_tweet_id,
image_preview_enabled,
markdown_convert,
maybe_update_markdown_engines,
possible_linked_stream_names,
topic_links,
url_embed_preview_enabled,
url_to_a,
)
from zerver.lib.markdown.fenced_code import FencedBlockPreprocessor
from zerver.lib.mdiff import diff_strings
from zerver.lib.mention import (
MentionData,
get_possible_mentions_info,
possible_mentions,
possible_user_group_mentions,
)
from zerver.lib.message import render_markdown
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.tex import render_tex
from zerver.lib.user_groups import create_user_group
from zerver.models import (
Message,
RealmEmoji,
RealmFilter,
Stream,
UserGroup,
UserMessage,
UserProfile,
flush_linkifiers,
flush_per_request_caches,
get_client,
get_realm,
get_stream,
linkifiers_for_realm,
realm_in_local_linkifiers_cache,
)
class SimulatedFencedBlockPreprocessor(FencedBlockPreprocessor):
    """Test double that renders code and placeholder spans as plain text markers."""

    def format_code(self, lang: Optional[str], code: str) -> str:
        # Tag the code with its language so tests can assert on the output.
        language = lang if lang else ""
        return f"{language}:{code}"

    def placeholder(self, s: str) -> str:
        # Bold the placeholder text, minus any surrounding newlines.
        text = s.strip("\n")
        return f"**{text}**"
class FencedBlockPreprocessorTest(ZulipTestCase):
    """Unit tests for the fenced code block preprocessor."""

    def test_simple_quoting(self) -> None:
        """A ~~~ quote fence is converted into >-prefixed lines."""
        processor = FencedBlockPreprocessor(Markdown())
        markdown_input = [
            "~~~ quote",
            "hi",
            "bye",
            "",
            "",
        ]
        expected = [
            "",
            "> hi",
            "> bye",
            "> ",
            "> ",
            "",
            "",
        ]
        lines = processor.run(markdown_input)
        self.assertEqual(lines, expected)

    def test_serial_quoting(self) -> None:
        """Two quote fences in sequence are each converted independently."""
        processor = FencedBlockPreprocessor(Markdown())
        markdown_input = [
            "~~~ quote",
            "hi",
            "~~~",
            "",
            "~~~ quote",
            "bye",
            "",
            "",
        ]
        expected = [
            "",
            "> hi",
            "",
            "",
            "",
            "> bye",
            "> ",
            "> ",
            "",
            "",
        ]
        lines = processor.run(markdown_input)
        self.assertEqual(lines, expected)

    def test_serial_code(self) -> None:
        """Consecutive fenced code blocks are each formatted with their language tag."""
        processor = SimulatedFencedBlockPreprocessor(Markdown())
        markdown_input = [
            "``` .py",
            "hello()",
            "```",
            "",
            "```vb.net",
            "goodbye()",
            "```",
            "",
            "```c#",
            "weirdchar()",
            "```",
            "",
            "```",
            "no-highlight()",
            "```",
            "",
        ]
        expected = [
            "",
            "**py:hello()**",
            "",
            "",
            "",
            "**vb.net:goodbye()**",
            "",
            "",
            "",
            "**c#:weirdchar()**",
            "",
            "",
            "",
            "**:no-highlight()**",
            "",
            "",
        ]
        lines = processor.run(markdown_input)
        self.assertEqual(lines, expected)

    def test_nested_code(self) -> None:
        """A code fence nested inside a quote fence is both formatted and quoted."""
        processor = SimulatedFencedBlockPreprocessor(Markdown())
        markdown_input = [
            "~~~ quote",
            "hi",
            "``` .py",
            "hello()",
            "```",
            "",
            "",
        ]
        expected = [
            "",
            "> hi",
            "> ",
            "> **py:hello()**",
            "> ",
            "> ",
            "> ",
            "",
            "",
        ]
        lines = processor.run(markdown_input)
        self.assertEqual(lines, expected)
def markdown_convert_wrapper(content: str) -> str:
    """Render *content* in the zulip realm and return only the HTML string."""
    rendering_result = markdown_convert(
        content=content,
        message_realm=get_realm("zulip"),
    )
    return rendering_result.rendered_content
class MarkdownMiscTest(ZulipTestCase):
    def test_diffs_work_as_expected(self) -> None:
        """diff_strings() produces the expected ANSI-colored word diff."""
        str1 = "<p>The quick brown fox jumps over the lazy dog.  Animal stories are fun, yeah</p>"
        str2 = "<p>The fast fox jumps over the lazy dogs and cats.  Animal stories are fun</p>"
        expected_diff = "\u001b[34m-\u001b[0m <p>The \u001b[33mquick brown\u001b[0m fox jumps over the lazy dog.  Animal stories are fun\u001b[31m, yeah\u001b[0m</p>\n\u001b[34m+\u001b[0m <p>The \u001b[33mfast\u001b[0m fox jumps over the lazy dog\u001b[32ms and cats\u001b[0m.  Animal stories are fun</p>\n"
        self.assertEqual(diff_strings(str1, str2), expected_diff)
def test_get_possible_mentions_info(self) -> None:
realm = get_realm("zulip")
def make_user(email: str, full_name: str) -> UserProfile:
return create_user(
email=email,
password="whatever",
realm=realm,
full_name=full_name,
)
fred1 = make_user("fred1@example.com", "Fred Flintstone")
change_user_is_active(fred1, False)
fred2 = make_user("fred2@example.com", "Fred Flintstone")
fred3 = make_user("fred3@example.com", "Fred Flintstone")
change_user_is_active(fred3, False)
fred4 = make_user("fred4@example.com", "Fred Flintstone")
lst = get_possible_mentions_info(
realm.id, {"Fred Flintstone", "Cordelia, LEAR's daughter", "Not A User"}
)
set_of_names = set(map(lambda x: x["full_name"].lower(), lst))
self.assertEqual(set_of_names, {"fred flintstone", "cordelia, lear's daughter"})
by_id = {row["id"]: row for row in lst}
self.assertEqual(
by_id.get(fred2.id),
dict(
email=fred2.email,
full_name="Fred Flintstone",
id=fred2.id,
),
)
self.assertEqual(
by_id.get(fred4.id),
dict(
email=fred4.email,
full_name="Fred Flintstone",
id=fred4.id,
),
)
def test_mention_data(self) -> None:
realm = get_realm("zulip")
hamlet = self.example_user("hamlet")
cordelia = self.example_user("cordelia")
content = "@**King Hamlet** @**Cordelia, lear's daughter**"
mention_data = MentionData(realm.id, content)
self.assertEqual(mention_data.get_user_ids(), {hamlet.id, cordelia.id})
self.assertEqual(
mention_data.get_user_by_id(hamlet.id),
dict(
email=hamlet.email,
full_name=hamlet.full_name,
id=hamlet.id,
),
)
user = mention_data.get_user_by_name("king hamLET")
assert user is not None
self.assertEqual(user["email"], hamlet.email)
self.assertFalse(mention_data.message_has_wildcards())
content = "@**King Hamlet** @**Cordelia, lear's daughter** @**all**"
mention_data = MentionData(realm.id, content)
self.assertTrue(mention_data.message_has_wildcards())
def test_invalid_katex_path(self) -> None:
with self.settings(DEPLOY_ROOT="/nonexistent"):
with self.assertLogs(level="ERROR") as m:
render_tex("random text")
self.assertEqual(m.output, ["ERROR:root:Cannot find KaTeX for latex rendering!"])
class MarkdownListPreprocessorTest(ZulipTestCase):
    """Tests for the preprocessor that inserts a blank line before a list
    which directly follows a paragraph (but leaves code fences alone)."""

    def split_message(self, msg: str) -> Tuple[List[str], List[str]]:
        """Return (original, expected) line lists for *msg*.

        A "<>" marker in *msg* denotes where the preprocessor is expected
        to insert a blank line: stripping the marker gives the input,
        splitting on it gives the expected output.
        """
        original = msg.replace("<>", "").split("\n")
        expected = re.split(r"\n|<>", msg)
        return original, expected

    def test_basic_list(self) -> None:
        # A list right after a paragraph gets a separating blank line.
        preprocessor = MarkdownListPreprocessor()
        original, expected = self.split_message("List without a gap\n<>* One\n* Two")
        self.assertEqual(preprocessor.run(original), expected)

    def test_list_after_quotes(self) -> None:
        # A preceding (closed) quote fence does not suppress the fix-up.
        preprocessor = MarkdownListPreprocessor()
        original, expected = self.split_message(
            "```quote\nSomething\n```\n\nList without a gap\n<>* One\n* Two"
        )
        self.assertEqual(preprocessor.run(original), expected)

    def test_list_in_code(self) -> None:
        # Inside a code fence the content must be left untouched.
        preprocessor = MarkdownListPreprocessor()
        original, expected = self.split_message("```\nList without a gap\n* One\n* Two\n```")
        self.assertEqual(preprocessor.run(original), expected)

    def test_complex_nesting_with_different_fences(self) -> None:
        # Quote fence (```/~~~/````) nesting: lists inside quotes are fixed
        # up, lists inside code fences (even nested ones) are not.
        preprocessor = MarkdownListPreprocessor()
        msg = """```quote
In quote. We should convert a list here:<>
* one
* two
~~~
This is a nested code fence, do not make changes here:
* one
* two
````quote
Quote in code fence. Should not convert:
* one
* two
````
~~~
Back in the quote. We should convert:<>
* one
* two
```
Outside. Should convert:<>
* one
* two
"""
        original, expected = self.split_message(msg)
        self.assertEqual(preprocessor.run(original), expected)

    def test_complex_nesting_with_same_fence(self) -> None:
        # Same as above, but every fence uses ``` so the preprocessor must
        # track nesting by fence language/kind, not by fence characters.
        preprocessor = MarkdownListPreprocessor()
        msg = """```quote
In quote. We should convert a list here:<>
* one
* two
```python
This is a nested code fence, do not make changes here:
* one
* two
```quote
Quote in code fence. Should not convert:
* one
* two
```
```
Back in the quote. We should convert:<>
* one
* two
```
Outside. Should convert:<>
* one
* two
"""
        original, expected = self.split_message(msg)
        self.assertEqual(preprocessor.run(original), expected)
class MarkdownTest(ZulipTestCase):
def setUp(self) -> None:
    super().setUp()
    # The markdown engine caches per-realm state between renders; reset it
    # so every test starts from a clean slate.
    clear_state_for_testing()
def assertEqual(self, first: Any, second: Any, msg: str = "") -> None:
    """For string comparisons, fail with a colored word-level diff instead
    of unittest's default output; defer to the base class otherwise."""
    if not (isinstance(first, str) and isinstance(second, str)):
        super().assertEqual(first, second)
        return
    if first == second:
        return
    raise AssertionError(
        "Actual and expected outputs do not match; showing diff.\n"
        + diff_strings(first, second)
        + msg
    )
def load_markdown_tests(self) -> Tuple[Dict[str, Any], List[List[str]]]:
    """Load the shared markdown fixtures: a name-keyed dict of rendering
    test cases plus the raw linkification table."""
    fixture_path = os.path.join(
        os.path.dirname(__file__), "fixtures/markdown_test_cases.json"
    )
    with open(fixture_path, "rb") as f:
        data = orjson.loads(f.read())
    test_fixtures = {test["name"]: test for test in data["regular_tests"]}
    return test_fixtures, data["linkify_tests"]
def test_markdown_no_ignores(self) -> None:
    """No fixture may be permanently marked "ignore" — that flag is only
    for temporary local debugging."""
    fixtures, _ = self.load_markdown_tests()
    for name, fixture in fixtures.items():
        self.assertFalse(
            fixture.get("ignore", False),
            f'Test "{name}" shouldn\'t be ignored.',
        )
def test_markdown_fixtures(self) -> None:
    """Run every fixture through the renderer and compare against the
    expected HTML, then exercise the linkification table."""
    format_tests, linkify_tests = self.load_markdown_tests()
    valid_keys = {
        "name",
        "input",
        "expected_output",
        "backend_only_rendering",
        "marked_expected_output",
        "text_content",
        "translate_emoticons",
        "ignore",
    }

    for name, test in format_tests.items():
        with self.subTest(markdown_test_case=name):
            # Check that there aren't any unexpected keys as those are often typos
            self.assert_length(set(test.keys()) - valid_keys, 0)

            if test.get("ignore", False):
                continue

            if test.get("translate_emoticons", False):
                # Emoticon translation is a per-user setting, so this case
                # needs a real sender with the setting enabled.
                user_profile = self.example_user("othello")
                do_change_user_setting(user_profile, "translate_emoticons", True)
                msg = Message(sender=user_profile, sending_client=get_client("test"))
                rendering_result = render_markdown(msg, test["input"])
                converted = rendering_result.rendered_content
            else:
                converted = markdown_convert_wrapper(test["input"])

            self.assertEqual(converted, test["expected_output"])

    def replaced(payload: str, url: str, phrase: str = "") -> str:
        # Build the expected anchor for a linkified URL: http(s) URLs are
        # used verbatim, addresses containing "@" become mailto: links,
        # and bare domains get an implicit http:// prefix.
        if url[:4] == "http":
            href = url
        elif "@" in url:
            href = "mailto:" + url
        else:
            href = "http://" + url
        return payload % (f'<a href="{href}">{url}</a>',)

    with mock.patch(
        "zerver.lib.url_preview.preview.link_embed_data_from_cache", return_value=None
    ):
        for inline_url, reference, url in linkify_tests:
            try:
                match = replaced(reference, url, phrase=inline_url)
            except TypeError:
                # The reference is not a %s format string, meaning the URL
                # should not have been linkified at all.
                match = reference
            converted = markdown_convert_wrapper(inline_url)
            self.assertEqual(match, converted)
def test_inline_file(self) -> None:
    """file:/// links are linkified only when ENABLE_FILE_LINKS is on."""
    msg = "Check out this file file:///Volumes/myserver/Users/Shared/pi.py"
    converted = markdown_convert_wrapper(msg)
    self.assertEqual(
        converted,
        '<p>Check out this file <a href="file:///Volumes/myserver/Users/Shared/pi.py">file:///Volumes/myserver/Users/Shared/pi.py</a></p>',
    )

    # Rebuild the cached markdown engine with file links disabled and
    # confirm the URL stays plain text.
    clear_state_for_testing()
    with self.settings(ENABLE_FILE_LINKS=False):
        realm = do_create_realm(string_id="file_links_test", name="file_links_test")
        maybe_update_markdown_engines(realm.id, False)
        self.assertEqual(
            markdown_convert(msg, message_realm=realm).rendered_content,
            "<p>Check out this file file:///Volumes/myserver/Users/Shared/pi.py</p>",
        )
def test_inline_bitcoin(self) -> None:
    """A bitcoin: URI embedded in text is linkified."""
    content = "To bitcoin:1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa or not to bitcoin"
    self.assertEqual(
        markdown_convert_wrapper(content),
        '<p>To <a href="bitcoin:1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa">bitcoin:1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa</a> or not to bitcoin</p>',
    )
def test_inline_youtube(self) -> None:
    """YouTube links get an inline thumbnail preview; the thumbnail is
    served through the Camo proxy."""
    # Standard watch URL with surrounding text.
    msg = "Check out the debate: http://www.youtube.com/watch?v=hx1mjT73xYE"
    converted = markdown_convert_wrapper(msg)
    self.assertEqual(
        converted,
        f"""<p>Check out the debate: <a href="http://www.youtube.com/watch?v=hx1mjT73xYE">http://www.youtube.com/watch?v=hx1mjT73xYE</a></p>\n<div class="youtube-video message_inline_image"><a data-id="hx1mjT73xYE" href="http://www.youtube.com/watch?v=hx1mjT73xYE"><img src="{get_camo_url("https://i.ytimg.com/vi/hx1mjT73xYE/default.jpg")}"></a></div>""",
    )

    # Bare watch URL.
    msg = "http://www.youtube.com/watch?v=hx1mjT73xYE"
    converted = markdown_convert_wrapper(msg)
    self.assertEqual(
        converted,
        f"""<p><a href="http://www.youtube.com/watch?v=hx1mjT73xYE">http://www.youtube.com/watch?v=hx1mjT73xYE</a></p>\n<div class="youtube-video message_inline_image"><a data-id="hx1mjT73xYE" href="http://www.youtube.com/watch?v=hx1mjT73xYE"><img src="{get_camo_url("https://i.ytimg.com/vi/hx1mjT73xYE/default.jpg")}"></a></div>""",
    )

    # Short youtu.be URL.
    msg = "https://youtu.be/hx1mjT73xYE"
    converted = markdown_convert_wrapper(msg)
    self.assertEqual(
        converted,
        f"""<p><a href="https://youtu.be/hx1mjT73xYE">https://youtu.be/hx1mjT73xYE</a></p>\n<div class="youtube-video message_inline_image"><a data-id="hx1mjT73xYE" href="https://youtu.be/hx1mjT73xYE"><img src="{get_camo_url("https://i.ytimg.com/vi/hx1mjT73xYE/default.jpg")}"></a></div>""",
    )

    # A playlist URL without a specific video id gets no preview.
    msg = "https://www.youtube.com/playlist?list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo"
    not_converted = markdown_convert_wrapper(msg)
    self.assertEqual(
        not_converted,
        '<p><a href="https://www.youtube.com/playlist?list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo">https://www.youtube.com/playlist?list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo</a></p>',
    )

    # A playlist URL that also has a v= parameter previews that video.
    msg = (
        "https://www.youtube.com/playlist?v=O5nskjZ_GoI&list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo"
    )
    converted = markdown_convert_wrapper(msg)
    self.assertEqual(
        converted,
        f"""<p><a href="https://www.youtube.com/playlist?v=O5nskjZ_GoI&list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo">https://www.youtube.com/playlist?v=O5nskjZ_GoI&list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo</a></p>\n<div class="youtube-video message_inline_image"><a data-id="O5nskjZ_GoI" href="https://www.youtube.com/playlist?v=O5nskjZ_GoI&list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo"><img src="{get_camo_url("https://i.ytimg.com/vi/O5nskjZ_GoI/default.jpg")}"></a></div>""",
    )

    # A watch_videos URL previews the first listed video id.
    msg = "http://www.youtube.com/watch_videos?video_ids=nOJgD4fcZhI,i96UO8-GFvw"
    converted = markdown_convert_wrapper(msg)
    self.assertEqual(
        converted,
        f"""<p><a href="http://www.youtube.com/watch_videos?video_ids=nOJgD4fcZhI,i96UO8-GFvw">http://www.youtube.com/watch_videos?video_ids=nOJgD4fcZhI,i96UO8-GFvw</a></p>\n<div class="youtube-video message_inline_image"><a data-id="nOJgD4fcZhI" href="http://www.youtube.com/watch_videos?video_ids=nOJgD4fcZhI,i96UO8-GFvw"><img src="{get_camo_url("https://i.ytimg.com/vi/nOJgD4fcZhI/default.jpg")}"></a></div>""",
    )
@override_settings(INLINE_URL_EMBED_PREVIEW=False)
def test_inline_vimeo(self) -> None:
    """Vimeo links are linkified but get no inline preview when URL embed
    previews are disabled."""
    cases = [
        (
            "Check out the debate: https://vimeo.com/246979354",
            '<p>Check out the debate: <a href="https://vimeo.com/246979354">https://vimeo.com/246979354</a></p>',
        ),
        (
            "https://vimeo.com/246979354",
            '<p><a href="https://vimeo.com/246979354">https://vimeo.com/246979354</a></p>',
        ),
    ]
    for content, expected in cases:
        self.assertEqual(markdown_convert_wrapper(content), expected)
@override_settings(INLINE_IMAGE_PREVIEW=True)
def test_inline_image_thumbnail_url(self) -> None:
    """Image previews go through the /thumbnail endpoint, except for URLs
    (like /static/ assets and emoji) that are never thumbnailed."""
    realm = get_realm("zephyr")
    # An uploaded file is thumbnailed.
    msg = "[foobar](/user_uploads/{realm_id}/50/w2G6ok9kr8AMCQCTNAUOFMln/IMG_0677.JPG)"
    msg = msg.format(realm_id=realm.id)
    thumbnail_img = '<img data-src-fullsize="/thumbnail?url=user_uploads%2F{realm_id}%2F50%2Fw2G6ok9kr8AMCQCTNAUOFMln%2FIMG_0677.JPG&amp;size=full" src="/thumbnail?url=user_uploads%2F{realm_id}%2F50%2Fw2G6ok9kr8AMCQCTNAUOFMln%2FIMG_0677.JPG&amp;size=thumbnail"><'
    thumbnail_img = thumbnail_img.format(realm_id=realm.id)
    converted = markdown_convert_wrapper(msg)
    self.assertIn(thumbnail_img, converted)

    # An external https image URL is thumbnailed.
    msg = "https://www.google.com/images/srpr/logo4w.png"
    thumbnail_img = '<img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&amp;size=full" src="/thumbnail?url=https%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&amp;size=thumbnail">'
    converted = markdown_convert_wrapper(msg)
    self.assertIn(thumbnail_img, converted)

    # A schemeless URL gets an implicit http:// before thumbnailing.
    msg = "www.google.com/images/srpr/logo4w.png"
    thumbnail_img = '<img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&amp;size=full" src="/thumbnail?url=http%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&amp;size=thumbnail">'
    converted = markdown_convert_wrapper(msg)
    self.assertIn(thumbnail_img, converted)

    # With thumbnailing disabled, external images fall back to Camo.
    msg = "https://www.google.com/images/srpr/logo4w.png"
    thumbnail_img = f"""<div class="message_inline_image"><a href="https://www.google.com/images/srpr/logo4w.png"><img src="{get_camo_url("https://www.google.com/images/srpr/logo4w.png")}"></a></div>"""
    with self.settings(THUMBNAIL_IMAGES=False):
        converted = markdown_convert_wrapper(msg)
    self.assertIn(thumbnail_img, converted)

    # Linked images that are not under /user_uploads/ (e.g. /static/
    # assets) are not thumbnailed.
    msg = "[foobar](/static/images/cute/turtle.png)"
    thumbnail_img = '<div class="message_inline_image"><a href="/static/images/cute/turtle.png" title="foobar"><img src="/static/images/cute/turtle.png"></a></div>'
    converted = markdown_convert_wrapper(msg)
    self.assertIn(thumbnail_img, converted)

    # Custom emoji images are also served directly, not thumbnailed.
    msg = "[foobar](/user_avatars/{realm_id}/emoji/images/50.png)"
    msg = msg.format(realm_id=realm.id)
    thumbnail_img = '<div class="message_inline_image"><a href="/user_avatars/{realm_id}/emoji/images/50.png" title="foobar"><img src="/user_avatars/{realm_id}/emoji/images/50.png"></a></div>'
    thumbnail_img = thumbnail_img.format(realm_id=realm.id)
    converted = markdown_convert_wrapper(msg)
    self.assertIn(thumbnail_img, converted)
@override_settings(INLINE_IMAGE_PREVIEW=True)
def test_inline_image_preview(self) -> None:
    """An image URL gets an inline thumbnail preview unless the realm has
    previews disabled.

    Cleanup: replaced setattr() with a constant attribute name by a plain
    attribute assignment, and dropped the redundant second example_user()
    lookup (the profile is unchanged between renders).
    """
    with_preview = '<div class="message_inline_image"><a href="http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fcdn.wallpapersafari.com%2F13%2F6%2F16eVjx.jpeg&amp;size=full" src="/thumbnail?url=http%3A%2F%2Fcdn.wallpapersafari.com%2F13%2F6%2F16eVjx.jpeg&amp;size=thumbnail"></a></div>'
    without_preview = '<p><a href="http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg">http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg</a></p>'
    content = "http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg"

    sender_user_profile = self.example_user("othello")
    msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
    converted = render_markdown(msg, content)
    self.assertEqual(converted.rendered_content, with_preview)

    # Disable previews for the realm and re-render: only the link remains.
    realm = msg.get_realm()
    realm.inline_image_preview = False
    realm.save()

    msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
    converted = render_markdown(msg, content)
    self.assertEqual(converted.rendered_content, without_preview)
@override_settings(THUMBNAIL_IMAGES=False, EXTERNAL_URI_SCHEME="https://")
def test_external_image_preview_use_camo(self) -> None:
    """With thumbnailing off, external image previews are proxied via Camo.

    Fix: the assertIn() arguments were reversed — assertIn(member,
    container) must look for the expected <div> fragment in the rendered
    output, not the other way around.
    """
    content = "https://example.com/thing.jpeg"
    thumbnail_img = f"""<div class="message_inline_image"><a href="{content}"><img src="{get_camo_url(content)}"></a></div>"""
    converted = markdown_convert_wrapper(content)
    self.assertIn(thumbnail_img, converted)
@override_settings(THUMBNAIL_IMAGES=False, EXTERNAL_URI_SCHEME="https://")
def test_static_image_preview_skip_camo(self) -> None:
    """Images hosted under STATIC_URL are previewed directly, not via Camo.

    Fix: reversed assertIn() arguments — search for the expected fragment
    inside the rendered output.
    """
    content = f"{ settings.STATIC_URL }/thing.jpeg"
    thumbnail_img = f"""<div class="message_inline_image"><a href="{content}"><img src="{content}"></a></div>"""
    converted = markdown_convert_wrapper(content)
    self.assertIn(thumbnail_img, converted)
@override_settings(THUMBNAIL_IMAGES=False, EXTERNAL_URI_SCHEME="https://")
def test_realm_image_preview_skip_camo(self) -> None:
    """Images on the realm's own host must not be rewritten to a Camo URL.

    Fix: the assertNotIn() arguments were reversed, which made the check
    vacuous (an HTML document can never be a substring of a URL); we must
    assert the Camo URL does not appear in the rendered output.
    """
    content = f"https://zulip.{ settings.EXTERNAL_HOST }/thing.jpeg"
    converted = markdown_convert_wrapper(content)
    self.assertNotIn(get_camo_url(content), converted)
@override_settings(THUMBNAIL_IMAGES=False, EXTERNAL_URI_SCHEME="https://")
def test_cross_realm_image_preview_use_camo(self) -> None:
    """Images hosted by a different realm count as external and use Camo.

    Fix: reversed assertIn() arguments — search for the expected fragment
    inside the rendered output.
    """
    content = f"https://otherrealm.{ settings.EXTERNAL_HOST }/thing.jpeg"
    thumbnail_img = f"""<div class="message_inline_image"><a href="{ content }"><img src="{ get_camo_url(content) }"></a></div>"""
    converted = markdown_convert_wrapper(content)
    self.assertIn(thumbnail_img, converted)
@override_settings(INLINE_IMAGE_PREVIEW=True)
def test_inline_image_quoted_blocks(self) -> None:
    """Image URLs inside quoted blocks are linkified but NOT previewed;
    only top-level image URLs get the inline preview div."""
    # Top-level URL: preview is generated.
    content = "http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg"
    expected = '<div class="message_inline_image"><a href="http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fcdn.wallpapersafari.com%2F13%2F6%2F16eVjx.jpeg&amp;size=full" src="/thumbnail?url=http%3A%2F%2Fcdn.wallpapersafari.com%2F13%2F6%2F16eVjx.jpeg&amp;size=thumbnail"></a></div>'
    sender_user_profile = self.example_user("othello")
    msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
    converted = render_markdown(msg, content)
    self.assertEqual(converted.rendered_content, expected)

    # URL inside a blockquote: no preview.
    content = ">http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg\n\nAwesome!"
    expected = '<blockquote>\n<p><a href="http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg">http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg</a></p>\n</blockquote>\n<p>Awesome!</p>'
    sender_user_profile = self.example_user("othello")
    msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
    converted = render_markdown(msg, content)
    self.assertEqual(converted.rendered_content, expected)

    # URL inside a list within a blockquote: still no preview.
    content = ">* http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg\n\nAwesome!"
    expected = '<blockquote>\n<ul>\n<li><a href="http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg">http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg</a></li>\n</ul>\n</blockquote>\n<p>Awesome!</p>'
    sender_user_profile = self.example_user("othello")
    msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
    converted = render_markdown(msg, content)
    self.assertEqual(converted.rendered_content, expected)
@override_settings(INLINE_IMAGE_PREVIEW=True)
def test_inline_image_preview_order(self) -> None:
    """Multiple image previews appear after the text, in message order;
    quoted images are skipped and list items keep their preview inline."""
    realm = get_realm("zulip")
    # Three consecutive image URLs: all three previews follow the paragraph.
    content = "http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg\nhttp://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg\nhttp://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg"
    expected = '<p><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg">http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg</a><br>\n<a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg">http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg</a><br>\n<a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg">http://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg</a></p>\n<div class="message_inline_image"><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_01.jpg&amp;size=full" src="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_01.jpg&amp;size=thumbnail"></a></div><div class="message_inline_image"><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_02.jpg&amp;size=full" src="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_02.jpg&amp;size=thumbnail"></a></div><div class="message_inline_image"><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_03.jpg&amp;size=full" src="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_03.jpg&amp;size=thumbnail"></a></div>'
    sender_user_profile = self.example_user("othello")
    msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
    converted = render_markdown(msg, content)
    self.assertEqual(converted.rendered_content, expected)

    # Mixed placement: top-level image previews, quoted image does not,
    # and list-item images keep their preview inside the <li>.
    content = "http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg\n\n>http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg\n\n* http://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg\n* https://www.google.com/images/srpr/logo4w.png"
    expected = '<div class="message_inline_image"><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_01.jpg&amp;size=full" src="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_01.jpg&amp;size=thumbnail"></a></div><blockquote>\n<p><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg">http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg</a></p>\n</blockquote>\n<ul>\n<li><div class="message_inline_image"><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_03.jpg&amp;size=full" src="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_03.jpg&amp;size=thumbnail"></a></div></li>\n<li><div class="message_inline_image"><a href="https://www.google.com/images/srpr/logo4w.png"><img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&amp;size=full" src="/thumbnail?url=https%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&amp;size=thumbnail"></a></div></li>\n</ul>'
    sender_user_profile = self.example_user("othello")
    msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
    converted = render_markdown(msg, content)
    self.assertEqual(converted.rendered_content, expected)

    # Uploaded files named via [text](link): each preview directly follows
    # the paragraph referencing it.
    content = "Test 1\n[21136101110_1dde1c1a7e_o.jpg](/user_uploads/{realm_id}/6d/F1PX6u16JA2P-nK45PyxHIYZ/21136101110_1dde1c1a7e_o.jpg) \n\nNext image\n[IMG_20161116_023910.jpg](/user_uploads/{realm_id}/69/sh7L06e7uH7NaX6d5WFfVYQp/IMG_20161116_023910.jpg) \n\nAnother screenshot\n[Screenshot-from-2016-06-01-16-22-42.png](/user_uploads/{realm_id}/70/_aZmIEWaN1iUaxwkDjkO7bpj/Screenshot-from-2016-06-01-16-22-42.png)"
    content = content.format(realm_id=realm.id)
    expected = '<p>Test 1<br>\n<a href="/user_uploads/{realm_id}/6d/F1PX6u16JA2P-nK45PyxHIYZ/21136101110_1dde1c1a7e_o.jpg">21136101110_1dde1c1a7e_o.jpg</a> </p>\n<div class="message_inline_image"><a href="/user_uploads/{realm_id}/6d/F1PX6u16JA2P-nK45PyxHIYZ/21136101110_1dde1c1a7e_o.jpg" title="21136101110_1dde1c1a7e_o.jpg"><img data-src-fullsize="/thumbnail?url=user_uploads%2F{realm_id}%2F6d%2FF1PX6u16JA2P-nK45PyxHIYZ%2F21136101110_1dde1c1a7e_o.jpg&amp;size=full" src="/thumbnail?url=user_uploads%2F{realm_id}%2F6d%2FF1PX6u16JA2P-nK45PyxHIYZ%2F21136101110_1dde1c1a7e_o.jpg&amp;size=thumbnail"></a></div><p>Next image<br>\n<a href="/user_uploads/{realm_id}/69/sh7L06e7uH7NaX6d5WFfVYQp/IMG_20161116_023910.jpg">IMG_20161116_023910.jpg</a> </p>\n<div class="message_inline_image"><a href="/user_uploads/{realm_id}/69/sh7L06e7uH7NaX6d5WFfVYQp/IMG_20161116_023910.jpg" title="IMG_20161116_023910.jpg"><img data-src-fullsize="/thumbnail?url=user_uploads%2F{realm_id}%2F69%2Fsh7L06e7uH7NaX6d5WFfVYQp%2FIMG_20161116_023910.jpg&amp;size=full" src="/thumbnail?url=user_uploads%2F{realm_id}%2F69%2Fsh7L06e7uH7NaX6d5WFfVYQp%2FIMG_20161116_023910.jpg&amp;size=thumbnail"></a></div><p>Another screenshot<br>\n<a href="/user_uploads/{realm_id}/70/_aZmIEWaN1iUaxwkDjkO7bpj/Screenshot-from-2016-06-01-16-22-42.png">Screenshot-from-2016-06-01-16-22-42.png</a></p>\n<div class="message_inline_image"><a href="/user_uploads/{realm_id}/70/_aZmIEWaN1iUaxwkDjkO7bpj/Screenshot-from-2016-06-01-16-22-42.png" title="Screenshot-from-2016-06-01-16-22-42.png"><img data-src-fullsize="/thumbnail?url=user_uploads%2F{realm_id}%2F70%2F_aZmIEWaN1iUaxwkDjkO7bpj%2FScreenshot-from-2016-06-01-16-22-42.png&amp;size=full" src="/thumbnail?url=user_uploads%2F{realm_id}%2F70%2F_aZmIEWaN1iUaxwkDjkO7bpj%2FScreenshot-from-2016-06-01-16-22-42.png&amp;size=thumbnail"></a></div>'
    expected = expected.format(realm_id=realm.id)

    msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
    converted = render_markdown(msg, content)
    self.assertEqual(converted.rendered_content, expected)
@override_settings(INLINE_IMAGE_PREVIEW=True)
def test_corrected_image_source(self) -> None:
    """Wiki-style image page URLs are rewritten to the direct file URL
    before previewing."""
    # testing only Wikipedia because linx.li URLs can be expected to expire
    content = "https://en.wikipedia.org/wiki/File:Wright_of_Derby,_The_Orrery.jpg"
    expected = '<div class="message_inline_image"><a href="https://en.wikipedia.org/wiki/Special:FilePath/File:Wright_of_Derby,_The_Orrery.jpg"><img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fen.wikipedia.org%2Fwiki%2FSpecial%3AFilePath%2FFile%3AWright_of_Derby%2C_The_Orrery.jpg&amp;size=full" src="/thumbnail?url=https%3A%2F%2Fen.wikipedia.org%2Fwiki%2FSpecial%3AFilePath%2FFile%3AWright_of_Derby%2C_The_Orrery.jpg&amp;size=thumbnail"></a></div>'
    sender_user_profile = self.example_user("othello")
    msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
    converted = render_markdown(msg, content)
    self.assertEqual(converted.rendered_content, expected)
@override_settings(INLINE_IMAGE_PREVIEW=False)
def test_image_preview_enabled(self) -> None:
    """image_preview_enabled honors the global setting, the message/realm
    context, and the no_previews override.

    NOTE: the direct mutation of settings.INLINE_IMAGE_PREVIEW below is
    restored by the override_settings decorator when the test exits.
    """
    ret = image_preview_enabled()
    self.assertFalse(ret)

    settings.INLINE_IMAGE_PREVIEW = True

    sender_user_profile = self.example_user("othello")
    message = Message(sender=sender_user_profile, sending_client=get_client("test"))
    realm = message.get_realm()

    ret = image_preview_enabled()
    self.assertTrue(ret)

    # no_previews always wins over the enabled setting.
    ret = image_preview_enabled(no_previews=True)
    self.assertFalse(ret)

    ret = image_preview_enabled(message, realm)
    self.assertTrue(ret)

    ret = image_preview_enabled(message)
    self.assertTrue(ret)

    ret = image_preview_enabled(message, realm, no_previews=True)
    self.assertFalse(ret)

    ret = image_preview_enabled(message, no_previews=True)
    self.assertFalse(ret)
@override_settings(INLINE_URL_EMBED_PREVIEW=False)
def test_url_embed_preview_enabled(self) -> None:
    """url_embed_preview_enabled honors the global setting, the realm
    flag, and the no_previews override.

    Fix: one check accidentally called image_preview_enabled() — a
    copy-paste from test_image_preview_enabled; it now exercises
    url_embed_preview_enabled, the function under test. (Both return
    False with no_previews=True, so the assertion itself is unchanged.)
    """
    sender_user_profile = self.example_user("othello")
    message = copy.deepcopy(
        Message(sender=sender_user_profile, sending_client=get_client("test"))
    )
    realm = message.get_realm()
    realm.inline_url_embed_preview = True  # off by default
    realm.save(update_fields=["inline_url_embed_preview"])

    ret = url_embed_preview_enabled()
    self.assertFalse(ret)

    # Restored by the override_settings decorator on exit.
    settings.INLINE_URL_EMBED_PREVIEW = True

    ret = url_embed_preview_enabled()
    self.assertTrue(ret)

    ret = url_embed_preview_enabled(no_previews=True)
    self.assertFalse(ret)

    ret = url_embed_preview_enabled(message, realm)
    self.assertTrue(ret)

    ret = url_embed_preview_enabled(message)
    self.assertTrue(ret)

    ret = url_embed_preview_enabled(message, no_previews=True)
    self.assertFalse(ret)
def test_inline_dropbox(self) -> None:
    """Dropbox file and folder links are previewed using their Open Graph
    metadata (mocked here), with thumbnails proxied via Camo."""
    msg = "Look at how hilarious our old office was: https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG"
    image_info = {
        "image": "https://photos-4.dropbox.com/t/2/AABIre1oReJgPYuc_53iv0IHq1vUzRaDg2rrCfTpiWMccQ/12/129/jpeg/1024x1024/2/_/0/4/IMG_0923.JPG/CIEBIAEgAiAHKAIoBw/ymdijjcg67hv2ta/AABz2uuED1ox3vpWWvMpBxu6a/IMG_0923.JPG",
        "desc": "Shared with Dropbox",
        "title": "IMG_0923.JPG",
    }
    with mock.patch("zerver.lib.markdown.fetch_open_graph_image", return_value=image_info):
        converted = markdown_convert_wrapper(msg)

    # A file share link is previewed as an image (with ?raw=1 appended).
    self.assertEqual(
        converted,
        f"""<p>Look at how hilarious our old office was: <a href="https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG">https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG</a></p>\n<div class="message_inline_image"><a href="https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG" title="IMG_0923.JPG"><img src="{get_camo_url("https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG?raw=1")}"></a></div>""",
    )

    msg = "Look at my hilarious drawing folder: https://www.dropbox.com/sh/cm39k9e04z7fhim/AAAII5NK-9daee3FcF41anEua?dl="
    image_info = {
        "image": "https://cf.dropboxstatic.com/static/images/icons128/folder_dropbox.png",
        "desc": "Shared with Dropbox",
        "title": "Saves",
    }
    with mock.patch("zerver.lib.markdown.fetch_open_graph_image", return_value=image_info):
        converted = markdown_convert_wrapper(msg)

    # A folder share link gets a reference card, not an image preview.
    self.assertEqual(
        converted,
        f"""<p>Look at my hilarious drawing folder: <a href="https://www.dropbox.com/sh/cm39k9e04z7fhim/AAAII5NK-9daee3FcF41anEua?dl=">https://www.dropbox.com/sh/cm39k9e04z7fhim/AAAII5NK-9daee3FcF41anEua?dl=</a></p>\n<div class="message_inline_ref"><a href="https://www.dropbox.com/sh/cm39k9e04z7fhim/AAAII5NK-9daee3FcF41anEua?dl=" title="Saves"><img src="{get_camo_url("https://cf.dropboxstatic.com/static/images/icons128/folder_dropbox.png")}"></a><div><div class="message_inline_image_title">Saves</div><desc class="message_inline_image_desc"></desc></div></div>""",
    )
def test_inline_dropbox_preview(self) -> None:
    """Dropbox photo album (/sc/) links are previewed using the album's
    Open Graph image (mocked here)."""
    msg = "https://www.dropbox.com/sc/tditp9nitko60n5/03rEiZldy5"
    image_info = {
        "image": "https://photos-6.dropbox.com/t/2/AAAlawaeD61TyNewO5vVi-DGf2ZeuayfyHFdNTNzpGq-QA/12/271544745/jpeg/1024x1024/2/_/0/5/baby-piglet.jpg/CKnjvYEBIAIgBygCKAc/tditp9nitko60n5/AADX03VAIrQlTl28CtujDcMla/0",
        "desc": "Shared with Dropbox",
        "title": "1 photo",
    }
    with mock.patch("zerver.lib.markdown.fetch_open_graph_image", return_value=image_info):
        converted = markdown_convert_wrapper(msg)

    self.assertEqual(
        converted,
        f"""<p><a href="https://www.dropbox.com/sc/tditp9nitko60n5/03rEiZldy5">https://www.dropbox.com/sc/tditp9nitko60n5/03rEiZldy5</a></p>\n<div class="message_inline_image"><a href="https://www.dropbox.com/sc/tditp9nitko60n5/03rEiZldy5" title="1 photo"><img src="{get_camo_url("https://photos-6.dropbox.com/t/2/AAAlawaeD61TyNewO5vVi-DGf2ZeuayfyHFdNTNzpGq-QA/12/271544745/jpeg/1024x1024/2/_/0/5/baby-piglet.jpg/CKnjvYEBIAIgBygCKAc/tditp9nitko60n5/AADX03VAIrQlTl28CtujDcMla/0")}"></a></div>""",
    )
def test_inline_dropbox_negative(self) -> None:
    """A Dropbox static asset with no Open Graph data falls back to the
    regular image-preview path."""
    # Make sure we're not overzealous in our conversion:
    msg = "Look at the new dropbox logo: https://www.dropbox.com/static/images/home_logo.png"
    with mock.patch("zerver.lib.markdown.fetch_open_graph_image", return_value=None):
        converted = markdown_convert_wrapper(msg)

    self.assertEqual(
        converted,
        '<p>Look at the new dropbox logo: <a href="https://www.dropbox.com/static/images/home_logo.png">https://www.dropbox.com/static/images/home_logo.png</a></p>\n<div class="message_inline_image"><a href="https://www.dropbox.com/static/images/home_logo.png"><img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fwww.dropbox.com%2Fstatic%2Fimages%2Fhome_logo.png&amp;size=full" src="/thumbnail?url=https%3A%2F%2Fwww.dropbox.com%2Fstatic%2Fimages%2Fhome_logo.png&amp;size=thumbnail"></a></div>',
    )
def test_inline_dropbox_bad(self) -> None:
    """A Dropbox-like URL whose metadata cannot be fetched is linkified
    without any preview."""
    content = "https://zulip-test.dropbox.com/photos/cl/ROmr9K1XYtmpneM"
    with mock.patch("zerver.lib.markdown.fetch_open_graph_image", return_value=None):
        rendered = markdown_convert_wrapper(content)
    self.assertEqual(
        rendered,
        '<p><a href="https://zulip-test.dropbox.com/photos/cl/ROmr9K1XYtmpneM">https://zulip-test.dropbox.com/photos/cl/ROmr9K1XYtmpneM</a></p>',
    )
def test_inline_github_preview(self) -> None:
    """GitHub blob links are previewed via raw.githubusercontent.com;
    other github.com image URLs are thumbnailed directly."""
    msg = "Test: https://github.com/zulip/zulip/blob/main/static/images/logo/zulip-icon-128x128.png"
    converted = markdown_convert_wrapper(msg)

    self.assertEqual(
        converted,
        '<p>Test: <a href="https://github.com/zulip/zulip/blob/main/static/images/logo/zulip-icon-128x128.png">https://github.com/zulip/zulip/blob/main/static/images/logo/zulip-icon-128x128.png</a></p>\n<div class="message_inline_image"><a href="https://github.com/zulip/zulip/blob/main/static/images/logo/zulip-icon-128x128.png"><img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fraw.githubusercontent.com%2Fzulip%2Fzulip%2Fmain%2Fstatic%2Fimages%2Flogo%2Fzulip-icon-128x128.png&amp;size=full" src="/thumbnail?url=https%3A%2F%2Fraw.githubusercontent.com%2Fzulip%2Fzulip%2Fmain%2Fstatic%2Fimages%2Flogo%2Fzulip-icon-128x128.png&amp;size=thumbnail"></a></div>',
    )

    # Non-blob GitHub URLs are not rewritten.
    msg = "Test: https://developer.github.com/assets/images/hero-circuit-bg.png"
    converted = markdown_convert_wrapper(msg)

    self.assertEqual(
        converted,
        '<p>Test: <a href="https://developer.github.com/assets/images/hero-circuit-bg.png">https://developer.github.com/assets/images/hero-circuit-bg.png</a></p>\n<div class="message_inline_image"><a href="https://developer.github.com/assets/images/hero-circuit-bg.png"><img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fdeveloper.github.com%2Fassets%2Fimages%2Fhero-circuit-bg.png&amp;size=full" src="/thumbnail?url=https%3A%2F%2Fdeveloper.github.com%2Fassets%2Fimages%2Fhero-circuit-bg.png&amp;size=thumbnail"></a></div>',
    )
    def test_inline_youtube_preview(self) -> None:
        """YouTube links get a thumbnail preview, inside spoilers and in plain messages."""
        # Test YouTube URLs in spoilers
        msg = """\n```spoiler Check out this PyCon video\nhttps://www.youtube.com/watch?v=0c46YHS3RY8\n```"""
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            f"""<div class="spoiler-block"><div class="spoiler-header">\n<p>Check out this PyCon video</p>\n</div><div class="spoiler-content" aria-hidden="true">\n<p><a href="https://www.youtube.com/watch?v=0c46YHS3RY8">https://www.youtube.com/watch?v=0c46YHS3RY8</a></p>\n<div class="youtube-video message_inline_image"><a data-id="0c46YHS3RY8" href="https://www.youtube.com/watch?v=0c46YHS3RY8"><img src="{get_camo_url("https://i.ytimg.com/vi/0c46YHS3RY8/default.jpg")}"></a></div></div></div>""",
        )
        # Test YouTube URLs in normal messages.
        msg = "[YouTube link](https://www.youtube.com/watch?v=0c46YHS3RY8)"
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            f"""<p><a href="https://www.youtube.com/watch?v=0c46YHS3RY8">YouTube link</a></p>\n<div class="youtube-video message_inline_image"><a data-id="0c46YHS3RY8" href="https://www.youtube.com/watch?v=0c46YHS3RY8"><img src="{get_camo_url("https://i.ytimg.com/vi/0c46YHS3RY8/default.jpg")}"></a></div>""",
        )
        # Multiple videos separated by text each get their own preview, in order.
        msg = "https://www.youtube.com/watch?v=0c46YHS3RY8\n\nSample text\n\nhttps://www.youtube.com/watch?v=lXFO2ULktEI"
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            f"""<p><a href="https://www.youtube.com/watch?v=0c46YHS3RY8">https://www.youtube.com/watch?v=0c46YHS3RY8</a></p>\n<div class="youtube-video message_inline_image"><a data-id="0c46YHS3RY8" href="https://www.youtube.com/watch?v=0c46YHS3RY8"><img src="{get_camo_url("https://i.ytimg.com/vi/0c46YHS3RY8/default.jpg")}"></a></div><p>Sample text</p>\n<p><a href="https://www.youtube.com/watch?v=lXFO2ULktEI">https://www.youtube.com/watch?v=lXFO2ULktEI</a></p>\n<div class="youtube-video message_inline_image"><a data-id="lXFO2ULktEI" href="https://www.youtube.com/watch?v=lXFO2ULktEI"><img src="{get_camo_url("https://i.ytimg.com/vi/lXFO2ULktEI/default.jpg")}"></a></div>""",
        )
def test_twitter_id_extraction(self) -> None:
self.assertEqual(
get_tweet_id("http://twitter.com/#!/VizzQuotes/status/409030735191097344"),
"409030735191097344",
)
self.assertEqual(
get_tweet_id("http://twitter.com/VizzQuotes/status/409030735191097344"),
"409030735191097344",
)
self.assertEqual(
get_tweet_id("http://twitter.com/VizzQuotes/statuses/409030735191097344"),
"409030735191097344",
)
self.assertEqual(get_tweet_id("https://twitter.com/wdaher/status/1017581858"), "1017581858")
self.assertEqual(
get_tweet_id("https://twitter.com/wdaher/status/1017581858/"), "1017581858"
)
self.assertEqual(
get_tweet_id("https://twitter.com/windyoona/status/410766290349879296/photo/1"),
"410766290349879296",
)
self.assertEqual(
get_tweet_id("https://twitter.com/windyoona/status/410766290349879296/"),
"410766290349879296",
)
    def test_inline_interesting_links(self) -> None:
        """End-to-end checks of inline Twitter previews: which URLs get previews,
        how many previews per message (capped), and how mentions/media/emoji in
        tweet bodies are rendered.
        """

        def make_link(url: str) -> str:
            # Expected markup for a plain autolinked URL.
            return f'<a href="{url}">{url}</a>'

        # Expected rendering of the mocked tweet body (mentions autolinked).
        normal_tweet_html = (
            '<a href="https://twitter.com/Twitter"'
            ">@Twitter</a> "
            "meets @seepicturely at #tcdisrupt cc."
            '<a href="https://twitter.com/boscomonkey"'
            ">@boscomonkey</a> "
            '<a href="https://twitter.com/episod"'
            ">@episod</a> "
            '<a href="http://t.co/6J2EgYM"'
            ">http://instagr.am/p/MuW67/</a>"
        )

        # A tweet whose t.co link contains an @ (must not be treated as a mention).
        mention_in_link_tweet_html = """<a href="http://t.co/@foo">http://foo.com</a>"""

        # A tweet with an attached media link.
        media_tweet_html = (
            '<a href="http://t.co/xo7pAhK6n3">'
            "http://twitter.com/NEVNBoston/status/421654515616849920/photo/1</a>"
        )

        # A tweet body containing emoji shortcode markup.
        emoji_in_tweet_html = """Zulip is <span aria-label=\"100\" class="emoji emoji-1f4af" role=\"img\" title="100">:100:</span>% open-source!"""

        def make_inline_twitter_preview(url: str, tweet_html: str, image_html: str = "") -> str:
            ## As of right now, all previews are mocked to be the exact same tweet
            return (
                '<div class="inline-preview-twitter">'
                '<div class="twitter-tweet">'
                f'<a href="{url}">'
                '<img class="twitter-avatar"'
                ' src="https://external-content.zulipcdn.net/external_content/1f7cd2436976d410eab8189ebceda87ae0b34ead/687474703a2f2f7062732e7477696d672e63'
                "6f6d2f70726f66696c655f696d616765732f313338303931323137332f53637265656e5f73686f745f323031312d30362d30335f61745f372e33352e33"
                '365f504d5f6e6f726d616c2e706e67">'
                "</a>"
                f"<p>{tweet_html}</p>"
                "<span>- Eoin McMillan (@imeoin)</span>"
                f"{image_html}"
                "</div>"
                "</div>"
            )

        # Bare twitter.com (no status) — plain link, no preview.
        msg = "http://www.twitter.com"
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(converted, "<p>{}</p>".format(make_link("http://www.twitter.com")))

        # User page without a status ID — plain link.
        msg = "http://www.twitter.com/wdaher/"
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(converted, "<p>{}</p>".format(make_link("http://www.twitter.com/wdaher/")))

        # Status ID too short to be real — plain link.
        msg = "http://www.twitter.com/wdaher/status/3"
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted, "<p>{}</p>".format(make_link("http://www.twitter.com/wdaher/status/3"))
        )

        # id too long
        msg = "http://www.twitter.com/wdaher/status/2879779692873154569"
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            "<p>{}</p>".format(
                make_link("http://www.twitter.com/wdaher/status/2879779692873154569")
            ),
        )

        # id too large (i.e. tweet doesn't exist)
        msg = "http://www.twitter.com/wdaher/status/999999999999999999"
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            "<p>{}</p>".format(
                make_link("http://www.twitter.com/wdaher/status/999999999999999999")
            ),
        )

        # Valid status URL with www. prefix — link plus preview.
        msg = "http://www.twitter.com/wdaher/status/287977969287315456"
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            "<p>{}</p>\n{}".format(
                make_link("http://www.twitter.com/wdaher/status/287977969287315456"),
                make_inline_twitter_preview(
                    "http://www.twitter.com/wdaher/status/287977969287315456", normal_tweet_html
                ),
            ),
        )

        # Same, https scheme.
        msg = "https://www.twitter.com/wdaher/status/287977969287315456"
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            "<p>{}</p>\n{}".format(
                make_link("https://www.twitter.com/wdaher/status/287977969287315456"),
                make_inline_twitter_preview(
                    "https://www.twitter.com/wdaher/status/287977969287315456", normal_tweet_html
                ),
            ),
        )

        # Same, no www.
        msg = "http://twitter.com/wdaher/status/287977969287315456"
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            "<p>{}</p>\n{}".format(
                make_link("http://twitter.com/wdaher/status/287977969287315456"),
                make_inline_twitter_preview(
                    "http://twitter.com/wdaher/status/287977969287315456", normal_tweet_html
                ),
            ),
        )

        # Four links, only two distinct tweets — previews are de-duplicated
        # (only the first occurrence of each tweet gets a preview).
        msg = (
            "http://twitter.com/wdaher/status/287977969287315456 "
            "http://twitter.com/wdaher/status/287977969287315457 "
            "http://twitter.com/wdaher/status/287977969287315457 "
            "http://twitter.com/wdaher/status/287977969287315457"
        )
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            "<p>{} {} {} {}</p>\n{}{}".format(
                make_link("http://twitter.com/wdaher/status/287977969287315456"),
                make_link("http://twitter.com/wdaher/status/287977969287315457"),
                make_link("http://twitter.com/wdaher/status/287977969287315457"),
                make_link("http://twitter.com/wdaher/status/287977969287315457"),
                make_inline_twitter_preview(
                    "http://twitter.com/wdaher/status/287977969287315456", normal_tweet_html
                ),
                make_inline_twitter_preview(
                    "http://twitter.com/wdaher/status/287977969287315457", normal_tweet_html
                ),
            ),
        )

        # Four distinct-looking links — only the first three get previews
        # (preview count is capped per message).
        msg = (
            "http://twitter.com/wdaher/status/287977969287315456 "
            "http://twitter.com/wdaher/status/287977969287315457 "
            "https://twitter.com/wdaher/status/287977969287315456 "
            "http://twitter.com/wdaher/status/287977969287315460"
        )
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            "<p>{} {} {} {}</p>\n{}{}{}".format(
                make_link("http://twitter.com/wdaher/status/287977969287315456"),
                make_link("http://twitter.com/wdaher/status/287977969287315457"),
                make_link("https://twitter.com/wdaher/status/287977969287315456"),
                make_link("http://twitter.com/wdaher/status/287977969287315460"),
                make_inline_twitter_preview(
                    "http://twitter.com/wdaher/status/287977969287315456", normal_tweet_html
                ),
                make_inline_twitter_preview(
                    "http://twitter.com/wdaher/status/287977969287315457", normal_tweet_html
                ),
                make_inline_twitter_preview(
                    "https://twitter.com/wdaher/status/287977969287315456", normal_tweet_html
                ),
            ),
        )

        # Previews appear directly after the paragraph containing their link.
        msg = (
            "Paragraph 1: http://twitter.com/wdaher/status/287977969287315456\n\n"
            "Paragraph 2\n\n"
            "Paragraph 3: http://twitter.com/wdaher/status/287977969287315457"
        )
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            "<p>Paragraph 1: {}</p>\n{}<p>Paragraph 2</p>\n<p>Paragraph 3: {}</p>\n{}".format(
                make_link("http://twitter.com/wdaher/status/287977969287315456"),
                make_inline_twitter_preview(
                    "http://twitter.com/wdaher/status/287977969287315456", normal_tweet_html
                ),
                make_link("http://twitter.com/wdaher/status/287977969287315457"),
                make_inline_twitter_preview(
                    "http://twitter.com/wdaher/status/287977969287315457", normal_tweet_html
                ),
            ),
        )

        # Tweet whose link text contains an @ — rendered as a link, not a mention.
        msg = "http://twitter.com/wdaher/status/287977969287315458"
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            "<p>{}</p>\n{}".format(
                make_link("http://twitter.com/wdaher/status/287977969287315458"),
                make_inline_twitter_preview(
                    "http://twitter.com/wdaher/status/287977969287315458",
                    mention_in_link_tweet_html,
                ),
            ),
        )

        # Tweet with attached media — preview embeds the (camo-proxied) image.
        msg = "http://twitter.com/wdaher/status/287977969287315459"
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            "<p>{}</p>\n{}".format(
                make_link("http://twitter.com/wdaher/status/287977969287315459"),
                make_inline_twitter_preview(
                    "http://twitter.com/wdaher/status/287977969287315459",
                    media_tweet_html,
                    (
                        '<div class="twitter-image">'
                        '<a href="http://t.co/xo7pAhK6n3">'
                        f"""<img src="{get_camo_url("https://pbs.twimg.com/media/BdoEjD4IEAIq86Z.jpg:small")}">"""
                        "</a>"
                        "</div>"
                    ),
                ),
            ),
        )

        # Tweet body containing emoji — emoji spans are preserved in the preview.
        msg = "http://twitter.com/wdaher/status/287977969287315460"
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            "<p>{}</p>\n{}".format(
                make_link("http://twitter.com/wdaher/status/287977969287315460"),
                make_inline_twitter_preview(
                    "http://twitter.com/wdaher/status/287977969287315460", emoji_in_tweet_html
                ),
            ),
        )

        # Tweet link inside a spoiler — preview is rendered inside the spoiler body.
        msg = "```spoiler secret tweet\nTweet: http://twitter.com/wdaher/status/287977969287315456\n```"
        converted = markdown_convert_wrapper(msg)
        rendered_spoiler = '<div class="spoiler-block"><div class="spoiler-header">\n<p>secret tweet</p>\n</div><div class="spoiler-content" aria-hidden="true">\n<p>Tweet: {}</p>\n{}</div></div>'
        self.assertEqual(
            converted,
            rendered_spoiler.format(
                make_link("http://twitter.com/wdaher/status/287977969287315456"),
                make_inline_twitter_preview(
                    "http://twitter.com/wdaher/status/287977969287315456", normal_tweet_html
                ),
            ),
        )
def test_fetch_tweet_data_settings_validation(self) -> None:
with self.settings(TEST_SUITE=False, TWITTER_CONSUMER_KEY=None):
self.assertIs(None, fetch_tweet_data("287977969287315459"))
def test_content_has_emoji(self) -> None:
self.assertFalse(content_has_emoji_syntax("boring"))
self.assertFalse(content_has_emoji_syntax("hello: world"))
self.assertFalse(content_has_emoji_syntax(":foobar"))
self.assertFalse(content_has_emoji_syntax("::: hello :::"))
self.assertTrue(content_has_emoji_syntax("foo :whatever:"))
self.assertTrue(content_has_emoji_syntax("\n:whatever:"))
self.assertTrue(content_has_emoji_syntax(":smile: ::::::"))
    def test_realm_emoji(self) -> None:
        """An active realm emoji renders as an <img>; once deactivated, the
        :name: syntax is left as literal text."""

        def emoji_img(name: str, file_name: str, realm_id: int) -> str:
            # Expected <img> markup; title is the name without colons,
            # underscores shown as spaces.
            return '<img alt="{}" class="emoji" src="{}" title="{}">'.format(
                name, get_emoji_url(file_name, realm_id), name[1:-1].replace("_", " ")
            )

        realm = get_realm("zulip")
        msg = Message(sender=self.example_user("hamlet"))
        converted = markdown_convert(":green_tick:", message_realm=realm, message=msg)
        realm_emoji = RealmEmoji.objects.filter(
            realm=realm, name="green_tick", deactivated=False
        ).get()
        self.assertEqual(
            converted.rendered_content,
            "<p>{}</p>".format(emoji_img(":green_tick:", realm_emoji.file_name, realm.id)),
        )

        # Deactivate realm emoji.
        do_remove_realm_emoji(realm, "green_tick")
        converted = markdown_convert(":green_tick:", message_realm=realm, message=msg)
        self.assertEqual(converted.rendered_content, "<p>:green_tick:</p>")
def test_deactivated_realm_emoji(self) -> None:
# Deactivate realm emoji.
realm = get_realm("zulip")
do_remove_realm_emoji(realm, "green_tick")
msg = Message(sender=self.example_user("hamlet"))
converted = markdown_convert(":green_tick:", message_realm=realm, message=msg)
self.assertEqual(converted.rendered_content, "<p>:green_tick:</p>")
    def test_unicode_emoji(self) -> None:
        """Unicode emoji are converted to accessible <span> markup, including
        consecutive emoji with no separator."""
        msg = "\u2615"  # ☕
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            '<p><span aria-label="coffee" class="emoji emoji-2615" role="img" title="coffee">:coffee:</span></p>',
        )

        msg = "\u2615\u2615"  # ☕☕
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            '<p><span aria-label="coffee" class="emoji emoji-2615" role="img" title="coffee">:coffee:</span><span aria-label="coffee" class="emoji emoji-2615" role="img" title="coffee">:coffee:</span></p>',
        )
def test_no_translate_emoticons_if_off(self) -> None:
user_profile = self.example_user("othello")
do_change_user_setting(user_profile, "translate_emoticons", False)
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = ":)"
expected = "<p>:)</p>"
converted = render_markdown(msg, content)
self.assertEqual(converted.rendered_content, expected)
    def test_same_markup(self) -> None:
        """The Unicode emoji and its :name: shortcode render to identical markup."""
        msg = "\u2615"  # ☕
        unicode_converted = markdown_convert_wrapper(msg)

        msg = ":coffee:"  # shortcode form of ☕
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(converted, unicode_converted)
    def test_links_in_topic_name(self) -> None:
        """topic_links extracts URLs from topic names: explicit http(s) URLs,
        scheme-less hosts with a recognized TLD, and preserves source order."""
        realm = get_realm("zulip")
        msg = Message(sender=self.example_user("othello"))

        msg.set_topic_name("https://google.com/hello-world")
        converted_topic = topic_links(realm.id, msg.topic_name())
        self.assertEqual(
            converted_topic,
            [{"url": "https://google.com/hello-world", "text": "https://google.com/hello-world"}],
        )

        msg.set_topic_name("http://google.com/hello-world")
        converted_topic = topic_links(realm.id, msg.topic_name())
        self.assertEqual(
            converted_topic,
            [{"url": "http://google.com/hello-world", "text": "http://google.com/hello-world"}],
        )

        # A scheme-less host with a known TLD is linked with https:// prepended.
        msg.set_topic_name("Without scheme google.com/hello-world")
        converted_topic = topic_links(realm.id, msg.topic_name())
        self.assertEqual(
            converted_topic,
            [{"url": "https://google.com/hello-world", "text": "google.com/hello-world"}],
        )

        # A scheme-less string without a recognized TLD is not linked.
        msg.set_topic_name("Without scheme random.words/hello-world")
        converted_topic = topic_links(realm.id, msg.topic_name())
        self.assertEqual(converted_topic, [])

        msg.set_topic_name(
            "Try out http://ftp.debian.org, https://google.com/ and https://google.in/."
        )
        converted_topic = topic_links(realm.id, msg.topic_name())
        self.assertEqual(
            converted_topic,
            [
                {"url": "http://ftp.debian.org", "text": "http://ftp.debian.org"},
                {"url": "https://google.com/", "text": "https://google.com/"},
                {"url": "https://google.in/", "text": "https://google.in/"},
            ],
        )

        # test order for links without scheme
        msg.set_topic_name("google.in google.com")
        converted_topic = topic_links(realm.id, msg.topic_name())
        self.assertEqual(
            converted_topic,
            [
                {"url": "https://google.in", "text": "google.in"},
                {"url": "https://google.com", "text": "google.com"},
            ],
        )
    # NOTE(review): this test appears to have been corrupted in this copy of the
    # file — the lines beginning '<p>We should fix <a href=...' and
    # '<p><a href="https://trac.example.com/ticket/ZUL-123">content: str, ...'
    # are mid-statement fragments with unterminated string literals; the code
    # that originally sat between them (the rest of the rendered-content
    # assertion and the definition of the assert_conversion helper) has been
    # lost and must be restored from version control. The code below is kept
    # byte-identical pending that restoration.
    def test_realm_patterns(self) -> None:
        """Realm linkifiers turn #NNN-style references into trac links, in both
        message bodies and topic names, without double-matching nested patterns."""
        realm = get_realm("zulip")
        url_format_string = r"https://trac.example.com/ticket/%(id)s"
        linkifier = RealmFilter(
            realm=realm, pattern=r"#(?P<id>[0-9]{2,8})", url_format_string=url_format_string
        )
        linkifier.save()
        self.assertEqual(
            linkifier.__str__(),
            "<RealmFilter(zulip): #(?P<id>[0-9]{2,8}) https://trac.example.com/ticket/%(id)s>",
        )
        msg = Message(sender=self.example_user("othello"))
        msg.set_topic_name("#444")
        flush_per_request_caches()
        content = "We should fix #224 and #115, but not issue#124 or #1124z or [trac #15](https://trac.example.com/ticket/16) today."
        converted = markdown_convert(content, message_realm=realm, message=msg)
        converted_topic = topic_links(realm.id, msg.topic_name())
        self.assertEqual(
            converted.rendered_content,
            # NOTE(review): corrupted fragment — unterminated string literal;
            # the remainder of this assertion was lost.
            '<p>We should fix <a href="https://trac.example.com/ticket/224"> converted_topic,
            [
                {"url": "https://trac.example.com/ticket/444", "text": "#444"},
                {"url": "https://google.com", "text": "https://google.com"},
            ],
        )
        RealmFilter(
            realm=realm,
            pattern=r"#(?P<id>[a-zA-Z]+-[0-9]+)",
            url_format_string=r"https://trac.example.com/ticket/%(id)s",
        ).save()
        msg = Message(sender=self.example_user("hamlet"))
        content = "#ZUL-123 was fixed and code was deployed to production, also #zul-321 was deployed to staging"
        converted = markdown_convert(content, message_realm=realm, message=msg)
        self.assertEqual(
            converted.rendered_content,
            # NOTE(review): corrupted fragment — this line fuses the tail of an
            # assertion with the signature of the lost assert_conversion helper.
            '<p><a href="https://trac.example.com/ticket/ZUL-123">content: str, should_have_converted: bool = True) -> None:
        converted = markdown_convert(content, message_realm=realm, message=msg).rendered_content
        converted_topic = topic_links(realm.id, content)
        if should_have_converted:
            self.assertTrue("https://trac.example.com" in converted)
            self.assert_length(converted_topic, 1)
            self.assertEqual(
                converted_topic[0],
                {"url": "https://trac.example.com/ticket/123", "text": "#123"},
            )
        else:
            self.assertTrue("https://trac.example.com" not in converted)
            self.assert_length(converted_topic, 0)

        assert_conversion("Hello #123 World")
        assert_conversion("Hello #123World", False)
        assert_conversion("Hello#123 World", False)
        assert_conversion("Hello#123World", False)
        # Ideally, these should be converted, but Markdown doesn't
        # whitespace for that correctly yet.
        assert_conversion("チケットは#123です", False)
        assert_conversion("チケットは #123です", False)
        assert_conversion("チケットは#123 です", False)
        assert_conversion("チケットは #123 です")
        assert_conversion("(#123)")
        assert_conversion("#123>")
        assert_conversion('"#123"')
        assert_conversion("#123@")
        assert_conversion(")#123(", False)
        assert_conversion("##123", False)
        # test nested realm patterns should avoid double matching
        RealmFilter(
            realm=realm,
            pattern=r"hello#(?P<id>[0-9]+)",
            url_format_string=r"https://trac.example.com/hello/%(id)s",
        ).save()
        converted_topic = topic_links(realm.id, "hello#123 #234")
        self.assertEqual(
            converted_topic,
            [
                {"url": "https://trac.example.com/hello/123", "text": "hello#123"},
                {"url": "https://trac.example.com/ticket/234", "text": "#234"},
            ],
        )
        # test correct order when realm pattern and normal links are both present.
        converted_topic = topic_links(realm.id, "#234 https://google.com")
        self.assertEqual(
            converted_topic,
            [
                {"url": "https://trac.example.com/ticket/234", "text": "#234"},
                {"url": "https://google.com", "text": "https://google.com"},
            ],
        )
    def test_multiple_matching_realm_patterns(self) -> None:
        """When two linkifiers both match the same text, message rendering uses
        only the first (AtomicString limitation), while topic links include both."""
        realm = get_realm("zulip")
        url_format_string = r"https://trac.example.com/ticket/%(id)s"
        linkifier_1 = RealmFilter(
            realm=realm,
            pattern=r"(?P<id>ABC\-[0-9]+)(?![A-Z0-9-])",
            url_format_string=url_format_string,
        )
        linkifier_1.save()
        self.assertEqual(
            linkifier_1.__str__(),
            r"<RealmFilter(zulip): (?P<id>ABC\-[0-9]+)(?![A-Z0-9-])"
            " https://trac.example.com/ticket/%(id)s>",
        )

        url_format_string = r"https://other-trac.example.com/ticket/%(id)s"
        linkifier_2 = RealmFilter(
            realm=realm,
            pattern=r"(?P<id>[A-Z][A-Z0-9]*\-[0-9]+)(?![A-Z0-9-])",
            url_format_string=url_format_string,
        )
        linkifier_2.save()
        self.assertEqual(
            linkifier_2.__str__(),
            r"<RealmFilter(zulip): (?P<id>[A-Z][A-Z0-9]*\-[0-9]+)(?![A-Z0-9-])"
            " https://other-trac.example.com/ticket/%(id)s>",
        )

        msg = Message(sender=self.example_user("othello"))
        msg.set_topic_name("ABC-123")
        flush_per_request_caches()

        content = (
            "We should fix ABC-123 or [trac ABC-123](https://trac.example.com/ticket/16) today."
        )
        converted = markdown_convert(content, message_realm=realm, message=msg)
        converted_topic = topic_links(realm.id, msg.topic_name())

        # The second linkifier (which was saved later) was ignored as the content was marked AtomicString after first conversion.
        # There was no easy way to support parsing both linkifiers and not run into an infinite loop, hence the second linkifier is ignored.
        self.assertEqual(
            converted.rendered_content,
            '<p>We should fix <a href="https://trac.example.com/ticket/ABC-123">ABC-123</a> or <a href="https://trac.example.com/ticket/16">trac ABC-123</a> today.</p>',
        )
        # Both the links should be generated in topics.
        self.assertEqual(
            converted_topic,
            [
                {"url": "https://trac.example.com/ticket/ABC-123", "text": "ABC-123"},
                {"url": "https://other-trac.example.com/ticket/ABC-123", "text": "ABC-123"},
            ],
        )
    def test_flush_linkifier(self) -> None:
        """The per-realm linkifier cache is flushed by the flush_linkifiers
        signal handler and by saving a new RealmFilter."""
        realm = get_realm("zulip")

        def flush() -> None:
            # flush_linkifiers only reads instance.realm_id, so a minimal
            # stand-in object (cast to RealmFilter for typing) is sufficient.
            class Instance:
                realm_id: Optional[int] = None

            instance = Instance()
            instance.realm_id = realm.id
            flush_linkifiers(sender=RealmFilter, instance=cast(RealmFilter, instance))

        def save_new_linkifier() -> None:
            linkifier = RealmFilter(realm=realm, pattern=r"whatever", url_format_string="whatever")
            linkifier.save()

        # start fresh for our realm
        flush()
        self.assertFalse(realm_in_local_linkifiers_cache(realm.id))

        # call this just for side effects of populating the cache
        linkifiers_for_realm(realm.id)
        self.assertTrue(realm_in_local_linkifiers_cache(realm.id))

        # Saving a new RealmFilter should have the side effect of
        # flushing the cache.
        save_new_linkifier()
        self.assertFalse(realm_in_local_linkifiers_cache(realm.id))

        # and flush it one more time, to make sure we don't get a KeyError
        flush()
        self.assertFalse(realm_in_local_linkifiers_cache(realm.id))
def test_realm_patterns_negative(self) -> None:
realm = get_realm("zulip")
RealmFilter(
realm=realm,
pattern=r"#(?P<id>[0-9]{2,8})",
url_format_string=r"https://trac.example.com/ticket/%(id)s",
).save()
boring_msg = Message(sender=self.example_user("othello"))
boring_msg.set_topic_name("no match here")
converted_boring_topic = topic_links(realm.id, boring_msg.topic_name())
self.assertEqual(converted_boring_topic, [])
def test_is_status_message(self) -> None:
user_profile = self.example_user("othello")
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "/me makes a list\n* one\n* two"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
"<p>/me makes a list</p>\n<ul>\n<li>one</li>\n<li>two</li>\n</ul>",
)
self.assertTrue(Message.is_status_message(content, rendering_result.rendered_content))
content = "/me takes a walk"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
"<p>/me takes a walk</p>",
)
self.assertTrue(Message.is_status_message(content, rendering_result.rendered_content))
content = "/me writes a second line\nline"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
"<p>/me writes a second line<br>\nline</p>",
)
self.assertTrue(Message.is_status_message(content, rendering_result.rendered_content))
def test_alert_words(self) -> None:
user_profile = self.example_user("othello")
do_add_alert_words(user_profile, ["ALERTWORD", "scaryword"])
msg = Message(sender=user_profile, sending_client=get_client("test"))
realm_alert_words_automaton = get_alert_word_automaton(user_profile.realm)
def render(msg: Message, content: str) -> MessageRenderingResult:
return render_markdown(
msg, content, realm_alert_words_automaton=realm_alert_words_automaton
)
content = "We have an ALERTWORD day today!"
rendering_result = render(msg, content)
self.assertEqual(
rendering_result.rendered_content, "<p>We have an ALERTWORD day today!</p>"
)
self.assertEqual(rendering_result.user_ids_with_alert_words, {user_profile.id})
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "We have a NOTHINGWORD day today!"
rendering_result = render(msg, content)
self.assertEqual(
rendering_result.rendered_content, "<p>We have a NOTHINGWORD day today!</p>"
)
self.assertEqual(rendering_result.user_ids_with_alert_words, set())
    def test_alert_words_returns_user_ids_with_alert_words(self) -> None:
        """Each user whose alert phrase occurs in the content is flagged;
        aaron's "hey" does not occur and aaron is therefore excluded."""
        alert_words_for_users: Dict[str, List[str]] = {
            "hamlet": ["how"],
            "cordelia": ["this possible"],
            "iago": ["hello"],
            "prospero": ["hello"],
            "othello": ["how are you"],
            "aaron": ["hey"],
        }
        user_profiles: Dict[str, UserProfile] = {}
        for (username, alert_words) in alert_words_for_users.items():
            user_profile = self.example_user(username)
            user_profiles.update({username: user_profile})
            do_add_alert_words(user_profile, alert_words)
        sender_user_profile = self.example_user("polonius")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
        realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)

        def render(msg: Message, content: str) -> MessageRenderingResult:
            return render_markdown(
                msg, content, realm_alert_words_automaton=realm_alert_words_automaton
            )

        content = "hello how is this possible how are you doing today"
        rendering_result = render(msg, content)
        expected_user_ids: Set[int] = {
            user_profiles["hamlet"].id,
            user_profiles["cordelia"].id,
            user_profiles["iago"].id,
            user_profiles["prospero"].id,
            user_profiles["othello"].id,
        }
        # All users except aaron ("hey" is absent) are flagged.
        self.assertEqual(rendering_result.user_ids_with_alert_words, expected_user_ids)
    def test_alert_words_returns_user_ids_with_alert_words_1(self) -> None:
        """Alert words are detected across a multi-line message body,
        case-insensitively (e.g. "Prod"/"prod")."""
        alert_words_for_users: Dict[str, List[str]] = {
            "hamlet": ["provisioning", "Prod deployment"],
            "cordelia": ["test", "Prod"],
            "iago": ["prod"],
            "prospero": ["deployment"],
            "othello": ["last"],
        }
        user_profiles: Dict[str, UserProfile] = {}
        for (username, alert_words) in alert_words_for_users.items():
            user_profile = self.example_user(username)
            user_profiles.update({username: user_profile})
            do_add_alert_words(user_profile, alert_words)
        sender_user_profile = self.example_user("polonius")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
        realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)

        def render(msg: Message, content: str) -> MessageRenderingResult:
            return render_markdown(
                msg, content, realm_alert_words_automaton=realm_alert_words_automaton
            )

        content = """Hello, everyone. Prod deployment has been completed
        And this is a new line
        to test out how Markdown convert this into something line ending split array
        and this is a new line
        last"""
        rendering_result = render(msg, content)
        expected_user_ids: Set[int] = {
            user_profiles["hamlet"].id,
            user_profiles["cordelia"].id,
            user_profiles["iago"].id,
            user_profiles["prospero"].id,
            user_profiles["othello"].id,
        }
        # All five users have at least one alert word in the content.
        self.assertEqual(rendering_result.user_ids_with_alert_words, expected_user_ids)
    def test_alert_words_returns_user_ids_with_alert_words_in_french(self) -> None:
        """Alert words containing accented (non-ASCII) characters are matched."""
        alert_words_for_users: Dict[str, List[str]] = {
            "hamlet": ["réglementaire", "une politique", "une merveille"],
            "cordelia": ["énormément", "Prod"],
            "iago": ["prod"],
            "prospero": ["deployment"],
            "othello": ["last"],
        }
        user_profiles: Dict[str, UserProfile] = {}
        for (username, alert_words) in alert_words_for_users.items():
            user_profile = self.example_user(username)
            user_profiles.update({username: user_profile})
            do_add_alert_words(user_profile, alert_words)
        sender_user_profile = self.example_user("polonius")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
        realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)

        def render(msg: Message, content: str) -> MessageRenderingResult:
            return render_markdown(
                msg, content, realm_alert_words_automaton=realm_alert_words_automaton
            )

        content = """This is to test out alert words work in languages with accented characters too
        bonjour est (énormément) ce a quoi ressemble le français
        et j'espère qu'il n'y n' réglementaire a pas de mots d'alerte dans ce texte français
        """
        rendering_result = render(msg, content)
        expected_user_ids: Set[int] = {user_profiles["hamlet"].id, user_profiles["cordelia"].id}
        # Only hamlet and cordelia have their alert-words appear in the message content
        self.assertEqual(rendering_result.user_ids_with_alert_words, expected_user_ids)
def test_alert_words_returns_empty_user_ids_with_alert_words(self) -> None:
alert_words_for_users: Dict[str, List[str]] = {
"hamlet": [],
"cordelia": [],
"iago": [],
"prospero": [],
"othello": [],
"aaron": [],
}
user_profiles: Dict[str, UserProfile] = {}
for (username, alert_words) in alert_words_for_users.items():
user_profile = self.example_user(username)
user_profiles.update({username: user_profile})
do_add_alert_words(user_profile, alert_words)
sender_user_profile = self.example_user("polonius")
msg = Message(sender=user_profile, sending_client=get_client("test"))
realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)
def render(msg: Message, content: str) -> MessageRenderingResult:
return render_markdown(
msg, content, realm_alert_words_automaton=realm_alert_words_automaton
)
content = """hello how is this possible how are you doing today
This is to test that the no user_ids who have alrert wourldword is participating
in sending of the message
"""
rendering_result = render(msg, content)
expected_user_ids: Set[int] = set()
# None of the users have their alert-words appear in the message content
self.assertEqual(rendering_result.user_ids_with_alert_words, expected_user_ids)
def get_mock_alert_words(self, num_words: int, word_length: int) -> List[str]:
alert_words = ["x" * word_length] * num_words # type List[str]
return alert_words
def test_alert_words_with_empty_alert_words(self) -> None:
alert_words_for_users: Dict[str, List[str]] = {
"hamlet": [],
"cordelia": [],
"iago": [],
"othello": [],
}
user_profiles: Dict[str, UserProfile] = {}
for (username, alert_words) in alert_words_for_users.items():
user_profile = self.example_user(username)
user_profiles.update({username: user_profile})
do_add_alert_words(user_profile, alert_words)
sender_user_profile = self.example_user("polonius")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)
def render(msg: Message, content: str) -> MessageRenderingResult:
return render_markdown(
msg, content, realm_alert_words_automaton=realm_alert_words_automaton
)
content = """This is to test a empty alert words i.e. no user has any alert-words set"""
rendering_result = render(msg, content)
expected_user_ids: Set[int] = set()
self.assertEqual(rendering_result.user_ids_with_alert_words, expected_user_ids)
    # NOTE(review): "retuns" in the method name is a typo for "returns"; kept
    # as-is since renaming a test method changes the suite's public surface.
    def test_alert_words_retuns_user_ids_with_alert_words_with_huge_alert_words(self) -> None:
        """Matching stays correct even when other users have very large
        alert-word lists (500 words each); only hamlet's word appears."""
        alert_words_for_users: Dict[str, List[str]] = {
            "hamlet": ["issue124"],
            "cordelia": self.get_mock_alert_words(500, 10),
            "iago": self.get_mock_alert_words(500, 10),
            "othello": self.get_mock_alert_words(500, 10),
        }
        user_profiles: Dict[str, UserProfile] = {}
        for (username, alert_words) in alert_words_for_users.items():
            user_profile = self.example_user(username)
            user_profiles.update({username: user_profile})
            do_add_alert_words(user_profile, alert_words)
        sender_user_profile = self.example_user("polonius")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
        realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)

        def render(msg: Message, content: str) -> MessageRenderingResult:
            return render_markdown(
                msg, content, realm_alert_words_automaton=realm_alert_words_automaton
            )

        content = """The code above will print 10 random values of numbers between 1 and 100.
        The second line, for x in range(10), determines how many values will be printed (when you use
        range(x), the number that you use in place of x will be the amount of values that you'll have
        printed. if you want 20 values, use range(20). use range(5) if you only want 5 values returned,
        etc.). I was talking abou the issue124 on github. Then the third line: print random.randint(1,101) will automatically select a random integer
        between 1 and 100 for you. The process is fairly simple
        """
        rendering_result = render(msg, content)
        # Only hamlet's "issue124" occurs in the content.
        expected_user_ids: Set[int] = {user_profiles["hamlet"].id}
        self.assertEqual(rendering_result.user_ids_with_alert_words, expected_user_ids)
    def test_default_code_block_language(self) -> None:
        """Untagged fenced code blocks pick up the realm's
        default_code_block_language; an explicit tag (including "none")
        always overrides the realm default."""
        realm = get_realm("zulip")
        self.assertEqual(realm.default_code_block_language, None)
        text = "```{}\nconsole.log('Hello World');\n```\n"

        # Render once per explicit language tag (and once untagged) while the
        # realm has no default.
        msg_with_js = markdown_convert_wrapper(text.format("js"))
        msg_with_python = markdown_convert_wrapper(text.format("python"))
        msg_without_language = markdown_convert_wrapper(text.format(""))
        msg_with_quote = markdown_convert_wrapper(text.format("quote"))
        msg_with_math = markdown_convert_wrapper(text.format("math"))
        msg_with_none = markdown_convert_wrapper(text.format("none"))

        # Re-render under each realm default in turn.
        do_set_realm_property(realm, "default_code_block_language", "javascript", acting_user=None)
        msg_without_language_default_js = markdown_convert_wrapper(text.format(""))
        msg_with_python_default_js = markdown_convert_wrapper(text.format("python"))
        do_set_realm_property(realm, "default_code_block_language", "python", acting_user=None)
        msg_without_language_default_py = markdown_convert_wrapper(text.format(""))
        msg_with_none_default_py = markdown_convert_wrapper(text.format("none"))
        do_set_realm_property(realm, "default_code_block_language", "quote", acting_user=None)
        msg_without_language_default_quote = markdown_convert_wrapper(text.format(""))
        do_set_realm_property(realm, "default_code_block_language", "math", acting_user=None)
        msg_without_language_default_math = markdown_convert_wrapper(text.format(""))
        do_set_realm_property(realm, "default_code_block_language", None, acting_user=None)
        msg_without_language_final = markdown_convert_wrapper(text.format(""))

        # Untagged blocks render as if tagged with the realm default;
        # explicitly tagged blocks are unaffected by the default.
        self.assertTrue(msg_with_js == msg_without_language_default_js)
        self.assertTrue(
            msg_with_python == msg_with_python_default_js == msg_without_language_default_py
        )
        self.assertTrue(msg_with_quote == msg_without_language_default_quote)
        self.assertTrue(msg_with_math == msg_without_language_default_math)
        self.assertTrue(msg_without_language == msg_without_language_final)
        self.assertTrue(msg_with_none == msg_with_none_default_py)

        # The realm default also applies to untagged fences nested inside a
        # quote block.
        nested_text = "````quote\n\n{}\n\n{}````".format(text.format("js"), text.format(""))
        do_set_realm_property(realm, "default_code_block_language", "javascript", acting_user=None)
        rendered = markdown_convert_wrapper(nested_text)
        with_language, without_language = re.findall(r"<pre>(.*?)$", rendered, re.MULTILINE)
        self.assertTrue(with_language == without_language)
        do_set_realm_property(realm, "default_code_block_language", None, acting_user=None)
        rendered = markdown_convert_wrapper(nested_text)
        with_language, without_language = re.findall(r"<pre>(.*?)$", rendered, re.MULTILINE)
        self.assertFalse(with_language == without_language)
def test_mention_wildcard(self) -> None:
user_profile = self.example_user("othello")
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "@**all** test"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p><span class="user-mention" data-user-id="*">' "@all" "</span> test</p>",
)
self.assertTrue(rendering_result.mentions_wildcard)
def test_mention_everyone(self) -> None:
user_profile = self.example_user("othello")
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "@**everyone** test"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p><span class="user-mention" data-user-id="*">' "@everyone" "</span> test</p>",
)
self.assertTrue(rendering_result.mentions_wildcard)
def test_mention_stream(self) -> None:
user_profile = self.example_user("othello")
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "@**stream** test"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p><span class="user-mention" data-user-id="*">' "@stream" "</span> test</p>",
)
self.assertTrue(rendering_result.mentions_wildcard)
def test_mention_at_wildcard(self) -> None:
user_profile = self.example_user("othello")
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "@all test"
rendering_result = render_markdown(msg, content)
self.assertEqual(rendering_result.rendered_content, "<p>@all test</p>")
self.assertFalse(rendering_result.mentions_wildcard)
self.assertEqual(rendering_result.mentions_user_ids, set())
def test_mention_at_everyone(self) -> None:
user_profile = self.example_user("othello")
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "@everyone test"
rendering_result = render_markdown(msg, content)
self.assertEqual(rendering_result.rendered_content, "<p>@everyone test</p>")
self.assertFalse(rendering_result.mentions_wildcard)
self.assertEqual(rendering_result.mentions_user_ids, set())
def test_mention_word_starting_with_at_wildcard(self) -> None:
user_profile = self.example_user("othello")
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "test @alleycat.com test"
rendering_result = render_markdown(msg, content)
self.assertEqual(rendering_result.rendered_content, "<p>test @alleycat.com test</p>")
self.assertFalse(rendering_result.mentions_wildcard)
self.assertEqual(rendering_result.mentions_user_ids, set())
def test_mention_at_normal_user(self) -> None:
user_profile = self.example_user("othello")
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "@aaron test"
rendering_result = render_markdown(msg, content)
self.assertEqual(rendering_result.rendered_content, "<p>@aaron test</p>")
self.assertFalse(rendering_result.mentions_wildcard)
self.assertEqual(rendering_result.mentions_user_ids, set())
def test_mention_single(self) -> None:
sender_user_profile = self.example_user("othello")
user_profile = self.example_user("hamlet")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
user_id = user_profile.id
content = "@**King Hamlet**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p><span class="user-mention" ' f'data-user-id="{user_id}">' "@King Hamlet</span></p>",
)
self.assertEqual(rendering_result.mentions_user_ids, {user_profile.id})
content = f"@**|{user_id}**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p><span class="user-mention" ' f'data-user-id="{user_id}">' "@King Hamlet</span></p>",
)
self.assertEqual(rendering_result.mentions_user_ids, {user_profile.id})
def test_mention_silent(self) -> None:
sender_user_profile = self.example_user("othello")
user_profile = self.example_user("hamlet")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
user_id = user_profile.id
content = "@_**King Hamlet**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p><span class="user-mention silent" '
f'data-user-id="{user_id}">'
"King Hamlet</span></p>",
)
self.assertEqual(rendering_result.mentions_user_ids, set())
def test_silent_wildcard_mention(self) -> None:
user_profile = self.example_user("othello")
msg = Message(sender=user_profile, sending_client=get_client("test"))
wildcards = ["all", "everyone", "stream"]
for wildcard in wildcards:
content = f"@_**{wildcard}**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
f'<p><span class="user-mention silent" data-user-id="*">{wildcard}</span></p>',
)
self.assertFalse(rendering_result.mentions_wildcard)
    def test_mention_invalid_followed_by_valid(self) -> None:
        """An invalid mention earlier in the message must not prevent a later
        valid mention from rendering."""
        sender_user_profile = self.example_user("othello")
        user_profile = self.example_user("hamlet")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
        user_id = user_profile.id
        content = "@**Invalid user** and @**King Hamlet**"
        rendering_result = render_markdown(msg, content)
        # The invalid name degrades to bold text; the valid one becomes a span.
        self.assertEqual(
            rendering_result.rendered_content,
            '<p>@<strong>Invalid user</strong> and <span class="user-mention" '
            f'data-user-id="{user_id}">'
            "@King Hamlet</span></p>",
        )
        self.assertEqual(rendering_result.mentions_user_ids, {user_profile.id})
    def test_invalid_mention_not_uses_valid_mention_data(self) -> None:
        """A mention carrying a real user ID but the wrong name must not be able
        to borrow the valid mention's data."""
        sender_user_profile = self.example_user("othello")
        hamlet = self.example_user("hamlet")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
        # The second mention reuses hamlet's ID with a mismatched name ("aaron")
        # and so must not be allowed
        # to use that data for creating a valid mention.
        content = f"@**King Hamlet|{hamlet.id}** and @**aaron|{hamlet.id}**"
        rendering_result = render_markdown(msg, content)
        self.assertEqual(
            rendering_result.rendered_content,
            f'<p><span class="user-mention" data-user-id="{hamlet.id}">'
            f"@King Hamlet</span> and @<strong>aaron|{hamlet.id}</strong></p>",
        )
        self.assertEqual(rendering_result.mentions_user_ids, {hamlet.id})
    def test_silent_mention_invalid_followed_by_valid(self) -> None:
        """An invalid silent mention must not prevent a later valid silent
        mention from rendering; neither records a mentioned user."""
        sender_user_profile = self.example_user("othello")
        user_profile = self.example_user("hamlet")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
        user_id = user_profile.id
        # Invalid + valid silent mentions by name.
        content = "@_**Invalid user** and @_**King Hamlet**"
        rendering_result = render_markdown(msg, content)
        self.assertEqual(
            rendering_result.rendered_content,
            '<p>@_<strong>Invalid user</strong> and <span class="user-mention silent" '
            f'data-user-id="{user_id}">'
            "King Hamlet</span></p>",
        )
        self.assertEqual(rendering_result.mentions_user_ids, set())
        # Invalid + valid silent mentions by user ID.
        content = f"@_**|123456789** and @_**|{user_id}**"
        rendering_result = render_markdown(msg, content)
        self.assertEqual(
            rendering_result.rendered_content,
            "<p>@_<strong>|123456789</strong> and "
            '<span class="user-mention silent" '
            f'data-user-id="{user_id}">'
            "King Hamlet</span></p>",
        )
        self.assertEqual(rendering_result.mentions_user_ids, set())
    def test_possible_mentions(self) -> None:
        """possible_mentions() extracts candidate mention names from raw content
        and reports whether a wildcard mention is present."""

        def assert_mentions(content: str, names: Set[str], has_wildcards: bool = False) -> None:
            # possible_mentions returns a (names, has_wildcards) pair.
            self.assertEqual(possible_mentions(content), (names, has_wildcards))

        aaron = self.example_user("aaron")
        assert_mentions("", set())
        assert_mentions("boring", set())
        assert_mentions("@**all**", set(), True)
        # Mention syntax embedded inside a word does not count.
        assert_mentions("smush@**steve**smush", set())
        assert_mentions(
            f"Hello @**King Hamlet**, @**|{aaron.id}** and @**Cordelia, Lear's daughter**\n@**Foo van Barson|1234** @**all**",
            {"King Hamlet", f"|{aaron.id}", "Cordelia, Lear's daughter", "Foo van Barson|1234"},
            True,
        )
    def test_mention_multiple(self) -> None:
        """Multiple mentions in one message each render and are all recorded."""
        sender_user_profile = self.example_user("othello")
        hamlet = self.example_user("hamlet")
        cordelia = self.example_user("cordelia")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
        content = "@**King Hamlet** and @**Cordelia, Lear's daughter**, check this out"
        rendering_result = render_markdown(msg, content)
        self.assertEqual(
            rendering_result.rendered_content,
            "<p>"
            '<span class="user-mention" '
            f'data-user-id="{hamlet.id}">@King Hamlet</span> and '
            '<span class="user-mention" '
            f'data-user-id="{cordelia.id}">@Cordelia, Lear\'s daughter</span>, '
            "check this out</p>",
        )
        self.assertEqual(rendering_result.mentions_user_ids, {hamlet.id, cordelia.id})
    def test_mention_in_quotes(self) -> None:
        """Mentions inside quoted text are rendered silently and are not
        recorded, while mentions outside the quote behave normally."""
        othello = self.example_user("othello")
        hamlet = self.example_user("hamlet")
        cordelia = self.example_user("cordelia")
        msg = Message(sender=othello, sending_client=get_client("test"))
        # One quoted line with mentions, followed by an unquoted line.
        content = "> @**King Hamlet** and @**Othello, the Moor of Venice**\n\n @**King Hamlet** and @**Cordelia, Lear's daughter**"
        rendering_result = render_markdown(msg, content)
        self.assertEqual(
            rendering_result.rendered_content,
            "<blockquote>\n<p>"
            f'<span class="user-mention silent" data-user-id="{hamlet.id}">King Hamlet</span>'
            " and "
            f'<span class="user-mention silent" data-user-id="{othello.id}">Othello, the Moor of Venice</span>'
            "</p>\n</blockquote>\n"
            "<p>"
            f'<span class="user-mention" data-user-id="{hamlet.id}">@King Hamlet</span>'
            " and "
            f'<span class="user-mention" data-user-id="{cordelia.id}">@Cordelia, Lear\'s daughter</span>'
            "</p>",
        )
        # Only the unquoted mentions are recorded.
        self.assertEqual(rendering_result.mentions_user_ids, {hamlet.id, cordelia.id})

        # Both fenced quote and > quote should be identical for both silent and regular syntax.
        expected = (
            "<blockquote>\n<p>"
            f'<span class="user-mention silent" data-user-id="{hamlet.id}">King Hamlet</span>'
            "</p>\n</blockquote>"
        )
        content = "```quote\n@**King Hamlet**\n```"
        rendering_result = render_markdown(msg, content)
        self.assertEqual(rendering_result.rendered_content, expected)
        self.assertEqual(rendering_result.mentions_user_ids, set())
        content = "> @**King Hamlet**"
        rendering_result = render_markdown(msg, content)
        self.assertEqual(rendering_result.rendered_content, expected)
        self.assertEqual(rendering_result.mentions_user_ids, set())
        content = "```quote\n@_**King Hamlet**\n```"
        rendering_result = render_markdown(msg, content)
        self.assertEqual(rendering_result.rendered_content, expected)
        self.assertEqual(rendering_result.mentions_user_ids, set())
        content = "> @_**King Hamlet**"
        rendering_result = render_markdown(msg, content)
        self.assertEqual(rendering_result.rendered_content, expected)
        self.assertEqual(rendering_result.mentions_user_ids, set())
    def test_wildcard_mention_in_quotes(self) -> None:
        """Wildcard mentions inside quoted text (either quote syntax, silent or
        not) render silently and do not set the wildcard flag."""
        user_profile = self.example_user("othello")
        message = Message(sender=user_profile, sending_client=get_client("test"))

        def assert_silent_mention(content: str, wildcard: str) -> None:
            # The quoted wildcard renders as a silent span inside a blockquote.
            expected = (
                "<blockquote>\n<p>"
                f'<span class="user-mention silent" data-user-id="*">{wildcard}</span>'
                "</p>\n</blockquote>"
            )
            rendering_result = render_markdown(message, content)
            self.assertEqual(rendering_result.rendered_content, expected)
            self.assertFalse(rendering_result.mentions_wildcard)

        wildcards = ["all", "everyone", "stream"]
        for wildcard in wildcards:
            assert_silent_mention(f"> @**{wildcard}**", wildcard)
            assert_silent_mention(f"> @_**{wildcard}**", wildcard)
            assert_silent_mention(f"```quote\n@**{wildcard}**\n```", wildcard)
            assert_silent_mention(f"```quote\n@_**{wildcard}**\n```", wildcard)
    def test_mention_duplicate_full_name(self) -> None:
        """Users sharing a full name can be disambiguated by the name|id
        mention syntax; both are rendered and recorded."""
        realm = get_realm("zulip")

        def make_user(email: str, full_name: str) -> UserProfile:
            # Helper to create a realm user with the given name.
            return create_user(
                email=email,
                password="whatever",
                realm=realm,
                full_name=full_name,
            )

        sender_user_profile = self.example_user("othello")
        # Two distinct users with the identical full name "Mark Twin".
        twin1 = make_user("twin1@example.com", "Mark Twin")
        twin2 = make_user("twin2@example.com", "Mark Twin")
        cordelia = self.example_user("cordelia")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
        content = f"@**Mark Twin|{twin1.id}**, @**Mark Twin|{twin2.id}** and @**Cordelia, Lear's daughter**, hi."
        rendering_result = render_markdown(msg, content)
        self.assertEqual(
            rendering_result.rendered_content,
            "<p>"
            '<span class="user-mention" '
            f'data-user-id="{twin1.id}">@Mark Twin</span>, '
            '<span class="user-mention" '
            f'data-user-id="{twin2.id}">@Mark Twin</span> and '
            '<span class="user-mention" '
            f'data-user-id="{cordelia.id}">@Cordelia, Lear\'s daughter</span>, '
            "hi.</p>",
        )
        self.assertEqual(rendering_result.mentions_user_ids, {twin1.id, twin2.id, cordelia.id})
def test_mention_invalid(self) -> None:
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "Hey @**Nonexistent User**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content, "<p>Hey @<strong>Nonexistent User</strong></p>"
)
self.assertEqual(rendering_result.mentions_user_ids, set())
def test_user_mention_atomic_string(self) -> None:
sender_user_profile = self.example_user("othello")
realm = get_realm("zulip")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
# Create a linkifier.
url_format_string = r"https://trac.example.com/ticket/%(id)s"
linkifier = RealmFilter(
realm=realm, pattern=r"#(?P<id>[0-9]{2,8})", url_format_string=url_format_string
)
linkifier.save()
self.assertEqual(
linkifier.__str__(),
"<RealmFilter(zulip): #(?P<id>[0-9]{2,8}) https://trac.example.com/ticket/%(id)s>",
)
# Create a user that potentially interferes with the pattern.
test_user = create_user(
email="atomic@example.com",
password="whatever",
realm=realm,
full_name="Atomic #123",
)
content = "@**Atomic #123**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p><span class="user-mention" '
f'data-user-id="{test_user.id}">'
"@Atomic #123</span></p>",
)
self.assertEqual(rendering_result.mentions_user_ids, {test_user.id})
content = "@_**Atomic #123**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p><span class="user-mention silent" '
f'data-user-id="{test_user.id}">'
"Atomic #123</span></p>",
)
self.assertEqual(rendering_result.mentions_user_ids, set())
    def create_user_group_for_test(self, user_group_name: str) -> UserGroup:
        """Create a user group in the zulip realm with othello as its sole member."""
        othello = self.example_user("othello")
        return create_user_group(user_group_name, [othello], get_realm("zulip"))
    def test_user_group_mention_single(self) -> None:
        """A user mention and a user-group mention render side by side, and both
        are recorded in the rendering result."""
        sender_user_profile = self.example_user("othello")
        user_profile = self.example_user("hamlet")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
        user_id = user_profile.id
        user_group = self.create_user_group_for_test("support")
        content = "@**King Hamlet** @*support*"
        rendering_result = render_markdown(msg, content)
        self.assertEqual(
            rendering_result.rendered_content,
            '<p><span class="user-mention" '
            f'data-user-id="{user_id}">'
            "@King Hamlet</span> "
            '<span class="user-group-mention" '
            f'data-user-group-id="{user_group.id}">'
            "@support</span></p>",
        )
        self.assertEqual(rendering_result.mentions_user_ids, {user_profile.id})
        self.assertEqual(rendering_result.mentions_user_group_ids, {user_group.id})
    def test_invalid_user_group_followed_by_valid_mention_single(self) -> None:
        """An invalid user-group mention degrades to emphasis and must not block
        a later valid group mention."""
        sender_user_profile = self.example_user("othello")
        user_profile = self.example_user("hamlet")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
        user_id = user_profile.id
        user_group = self.create_user_group_for_test("support")
        content = "@**King Hamlet** @*Invalid user group* @*support*"
        rendering_result = render_markdown(msg, content)
        self.assertEqual(
            rendering_result.rendered_content,
            '<p><span class="user-mention" '
            f'data-user-id="{user_id}">'
            "@King Hamlet</span> "
            "@<em>Invalid user group</em> "
            '<span class="user-group-mention" '
            f'data-user-group-id="{user_group.id}">'
            "@support</span></p>",
        )
        self.assertEqual(rendering_result.mentions_user_ids, {user_profile.id})
        self.assertEqual(rendering_result.mentions_user_group_ids, {user_group.id})
def test_user_group_mention_atomic_string(self) -> None:
sender_user_profile = self.example_user("othello")
realm = get_realm("zulip")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
user_profile = self.example_user("hamlet")
# Create a linkifier.
url_format_string = r"https://trac.example.com/ticket/%(id)s"
linkifier = RealmFilter(
realm=realm, pattern=r"#(?P<id>[0-9]{2,8})", url_format_string=url_format_string
)
linkifier.save()
self.assertEqual(
linkifier.__str__(),
"<RealmFilter(zulip): #(?P<id>[0-9]{2,8}) https://trac.example.com/ticket/%(id)s>",
)
# Create a user-group that potentially interferes with the pattern.
user_id = user_profile.id
user_group = self.create_user_group_for_test("support #123")
content = "@**King Hamlet** @*support #123*"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content,
'<p><span class="user-mention" '
f'data-user-id="{user_id}">'
"@King Hamlet</span> "
'<span class="user-group-mention" '
f'data-user-group-id="{user_group.id}">'
"@support #123</span></p>",
)
self.assertEqual(rendering_result.mentions_user_ids, {user_profile.id})
self.assertEqual(rendering_result.mentions_user_group_ids, {user_group.id})
    def test_possible_user_group_mentions(self) -> None:
        """possible_user_group_mentions() extracts candidate group names from
        raw content; user mentions and wildcards are ignored."""

        def assert_mentions(content: str, names: Set[str]) -> None:
            self.assertEqual(possible_user_group_mentions(content), names)

        assert_mentions("", set())
        assert_mentions("boring", set())
        # User/wildcard mention syntax is not a group mention.
        assert_mentions("@**all**", set())
        # Group syntax embedded inside a word does not count.
        assert_mentions("smush@*steve*smush", set())
        assert_mentions(
            "@*support* Hello @**King Hamlet** and @**Cordelia, Lear's daughter**\n"
            "@**Foo van Barson** @**all**",
            {"support"},
        )
        assert_mentions(
            "Attention @*support*, @*frontend* and @*backend*\ngroups.",
            {"support", "frontend", "backend"},
        )
    def test_user_group_mention_multiple(self) -> None:
        """Multiple user-group mentions in one message each render and are all
        recorded."""
        sender_user_profile = self.example_user("othello")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
        support = self.create_user_group_for_test("support")
        backend = self.create_user_group_for_test("backend")
        content = "@*support* and @*backend*, check this out"
        rendering_result = render_markdown(msg, content)
        self.assertEqual(
            rendering_result.rendered_content,
            "<p>"
            '<span class="user-group-mention" '
            f'data-user-group-id="{support.id}">'
            "@support</span> "
            "and "
            '<span class="user-group-mention" '
            f'data-user-group-id="{backend.id}">'
            "@backend</span>, "
            "check this out"
            "</p>",
        )
        self.assertEqual(rendering_result.mentions_user_group_ids, {support.id, backend.id})
    def test_user_group_mention_edit(self) -> None:
        """Editing a message to add/remove a user-group mention updates the
        'mentioned' flag on group members' UserMessage rows accordingly."""
        sender_user_profile = self.example_user("hamlet")
        user_profile = self.example_user("othello")
        # othello is the sole member of the "support" group (see
        # create_user_group_for_test), so his flags are what we check.
        self.create_user_group_for_test("support")
        self.login("hamlet")
        msg_id = self.send_stream_message(
            sender_user_profile, "Denmark", topic_name="editing", content="test"
        )

        def update_message_and_check_flag(content: str, mentioned: bool) -> None:
            # Edit the message via the API, then verify othello's flag state.
            result = self.client_patch(
                "/json/messages/" + str(msg_id),
                {
                    "message_id": msg_id,
                    "content": content,
                },
            )
            self.assert_json_success(result)
            um = UserMessage.objects.get(
                user_profile_id=user_profile.id,
                message_id=msg_id,
            )
            if mentioned:
                self.assertIn("mentioned", um.flags_list())
            else:
                self.assertNotIn("mentioned", um.flags_list())

        update_message_and_check_flag("@*support*", True)
        update_message_and_check_flag("@*support-invalid* edited", False)
        update_message_and_check_flag("@*support* edited", True)
        update_message_and_check_flag("edited", False)
        update_message_and_check_flag("@*support*", True)
        # A silent group mention does not set the flag.
        update_message_and_check_flag("@_*support*", False)
def test_user_group_mention_invalid(self) -> None:
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "Hey @*Nonexistent group*"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content, "<p>Hey @<em>Nonexistent group</em></p>"
)
self.assertEqual(rendering_result.mentions_user_group_ids, set())
    def test_user_group_silent_mention(self) -> None:
        """A silent group mention (@_*group*) renders a silent span and records
        no group mention."""
        sender_user_profile = self.example_user("othello")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
        support = self.create_user_group_for_test("support")
        content = "We'll add you to @_*support* user group."
        rendering_result = render_markdown(msg, content)
        self.assertEqual(
            rendering_result.rendered_content,
            "<p>We'll add you to "
            f'<span class="user-group-mention silent" data-user-group-id="{support.id}">support</span>'
            " user group.</p>",
        )
        self.assertEqual(rendering_result.mentions_user_group_ids, set())
    def test_user_group_mention_in_quotes(self) -> None:
        """Group mentions inside quoted text (either quote syntax, silent or
        not) render silently and record no group mention."""
        user_profile = self.example_user("othello")
        message = Message(sender=user_profile, sending_client=get_client("test"))
        backend = self.create_user_group_for_test("backend")

        def assert_silent_mention(content: str) -> None:
            expected = (
                "<blockquote>\n<p>"
                f'<span class="user-group-mention silent" data-user-group-id="{backend.id}">backend</span>'
                "</p>\n</blockquote>"
            )
            rendering_result = render_markdown(message, content)
            self.assertEqual(rendering_result.rendered_content, expected)
            self.assertEqual(rendering_result.mentions_user_group_ids, set())

        assert_silent_mention("> @*backend*")
        assert_silent_mention("> @_*backend*")
        assert_silent_mention("```quote\n@*backend*\n```")
        assert_silent_mention("```quote\n@_*backend*\n```")
def test_stream_single(self) -> None:
denmark = get_stream("Denmark", get_realm("zulip"))
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**Denmark**"
self.assertEqual(
render_markdown(msg, content).rendered_content,
'<p><a class="stream" data-stream-id="{d.id}" href="/#narrow/stream/{d.id}-Denmark">#{d.name}</a></p>'.format(
d=denmark,
),
)
    def test_invalid_stream_followed_by_valid_mention(self) -> None:
        """An invalid stream link degrades to bold text and must not block a
        later valid stream link."""
        denmark = get_stream("Denmark", get_realm("zulip"))
        sender_user_profile = self.example_user("othello")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
        content = "#**Invalid** and #**Denmark**"
        self.assertEqual(
            render_markdown(msg, content).rendered_content,
            '<p>#<strong>Invalid</strong> and <a class="stream" data-stream-id="{d.id}" href="/#narrow/stream/{d.id}-Denmark">#{d.name}</a></p>'.format(
                d=denmark,
            ),
        )
    def test_stream_multiple(self) -> None:
        """Multiple stream links in one message each render with their own
        stream id and narrow URL."""
        sender_user_profile = self.example_user("othello")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
        realm = get_realm("zulip")
        denmark = get_stream("Denmark", realm)
        scotland = get_stream("Scotland", realm)
        content = "Look to #**Denmark** and #**Scotland**, there something"
        self.assertEqual(
            render_markdown(msg, content).rendered_content,
            "<p>Look to "
            '<a class="stream" '
            'data-stream-id="{denmark.id}" '
            'href="/#narrow/stream/{denmark.id}-Denmark">#{denmark.name}</a> and '
            '<a class="stream" '
            'data-stream-id="{scotland.id}" '
            'href="/#narrow/stream/{scotland.id}-Scotland">#{scotland.name}</a>, '
            "there something</p>".format(denmark=denmark, scotland=scotland),
        )
    def test_stream_case_sensitivity(self) -> None:
        """A stream link with the exact (mixed-case) stream name renders as a link."""
        realm = get_realm("zulip")
        case_sens = Stream.objects.create(name="CaseSens", realm=realm)
        sender_user_profile = self.example_user("othello")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
        content = "#**CaseSens**"
        self.assertEqual(
            render_markdown(msg, content).rendered_content,
            '<p><a class="stream" data-stream-id="{s.id}" href="/#narrow/stream/{s.id}-{s.name}">#{s.name}</a></p>'.format(
                s=case_sens,
            ),
        )
def test_stream_case_sensitivity_nonmatching(self) -> None:
realm = get_realm("zulip")
Stream.objects.create(name="CaseSens", realm=realm)
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**casesens**"
self.assertEqual(
render_markdown(msg, content).rendered_content, "<p>#<strong>casesens</strong></p>"
)
    def test_topic_single(self) -> None:
        """#**stream>topic** renders as a stream-topic link with the topic
        encoded into the narrow URL."""
        denmark = get_stream("Denmark", get_realm("zulip"))
        sender_user_profile = self.example_user("othello")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
        content = "#**Denmark>some topic**"
        self.assertEqual(
            render_markdown(msg, content).rendered_content,
            '<p><a class="stream-topic" data-stream-id="{d.id}" href="/#narrow/stream/{d.id}-Denmark/topic/some.20topic">#{d.name} > some topic</a></p>'.format(
                d=denmark,
            ),
        )
def test_topic_atomic_string(self) -> None:
realm = get_realm("zulip")
sender_user_profile = self.example_user("othello")
url_format_string = r"https://trac.example.com/ticket/%(id)s"
linkifier = RealmFilter(
realm=realm, pattern=r"#(?P<id>[0-9]{2,8})", url_format_string=url_format_string
)
linkifier.save()
self.assertEqual(
linkifier.__str__(),
"<RealmFilter(zulip): #(?P<id>[0-9]{2,8}) https://trac.example.com/ticket/%(id)s>",
)
denmark = get_stream("Denmark", realm)
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**Denmark>#1234**"
self.assertEqual(
render_markdown(msg, content).rendered_content,
'<p><a class="stream-topic" data-stream-id="{d.id}" href="/#narrow/stream/{d.id}-Denmark/topic/.231234">#{d.name} > #1234</a></p>'.format(
d=denmark,
),
)
    def test_topic_multiple(self) -> None:
        """Multiple stream-topic links in one message each render with their own
        stream id and encoded topic URL."""
        denmark = get_stream("Denmark", get_realm("zulip"))
        scotland = get_stream("Scotland", get_realm("zulip"))
        sender_user_profile = self.example_user("othello")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
        content = "This has two links: #**Denmark>some topic** and #**Scotland>other topic**."
        self.assertEqual(
            render_markdown(msg, content).rendered_content,
            "<p>This has two links: "
            '<a class="stream-topic" data-stream-id="{denmark.id}" '
            'href="/#narrow/stream/{denmark.id}-{denmark.name}/topic/some.20topic">'
            "#{denmark.name} > some topic</a>"
            " and "
            '<a class="stream-topic" data-stream-id="{scotland.id}" '
            'href="/#narrow/stream/{scotland.id}-{scotland.name}/topic/other.20topic">'
            "#{scotland.name} > other topic</a>"
            ".</p>".format(denmark=denmark, scotland=scotland),
        )
    def test_possible_stream_names(self) -> None:
        """possible_linked_stream_names() extracts stream names (including
        non-ASCII ones) from raw content; user mentions are ignored."""
        content = """#**test here**
This mentions #**Denmark** too.
#**garçon** #**천국** @**Ignore Person**
"""
        self.assertEqual(
            possible_linked_stream_names(content),
            {"test here", "Denmark", "garçon", "천국"},
        )
    def test_stream_unicode(self) -> None:
        """A stream with a non-ASCII name renders as a link whose narrow URL
        uses Zulip's dot-escaped encoding of the name."""
        realm = get_realm("zulip")
        uni = Stream.objects.create(name="привет", realm=realm)
        sender_user_profile = self.example_user("othello")
        msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
        content = "#**привет**"
        # UTF-8 percent-encoding of "привет" with "%" replaced by "." —
        # presumably Zulip's narrow-URL hash encoding; see the expected href.
        quoted_name = ".D0.BF.D1.80.D0.B8.D0.B2.D0.B5.D1.82"
        href = f"/#narrow/stream/{uni.id}-{quoted_name}"
        self.assertEqual(
            render_markdown(msg, content).rendered_content,
            '<p><a class="stream" data-stream-id="{s.id}" href="{href}">#{s.name}</a></p>'.format(
                s=uni,
                href=href,
            ),
        )
def test_stream_atomic_string(self) -> None:
realm = get_realm("zulip")
sender_user_profile = self.example_user("othello")
url_format_string = r"https://trac.example.com/ticket/%(id)s"
linkifier = RealmFilter(
realm=realm, pattern=r"#(?P<id>[0-9]{2,8})", url_format_string=url_format_string
)
linkifier.save()
self.assertEqual(
linkifier.__str__(),
"<RealmFilter(zulip): #(?P<id>[0-9]{2,8}) https://trac.example.com/ticket/%(id)s>",
)
stream = Stream.objects.create(name="Stream #1234", realm=realm)
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**Stream #1234**"
href = f"/#narrow/stream/{stream.id}-Stream-.231234"
self.assertEqual(
render_markdown(msg, content).rendered_content,
'<p><a class="stream" data-stream-id="{s.id}" href="{href}">#{s.name}</a></p>'.format(
s=stream,
href=href,
),
)
def test_stream_invalid(self) -> None:
sender_user_profile = self.example_user("othello")
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "There #**Nonexistentstream**"
rendering_result = render_markdown(msg, content)
self.assertEqual(
rendering_result.rendered_content, "<p>There #<strong>Nonexistentstream</strong></p>"
)
self.assertEqual(rendering_result.mentions_user_ids, set())
    def test_image_preview_title(self) -> None:
        """A markdown image link renders both the plain link and an inline
        thumbnail preview whose title is the link text."""
        msg = "[My favorite image](https://example.com/testimage.png)"
        converted = markdown_convert_wrapper(msg)
        self.assertEqual(
            converted,
            "<p>"
            '<a href="https://example.com/testimage.png">My favorite image</a>'
            "</p>\n"
            '<div class="message_inline_image">'
            '<a href="https://example.com/testimage.png" title="My favorite image">'
            '<img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fexample.com%2Ftestimage.png&amp;size=full" src="/thumbnail?url=https%3A%2F%2Fexample.com%2Ftestimage.png&amp;size=thumbnail">'
            "</a>"
            "</div>",
        )
    def test_mit_rendering(self) -> None:
        """For zephyr-mirrored messages, markdown formatting (bold, lists) is
        left as literal text; only URL linkification is applied."""
        msg = "**test**"
        realm = get_realm("zephyr")
        client = get_client("zephyr_mirror")
        message = Message(sending_client=client, sender=self.mit_user("sipbtest"))
        converted = markdown_convert(msg, message_realm=realm, message=message)
        # Bold syntax stays literal.
        self.assertEqual(
            converted.rendered_content,
            "<p>**test**</p>",
        )
        # List syntax stays literal.
        msg = "* test"
        converted = markdown_convert(msg, message_realm=realm, message=message)
        self.assertEqual(
            converted.rendered_content,
            "<p>* test</p>",
        )
        # URLs are still linkified.
        msg = "https://lists.debian.org/debian-ctte/2014/02/msg00173.html"
        converted = markdown_convert(msg, message_realm=realm, message=message)
        self.assertEqual(
            converted.rendered_content,
            '<p><a href="https://lists.debian.org/debian-ctte/2014/02/msg00173.html">https://lists.debian.org/debian-ctte/2014/02/msg00173.html</a></p>',
        )
def test_url_to_a(self) -> None:
url = "javascript://example.com/invalidURL"
converted = url_to_a(db_data=None, url=url, text=url)
self.assertEqual(
converted,
"javascript://example.com/invalidURL",
)
    def test_disabled_code_block_processor(self) -> None:
        """Indented text becomes a code block normally, but not in email-gateway rendering."""
        # NOTE(review): the leading whitespace inside the message literal was
        # mangled in this copy; four spaces are required to trigger an
        # indented code block, consistent with the expected codehilite output.
        msg = (
            "Hello,\n\n"
            + "    I am writing this message to test something. I am writing this message to test something."
        )
        converted = markdown_convert_wrapper(msg)
        expected_output = (
            "<p>Hello,</p>\n"
            + '<div class="codehilite"><pre><span></span><code>I am writing this message to test something. I am writing this message to test something.\n'
            + "</code></pre></div>"
        )
        self.assertEqual(converted, expected_output)
        # With email_gateway=True on a fresh realm, the code block processor
        # is disabled and the same text renders as a plain paragraph.
        realm = do_create_realm(
            string_id="code_block_processor_test", name="code_block_processor_test"
        )
        maybe_update_markdown_engines(realm.id, True)
        rendering_result = markdown_convert(msg, message_realm=realm, email_gateway=True)
        expected_output = (
            "<p>Hello,</p>\n"
            + "<p>I am writing this message to test something. I am writing this message to test something.</p>"
        )
        self.assertEqual(rendering_result.rendered_content, expected_output)
def test_normal_link(self) -> None:
realm = get_realm("zulip")
sender_user_profile = self.example_user("othello")
message = Message(sender=sender_user_profile, sending_client=get_client("test"))
msg = "http://example.com/#settings/"
self.assertEqual(
markdown_convert(msg, message_realm=realm, message=message).rendered_content,
'<p><a href="http://example.com/#settings/">http://example.com/#settings/</a></p>',
)
def test_relative_link(self) -> None:
realm = get_realm("zulip")
sender_user_profile = self.example_user("othello")
message = Message(sender=sender_user_profile, sending_client=get_client("test"))
msg = "http://zulip.testserver/#narrow/stream/999-hello"
self.assertEqual(
markdown_convert(msg, message_realm=realm, message=message).rendered_content,
'<p><a href="#narrow/stream/999-hello">http://zulip.testserver/#narrow/stream/999-hello</a></p>',
)
    def test_relative_link_streams_page(self) -> None:
        """Links to this server's #streams page get a fragment-only href."""
        realm = get_realm("zulip")
        sender_user_profile = self.example_user("othello")
        message = Message(sender=sender_user_profile, sending_client=get_client("test"))
        msg = "http://zulip.testserver/#streams/all"
        self.assertEqual(
            markdown_convert(msg, message_realm=realm, message=message).rendered_content,
            '<p><a href="#streams/all">http://zulip.testserver/#streams/all</a></p>',
        )
    def test_md_relative_link(self) -> None:
        """Markdown links to this server keep their text but get a relative href."""
        realm = get_realm("zulip")
        sender_user_profile = self.example_user("othello")
        message = Message(sender=sender_user_profile, sending_client=get_client("test"))
        msg = "[hello](http://zulip.testserver/#narrow/stream/999-hello)"
        self.assertEqual(
            markdown_convert(msg, message_realm=realm, message=message).rendered_content,
            '<p><a href="#narrow/stream/999-hello">hello</a></p>',
        )
    def test_html_entity_conversion(self) -> None:
        """HTML entities are escaped inside code spans/blocks but preserved elsewhere."""
        # NOTE(review): entity spellings below look one-level HTML-decoded in
        # this copy (e.g. "©" vs "&copy;"), and blank lines / inner string
        # indentation may have been lost — verify against the canonical source.
        msg = """\
        Test raw: Hello, ©
        Test inline code: `©`
        Test fenced code:
        ```
        ©
        ©
        ```
        Test quote:
        ~~~quote
        ©
        ~~~
        Test a list:
        * ©
        * `©`
        * ```©```
        Test an indented block:

            ©"""

        expected_output = """\
        <p>Test raw: Hello, ©<br>
        Test inline code: <code>&copy;</code></p>
        <p>Test fenced code:</p>
        <div class="codehilite"><pre><span></span><code>&copy;
        &copy;
        </code></pre></div>
        <p>Test quote:</p>
        <blockquote>
        <p>©</p>
        </blockquote>
        <p>Test a list:</p>
        <ul>
        <li>©</li>
        <li><code>&copy;</code></li>
        <li><code>&copy;</code></li>
        </ul>
        <p>Test an indented block:</p>
        <div class="codehilite"><pre><span></span><code>&copy;
        </code></pre></div>"""

        # dedent() strips the common leading whitespace of the literals above.
        converted = markdown_convert_wrapper(dedent(msg))
        self.assertEqual(converted, dedent(expected_output))
class MarkdownApiTests(ZulipTestCase):
    """Tests for the /api/v1/messages/render endpoint."""

    def test_render_message_api(self) -> None:
        """Plain markdown is rendered to HTML for an authenticated API caller."""
        content = "That is a **bold** statement"
        result = self.api_post(
            self.example_user("othello"),
            "/api/v1/messages/render",
            dict(content=content),
        )
        self.assert_json_success(result)
        self.assertEqual(
            result.json()["rendered"], "<p>That is a <strong>bold</strong> statement</p>"
        )

    def test_render_mention_stream_api(self) -> None:
        """Stream and user mentions resolve to data-annotated links/spans."""
        content = "This mentions #**Denmark** and @**King Hamlet**."
        result = self.api_post(
            self.example_user("othello"),
            "/api/v1/messages/render",
            dict(content=content),
        )
        self.assert_json_success(result)
        # Look up the ids the rendered HTML is expected to embed.
        user_id = self.example_user("hamlet").id
        stream_id = get_stream("Denmark", get_realm("zulip")).id
        self.assertEqual(
            result.json()["rendered"],
            f'<p>This mentions <a class="stream" data-stream-id="{stream_id}" href="/#narrow/stream/{stream_id}-Denmark">#Denmark</a> and <span class="user-mention" data-user-id="{user_id}">@King Hamlet</span>.</p>',
        )
class MarkdownErrorTests(ZulipTestCase):
    """Error handling in the markdown rendering pipeline."""

    def test_markdown_error_handling(self) -> None:
        """A simulated processor failure raises MarkdownRenderingException."""
        with self.simulated_markdown_failure():
            with self.assertRaises(MarkdownRenderingException):
                markdown_convert_wrapper("")

    def test_send_message_errors(self) -> None:
        """A rendering failure while sending surfaces to the caller as JsonableError."""
        message = "whatever"
        with self.simulated_markdown_failure():
            # handle i18n properly here on some systems.
            with self.assertRaises(JsonableError):
                self.send_stream_message(self.example_user("othello"), "Denmark", message)

    @override_settings(MAX_MESSAGE_LENGTH=10)
    def test_ultra_long_rendering(self) -> None:
        """Rendered output vastly exceeding MAX_MESSAGE_LENGTH is rejected."""
        msg = "mock rendered message\n" * 10 * settings.MAX_MESSAGE_LENGTH
        # Mock the timeout helper to return the oversized rendering, and
        # silence the markdown error logger while the exception fires.
        with mock.patch("zerver.lib.markdown.timeout", return_value=msg), mock.patch(
            "zerver.lib.markdown.markdown_logger"
        ):
            with self.assertRaises(MarkdownRenderingException):
                markdown_convert_wrapper(msg)

    def test_curl_code_block_validation(self) -> None:
        """With content validators enabled, a curl example block fails rendering."""
        processor = SimulatedFencedBlockPreprocessor(Markdown())
        processor.run_content_validators = True
        # NOTE(review): leading whitespace inside these literals was mangled
        # in this copy; continuation lines are assumed to be indented.
        markdown_input = [
            "``` curl",
            "curl {{ api_url }}/v1/register",
            "    -u BOT_EMAIL_ADDRESS:BOT_API_KEY",
            '    -d "queue_id=1375801870:2942"',
            "```",
        ]
        with self.assertRaises(MarkdownRenderingException):
            processor.run(markdown_input)

    def test_curl_code_block_without_validation(self) -> None:
        """Without validators, the same curl block renders through the simulated processor."""
        processor = SimulatedFencedBlockPreprocessor(Markdown())
        markdown_input = [
            "``` curl",
            "curl {{ api_url }}/v1/register",
            "    -u BOT_EMAIL_ADDRESS:BOT_API_KEY",
            '    -d "queue_id=1375801870:2942"',
            "```",
        ]
        expected = [
            "",
            "**curl:curl {{ api_url }}/v1/register",
            "    -u BOT_EMAIL_ADDRESS:BOT_API_KEY",
            '    -d "queue_id=1375801870:2942"**',
            "",
            "",
        ]
        result = processor.run(markdown_input)
        self.assertEqual(result, expected)
| true | true |
f7f47b8f369825a78e4be3c7da841cc9b76c5cef | 2,140 | py | Python | P_rect.py | grhsxy21/Insect_Identification | 3b9ffe3ba91c2271bd663c327e384a6679c67bc8 | [
"Apache-2.0"
] | null | null | null | P_rect.py | grhsxy21/Insect_Identification | 3b9ffe3ba91c2271bd663c327e384a6679c67bc8 | [
"Apache-2.0"
] | null | null | null | P_rect.py | grhsxy21/Insect_Identification | 3b9ffe3ba91c2271bd663c327e384a6679c67bc8 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# 先读图,然后二值化,
# 矩形度
import cv2
import numpy as np
from matplotlib import pyplot as plt
# 此处读入图片,作为接口
origin = cv2.imread('D:/GitHub/ZRB/Insect_Identification/picture/butterfly.png') #TODO改为绝对路径
grayimage = cv2.imread('D:/GitHub/ZRB/Insect_Identification/picture/butterfly.png', 0)
# 高斯滤波
#*img = cv2.GaussianBlur(src, (blur1, blur2), 0),其中src是要进行滤波的原图像,blur1,blur2)是高斯核的大小,blur1和blur2的选取一般是奇数,blur1和blur2的值可以不同。参数0表示标准差取0。
blur = cv2.GaussianBlur(grayimage, (5, 5), 0)
# 二值化:用大津法,此处选项若是THRESH_BINARY_INV,则同意选用白色背景的图片样本
ret, otsu = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
# 找轮廓
contours = cv2.findContours(otsu, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# 轮廓集数目
largest_area = 0
largest_contour_index = 0
num = len(contours[0]) #!cv2.findContour()返回两个值:contours,hierachy,要的是contours,所以后面应该是0而不是1。
for i in range(num):
area = cv2.contourArea(contours[0][i], False)
if area > largest_area:
largest_area = area
largest_contour_index = i
maxContour = contours[0][largest_contour_index]
# 画轮廓
cv2.drawContours(origin, maxContour, -1, (0, 0, 255), 4)
print ("最大面积" + str(largest_area))
# 查找最小外接矩形
minAreaRect = cv2.minAreaRect(maxContour)
box = cv2.boxPoints(minAreaRect)
box = np.int0(box)
# 画轮廓
cv2.drawContours(origin, [box], 0, (0, 255, 0), 4)
# 计算最小外接矩形面积
minAreaRect_Area = int(cv2.contourArea(box, False))
print ("最小外接矩形面积" + str(minAreaRect_Area))
# 特征一:矩形度的计算
P_Rect = largest_area * 1.0 / minAreaRect_Area
# 统一结果为3位小数
P_Rect = round(P_Rect, 3)
print ("矩形度" + str(P_Rect))
cv2.putText(origin, 'S_maxContour : ' + str(largest_area), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (50, 50, 50), 2, cv2.LINE_AA)
cv2.putText(origin, 'S_minAreaRect: ' + str(minAreaRect_Area), (50, 85), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (50, 50, 50), 2, cv2.LINE_AA)
cv2.putText(origin, 'P_Rect: ' + str(P_Rect), (50, 120), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (50, 50, 50), 2, cv2.LINE_AA)
# 显示
cv2.namedWindow('Butterfly', cv2.WINDOW_AUTOSIZE)
cv2.imshow('Butterfly', origin)
cv2.imwrite('picture/p-rect.png',origin)
k = cv2.waitKey(0)
# 'ESC'
if k == 27:
cv2.destroyAllWindows()
| 30.140845 | 134 | 0.724766 |
import cv2
import numpy as np
from matplotlib import pyplot as plt
# Rectangularity feature: area(largest contour) / area(min-area bounding rect).
# Load the color image for drawing and a grayscale copy for thresholding.
origin = cv2.imread('D:/GitHub/ZRB/Insect_Identification/picture/butterfly.png')
grayimage = cv2.imread('D:/GitHub/ZRB/Insect_Identification/picture/butterfly.png', 0)
# Gaussian blur to suppress noise, then Otsu binarization (inverted: assumes
# a light background).
blur = cv2.GaussianBlur(grayimage, (5, 5), 0)
ret, otsu = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
# NOTE(review): contours is the raw return tuple of findContours; contours[0]
# is the contour list only under OpenCV 4.x (which returns two values).
contours = cv2.findContours(otsu, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# Manual scan for the contour with the largest area.
largest_area = 0
largest_contour_index = 0
num = len(contours[0])
for i in range(num):
    area = cv2.contourArea(contours[0][i], False)
    if area > largest_area:
        largest_area = area
        largest_contour_index = i
maxContour = contours[0][largest_contour_index]
# Draw the largest contour in red.
cv2.drawContours(origin, maxContour, -1, (0, 0, 255), 4)
print ("最大面积" + str(largest_area))
# Minimum-area (rotated) bounding rectangle of that contour.
minAreaRect = cv2.minAreaRect(maxContour)
box = cv2.boxPoints(minAreaRect)
# NOTE(review): np.int0 is a deprecated alias removed in NumPy 2.0; prefer np.intp.
box = np.int0(box)
# Draw the bounding rectangle in green.
cv2.drawContours(origin, [box], 0, (0, 255, 0), 4)
minAreaRect_Area = int(cv2.contourArea(box, False))
print ("最小外接矩形面积" + str(minAreaRect_Area))
# Rectangularity, rounded to 3 decimals.
P_Rect = largest_area * 1.0 / minAreaRect_Area
P_Rect = round(P_Rect, 3)
print ("矩形度" + str(P_Rect))
# Annotate the measurements on the image.
cv2.putText(origin, 'S_maxContour : ' + str(largest_area), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (50, 50, 50), 2, cv2.LINE_AA)
cv2.putText(origin, 'S_minAreaRect: ' + str(minAreaRect_Area), (50, 85), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (50, 50, 50), 2, cv2.LINE_AA)
cv2.putText(origin, 'P_Rect: ' + str(P_Rect), (50, 120), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (50, 50, 50), 2, cv2.LINE_AA)
# Show and save the annotated result; ESC (27) closes the window.
cv2.namedWindow('Butterfly', cv2.WINDOW_AUTOSIZE)
cv2.imshow('Butterfly', origin)
cv2.imwrite('picture/p-rect.png',origin)
k = cv2.waitKey(0)
if k == 27:
    cv2.destroyAllWindows()
| true | true |
f7f47c0aae1e220d20eec7c8265f52bc8bf4903e | 1,091 | py | Python | dream/server/models.py | icyblade/dream | 818e77f1c25e51f8cd966f7aa4eb1bcd4207b208 | [
"MIT"
] | null | null | null | dream/server/models.py | icyblade/dream | 818e77f1c25e51f8cd966f7aa4eb1bcd4207b208 | [
"MIT"
] | null | null | null | dream/server/models.py | icyblade/dream | 818e77f1c25e51f8cd966f7aa4eb1bcd4207b208 | [
"MIT"
] | null | null | null | from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class Agent(db.Model):
    """An agent registered with the server, with unique name and port."""

    id = db.Column(db.Integer, primary_key=True)
    type = db.Column(db.Integer, nullable=False)
    name = db.Column(db.Integer, unique=True, nullable=False)
    port = db.Column(db.Integer, unique=True, nullable=False)

    def __repr__(self):
        return f"<Name {self.name}>"

    def as_dict(self):
        """Serialize every mapped column into a plain dict."""
        return {column.name: getattr(self, column.name) for column in self.__table__.columns}
class Config(db.Model):
    """A single key/value configuration entry."""

    # Configuration key; primary key of the table.
    key = db.Column(db.String(255), primary_key=True)
    # Configuration value; declared unique and non-null.
    value = db.Column(db.String(255), unique=True, nullable=False)

    def __repr__(self):
        return '<Key %s>' % self.key

    def as_dict(self):
        """Serialize every mapped column into a plain dict."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class Quota(db.Model):
    """Maximum quota allowed for a given type."""

    # Quota category; primary key of the table.
    type = db.Column(db.String(255), primary_key=True)
    # Upper bound for this type.
    maximum_quota = db.Column(db.Integer, nullable=False)

    def __repr__(self):
        return '<Type %s>' % self.type

    def as_dict(self):
        """Serialize every mapped column into a plain dict."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
| 27.275 | 78 | 0.661778 | from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class Agent(db.Model):
    """An agent registered with the server, with unique name and port."""

    id = db.Column(db.Integer, primary_key=True)
    type = db.Column(db.Integer, nullable=False)
    name = db.Column(db.Integer, unique=True, nullable=False)
    port = db.Column(db.Integer, unique=True, nullable=False)

    def __repr__(self):
        return '<Name %s>' % self.name

    def as_dict(self):
        """Serialize every mapped column into a plain dict."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class Config(db.Model):
    """A single key/value configuration entry."""

    key = db.Column(db.String(255), primary_key=True)
    value = db.Column(db.String(255), unique=True, nullable=False)

    def __repr__(self):
        return '<Key %s>' % self.key

    def as_dict(self):
        """Serialize every mapped column into a plain dict."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class Quota(db.Model):
    """Maximum quota allowed for a given type."""

    type = db.Column(db.String(255), primary_key=True)
    maximum_quota = db.Column(db.Integer, nullable=False)

    def __repr__(self):
        return '<Type %s>' % self.type

    def as_dict(self):
        """Serialize every mapped column into a plain dict."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
| true | true |
f7f47c53a2a933b72826ea833613e7de93158dc2 | 799 | py | Python | sdk/python/pulumi_azure_native/datalakestore/__init__.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/datalakestore/__init__.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/datalakestore/__init__.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from .. import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .account import *
from .firewall_rule import *
from .get_account import *
from .get_firewall_rule import *
from .get_trusted_id_provider import *
from .get_virtual_network_rule import *
from .trusted_id_provider import *
from .virtual_network_rule import *
from ._inputs import *
from . import outputs
# Make subpackages available:
# Under static analysis the versioned subpackage is imported normally so type
# checkers can resolve it; at runtime it is loaded lazily on first attribute
# access to keep importing this package cheap.
if typing.TYPE_CHECKING:
    import pulumi_azure_native.datalakestore.v20161101 as v20161101
else:
    v20161101 = _utilities.lazy_import('pulumi_azure_native.datalakestore.v20161101')
| 30.730769 | 85 | 0.779725 |
from .. import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .account import *
from .firewall_rule import *
from .get_account import *
from .get_firewall_rule import *
from .get_trusted_id_provider import *
from .get_virtual_network_rule import *
from .trusted_id_provider import *
from .virtual_network_rule import *
from ._inputs import *
from . import outputs
# Under static analysis the versioned subpackage is imported normally so type
# checkers can resolve it; at runtime it is loaded lazily on first attribute
# access to keep importing this package cheap.
if typing.TYPE_CHECKING:
    import pulumi_azure_native.datalakestore.v20161101 as v20161101
else:
    v20161101 = _utilities.lazy_import('pulumi_azure_native.datalakestore.v20161101')
| true | true |
f7f47e525497e7534d89c7741a3282ccf0b6f148 | 3,777 | py | Python | src/oci/database_migration/models/update_host_dump_transfer_details.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/database_migration/models/update_host_dump_transfer_details.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/database_migration/models/update_host_dump_transfer_details.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class UpdateHostDumpTransferDetails(object):
    """
    Optional additional properties for dump transfer in source or target host.
    The ``kind`` discriminator selects the concrete subtype; default kind is CURL.
    """

    #: A constant which can be used with the kind property of a UpdateHostDumpTransferDetails.
    #: This constant has a value of "CURL"
    KIND_CURL = "CURL"

    #: A constant which can be used with the kind property of a UpdateHostDumpTransferDetails.
    #: This constant has a value of "OCI_CLI"
    KIND_OCI_CLI = "OCI_CLI"

    def __init__(self, **kwargs):
        """
        Initializes a new UpdateHostDumpTransferDetails object from keyword
        arguments. Prefer one of the concrete subclasses as service input:

        * :class:`~oci.database_migration.models.UpdateCurlTransferDetails`
        * :class:`~oci.database_migration.models.UpdateOciCliDumpTransferDetails`

        Supported keyword arguments (matching the getters/setters):

        :param kind:
            The value to assign to the kind property of this UpdateHostDumpTransferDetails.
            Allowed values for this property are: "CURL", "OCI_CLI"
        :type kind: str
        """
        self.swagger_types = {'kind': 'str'}
        self.attribute_map = {'kind': 'kind'}
        self._kind = None

    @staticmethod
    def get_subtype(object_dictionary):
        """
        Given the hash representation of a subtype of this class,
        use the info in the hash to return the class of the subtype.
        """
        discriminator = object_dictionary['kind']
        return {
            'CURL': 'UpdateCurlTransferDetails',
            'OCI_CLI': 'UpdateOciCliDumpTransferDetails',
        }.get(discriminator, 'UpdateHostDumpTransferDetails')

    @property
    def kind(self):
        """
        **[Required]** Gets the kind of this UpdateHostDumpTransferDetails.
        Type of dump transfer to use during migration in source or target host. Default kind is CURL

        Allowed values for this property are: "CURL", "OCI_CLI"

        :return: The kind of this UpdateHostDumpTransferDetails.
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """
        Sets the kind of this UpdateHostDumpTransferDetails.
        Type of dump transfer to use during migration in source or target host. Default kind is CURL

        :param kind: The kind of this UpdateHostDumpTransferDetails.
        :type: str
        """
        allowed_values = ["CURL", "OCI_CLI"]
        if not value_allowed_none_or_none_sentinel(kind, allowed_values):
            # f-string renders the same message the old "{0}".format produced.
            raise ValueError(
                f"Invalid value for `kind`, must be None or one of {allowed_values}"
            )
        self._kind = kind

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        return other is not None and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
| 34.651376 | 245 | 0.660842 |
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class UpdateHostDumpTransferDetails(object):
    """Optional additional properties for dump transfer in a source or target
    host; ``kind`` selects the concrete transfer subtype (default CURL).
    """

    # Allowed values for the `kind` discriminator.
    KIND_CURL = "CURL"
    KIND_OCI_CLI = "OCI_CLI"

    def __init__(self, **kwargs):
        """Initialize from keyword arguments; only ``kind`` is recognized."""
        self.swagger_types = {
            'kind': 'str'
        }

        self.attribute_map = {
            'kind': 'kind'
        }

        self._kind = None

    @staticmethod
    def get_subtype(object_dictionary):
        """Return the subclass name matching the dict's 'kind' discriminator."""
        type = object_dictionary['kind']

        if type == 'CURL':
            return 'UpdateCurlTransferDetails'

        if type == 'OCI_CLI':
            return 'UpdateOciCliDumpTransferDetails'
        else:
            return 'UpdateHostDumpTransferDetails'

    @property
    def kind(self):
        """Type of dump transfer to use ("CURL" or "OCI_CLI")."""
        return self._kind

    @kind.setter
    def kind(self, kind):
        # Validate against the allowed discriminator values; None is accepted.
        allowed_values = ["CURL", "OCI_CLI"]
        if not value_allowed_none_or_none_sentinel(kind, allowed_values):
            raise ValueError(
                "Invalid value for `kind`, must be None or one of {0}"
                .format(allowed_values)
            )
        self._kind = kind

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        if other is None:
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
| true | true |
f7f47f046953df24126d669292bd4188fe84bc9c | 1,113 | py | Python | cloudkitty/storage/v1/sqlalchemy/alembic/versions/c703a1bad612_improve_qty_digit.py | wanghuiict/cloudkitty | 11ff713042eb0354f497f7051130630c46860735 | [
"Apache-2.0"
] | 97 | 2015-10-18T02:53:17.000Z | 2022-03-07T05:15:39.000Z | cloudkitty/storage/v1/sqlalchemy/alembic/versions/c703a1bad612_improve_qty_digit.py | shanafang9/cloudkitty | 911c90569ccb09ecf0d7aa11a5a707c8ebda09cf | [
"Apache-2.0"
] | 1 | 2017-11-29T15:39:27.000Z | 2017-11-29T15:39:27.000Z | cloudkitty/storage/v1/sqlalchemy/alembic/versions/c703a1bad612_improve_qty_digit.py | shanafang9/cloudkitty | 911c90569ccb09ecf0d7aa11a5a707c8ebda09cf | [
"Apache-2.0"
] | 54 | 2015-10-27T10:55:02.000Z | 2022-02-18T08:23:19.000Z | # Copyright 2017 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""improve_qty_digit
Revision ID: c703a1bad612
Revises: 307430ab38bc
Create Date: 2017-04-01 09:33:41.434750
"""
# revision identifiers, used by Alembic.
revision = 'c703a1bad612'
down_revision = '307430ab38bc'
from alembic import op # noqa: E402
import sqlalchemy as sa # noqa: E402
def upgrade():
    """Widen ``rated_data_frames.qty`` to ``Numeric(15, 5)``.

    Gives the quantity column an explicit precision/scale so more digits
    can be stored than with the previous bare ``Numeric`` type.
    """
    # batch_alter_table recreates the table where ALTER COLUMN is not
    # supported (e.g. SQLite), so the migration works across backends.
    with op.batch_alter_table('rated_data_frames') as batch_op:
        batch_op.alter_column(
            'qty',
            type_=sa.Numeric(15, 5),
            existing_type=sa.Numeric())
| 29.289474 | 78 | 0.709793 |
revision = 'c703a1bad612'
down_revision = '307430ab38bc'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Widen ``rated_data_frames.qty`` to ``Numeric(15, 5)``.

    Gives the quantity column an explicit precision/scale so more digits
    can be stored than with the previous bare ``Numeric`` type.
    """
    # batch_alter_table recreates the table where ALTER COLUMN is not
    # supported (e.g. SQLite), so the migration works across backends.
    with op.batch_alter_table('rated_data_frames') as batch_op:
        batch_op.alter_column(
            'qty',
            type_=sa.Numeric(15, 5),
            existing_type=sa.Numeric())
| true | true |
f7f47fda0f76310bb8c936fc43fa52df2e6e3415 | 775 | py | Python | workflow/scripts/compress_mrsfast.py | mrvollger/fastCN-smk | a835dbd72b6ca411f9d6b7c060a6aa15ff690ee4 | [
"MIT"
] | 2 | 2021-09-12T01:34:38.000Z | 2022-03-24T22:07:14.000Z | workflow/scripts/compress_mrsfast.py | mrvollger/fastCN-smk | a835dbd72b6ca411f9d6b7c060a6aa15ff690ee4 | [
"MIT"
] | null | null | null | workflow/scripts/compress_mrsfast.py | mrvollger/fastCN-smk | a835dbd72b6ca411f9d6b7c060a6aa15ff690ee4 | [
"MIT"
] | 1 | 2021-11-17T21:50:57.000Z | 2021-11-17T21:50:57.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: William T. Harvey
import gzip
file = gzip.open(snakemake.input.sam, "rt")
count = 0
line_cols = []
with gzip.open(snakemake.output.comp, "wt") as outfile:
outfile.write("qname,flag,rname,pos,mapq\n")
while True:
line = file.readline().rstrip()
if not line:
break
if line[0] == "@":
continue
if line.split("\t")[1:] == line_cols:
count += 1
elif line_cols == []:
line_cols = line.split("\t")[1:]
count = 1
else:
out_string = ",".join([str(count)] + line_cols)
outfile.write(f"{out_string}\n")
line_cols = line.split("\t")[1:]
count = 1
file.close()
| 22.794118 | 59 | 0.513548 |
import gzip
# Collapse consecutive duplicate mrsFAST SAM alignments into counted CSV rows.
file = gzip.open(snakemake.input.sam, "rt")
count = 0       # length of the current run of identical records
line_cols = []  # fields (after qname) of the current run
with gzip.open(snakemake.output.comp, "wt") as outfile:
    # NOTE(review): the first output column actually holds the run count,
    # though the header labels it "qname" — confirm downstream expectations.
    outfile.write("qname,flag,rname,pos,mapq\n")
    while True:
        line = file.readline().rstrip()
        if not line:
            # NOTE(review): this stops at the first blank line, not only EOF.
            break
        if line[0] == "@":
            # Skip SAM header lines.
            continue
        if line.split("\t")[1:] == line_cols:
            count += 1
        elif line_cols == []:
            line_cols = line.split("\t")[1:]
            count = 1
        else:
            # A new run starts: emit the previous run as "count,fields...".
            out_string = ",".join([str(count)] + line_cols)
            outfile.write(f"{out_string}\n")
            line_cols = line.split("\t")[1:]
            count = 1
    # NOTE(review): the final run is never written out — the last group of
    # records is silently dropped.
file.close()
| true | true |
f7f4802beab0324e70a6c8fef04694f90d3b7654 | 1,761 | py | Python | gocardless_pro/resources/bank_authorisation.py | gdvalderrama/gocardless-pro-python | 0ff8001f5bba11673c4fa0f30d26eca61a1219ba | [
"MIT"
] | null | null | null | gocardless_pro/resources/bank_authorisation.py | gdvalderrama/gocardless-pro-python | 0ff8001f5bba11673c4fa0f30d26eca61a1219ba | [
"MIT"
] | null | null | null | gocardless_pro/resources/bank_authorisation.py | gdvalderrama/gocardless-pro-python | 0ff8001f5bba11673c4fa0f30d26eca61a1219ba | [
"MIT"
] | null | null | null | # WARNING: Do not edit by hand, this file was generated by Crank:
#
# https://github.com/gocardless/crank
#
class BankAuthorisation(object):
    """A thin wrapper around a bank_authorisation, providing easy access to its
    attributes.

    Example:
      bank_authorisation = client.bank_authorisations.get()
      bank_authorisation.id
    """

    def __init__(self, attributes, api_response):
        # `attributes` is the raw resource dict from the API payload;
        # `api_response` is the full response object it was extracted from.
        self.attributes = attributes
        self.api_response = api_response

    @property
    def authorisation_type(self):
        """The 'authorisation_type' attribute (None if absent)."""
        return self.attributes.get('authorisation_type')

    @property
    def authorised_at(self):
        """The 'authorised_at' attribute (None if absent)."""
        return self.attributes.get('authorised_at')

    @property
    def created_at(self):
        """The 'created_at' attribute (None if absent)."""
        return self.attributes.get('created_at')

    @property
    def expires_at(self):
        """The 'expires_at' attribute (None if absent)."""
        return self.attributes.get('expires_at')

    @property
    def id(self):
        """The 'id' attribute (None if absent)."""
        return self.attributes.get('id')

    @property
    def last_visited_at(self):
        """The 'last_visited_at' attribute (None if absent)."""
        return self.attributes.get('last_visited_at')

    @property
    def links(self):
        """The 'links' attribute wrapped in the nested Links helper."""
        return self.Links(self.attributes.get('links'))

    @property
    def redirect_uri(self):
        """The 'redirect_uri' attribute (None if absent)."""
        return self.attributes.get('redirect_uri')

    @property
    def url(self):
        """The 'url' attribute (None if absent)."""
        return self.attributes.get('url')

    class Links(object):
        """Wrapper for the response's 'links' attribute."""

        def __init__(self, attributes):
            # Raw 'links' dict from the parent payload.
            self.attributes = attributes

        @property
        def billing_request(self):
            """The 'billing_request' link (None if absent)."""
            return self.attributes.get('billing_request')

        @property
        def institution(self):
            """The 'institution' link (None if absent)."""
            return self.attributes.get('institution')
| 17.969388 | 79 | 0.621806 |
class BankAuthorisation(object):
    """Thin wrapper exposing a bank_authorisation payload's fields as properties.

    Every property below reads the same-named key from the raw ``attributes``
    dict, returning None when the key is absent.
    """

    def __init__(self, attributes, api_response):
        # Raw resource dict and the full API response it came from.
        self.attributes = attributes
        self.api_response = api_response

    @property
    def authorisation_type(self):
        return self.attributes.get('authorisation_type')

    @property
    def authorised_at(self):
        return self.attributes.get('authorised_at')

    @property
    def created_at(self):
        return self.attributes.get('created_at')

    @property
    def expires_at(self):
        return self.attributes.get('expires_at')

    @property
    def id(self):
        return self.attributes.get('id')

    @property
    def last_visited_at(self):
        return self.attributes.get('last_visited_at')

    @property
    def links(self):
        # Wraps the 'links' sub-dict in the nested Links helper.
        return self.Links(self.attributes.get('links'))

    @property
    def redirect_uri(self):
        return self.attributes.get('redirect_uri')

    @property
    def url(self):
        return self.attributes.get('url')

    class Links(object):
        """Wrapper for the response's 'links' attribute."""

        def __init__(self, attributes):
            self.attributes = attributes

        @property
        def billing_request(self):
            return self.attributes.get('billing_request')

        @property
        def institution(self):
            return self.attributes.get('institution')
| true | true |
f7f480e3deb206a2dcffde930d3268460f088215 | 32,079 | py | Python | kartothek/io/eager.py | steffen-schroeder-by/kartothek | 1821ea5df60d4079d3911b3c2f17be11d8780e22 | [
"MIT"
] | null | null | null | kartothek/io/eager.py | steffen-schroeder-by/kartothek | 1821ea5df60d4079d3911b3c2f17be11d8780e22 | [
"MIT"
] | null | null | null | kartothek/io/eager.py | steffen-schroeder-by/kartothek | 1821ea5df60d4079d3911b3c2f17be11d8780e22 | [
"MIT"
] | null | null | null | import warnings
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union, cast
import deprecation
import pandas as pd
from simplekv import KeyValueStore
from kartothek.core.common_metadata import (
empty_dataframe_from_schema,
make_meta,
store_schema_metadata,
)
from kartothek.core.dataset import DatasetMetadata, DatasetMetadataBuilder
from kartothek.core.docs import default_docs
from kartothek.core.factory import DatasetFactory, _ensure_factory
from kartothek.core.naming import (
DEFAULT_METADATA_STORAGE_FORMAT,
DEFAULT_METADATA_VERSION,
METADATA_BASE_SUFFIX,
METADATA_FORMAT_JSON,
PARQUET_FILE_SUFFIX,
get_partition_file_prefix,
)
from kartothek.core.typing import StoreInput
from kartothek.core.utils import lazy_store
from kartothek.io.iter import store_dataframes_as_dataset__iter
from kartothek.io_components.delete import (
delete_common_metadata,
delete_indices,
delete_top_level_metadata,
)
from kartothek.io_components.gc import delete_files, dispatch_files_to_gc
from kartothek.io_components.index import update_indices_from_partitions
from kartothek.io_components.metapartition import (
SINGLE_TABLE,
MetaPartition,
parse_input_to_metapartition,
)
from kartothek.io_components.read import dispatch_metapartitions_from_factory
from kartothek.io_components.update import update_dataset_from_partitions
from kartothek.io_components.utils import (
_ensure_compatible_indices,
align_categories,
normalize_args,
sort_values_categorical,
validate_partition_keys,
)
from kartothek.io_components.write import raise_if_dataset_exists
from kartothek.serialization import DataFrameSerializer
from kartothek.serialization._parquet import ParquetSerializer
from kartothek.utils.ktk_adapters import get_dataset_keys
from kartothek.utils.migration_helpers import (
DEPRECATION_WARNING_REMOVE_PARAMETER,
deprecate_parameters,
deprecate_parameters_if_set,
get_deprecation_warning_remove_dict_multi_table,
get_deprecation_warning_remove_parameter_multi_table,
get_generic_function_deprecation_waring,
get_parameter_default_value_deprecation_warning,
get_parameter_generic_replacement_deprecation_warning,
get_parameter_type_change_deprecation_warning,
)
from kartothek.utils.store import copy_rename_keys
# Names re-exported as the public API of this module.
__all__ = (
    "delete_dataset",
    "read_dataset_as_dataframes",
    "read_table",
    "commit_dataset",
    "store_dataframes_as_dataset",
    "create_empty_dataset_header",
    "write_single_partition",
    "update_dataset_from_dataframes",
    "build_dataset_indices",
    "garbage_collect_dataset",
    "copy_dataset",
)
@default_docs
@normalize_args
def delete_dataset(dataset_uuid=None, store=None, factory=None):
    """
    Delete the entire dataset from the store.

    Parameters
    ----------
    """
    # (Parameter documentation is injected by the @default_docs decorator.)
    # Schema and metadata are not needed to enumerate and delete keys.
    ds_factory = _ensure_factory(
        dataset_uuid=dataset_uuid,
        load_schema=False,
        store=store,
        factory=factory,
        load_dataset_metadata=False,
    )

    # Remove possibly unreferenced files
    garbage_collect_dataset(factory=ds_factory)

    # Delete indices first since they do not affect dataset integrity
    delete_indices(dataset_factory=ds_factory)

    # Remove every partition's payload files.
    for metapartition in dispatch_metapartitions_from_factory(ds_factory):
        metapartition = cast(MetaPartition, metapartition)
        metapartition.delete_from_store(dataset_uuid=dataset_uuid, store=store)

    # delete common metadata after partitions
    delete_common_metadata(dataset_factory=ds_factory)

    # Delete the top level metadata file last, so the dataset only stops
    # resolving once its contents are gone.
    delete_top_level_metadata(dataset_factory=ds_factory)
@default_docs
@deprecate_parameters(
    get_parameter_default_value_deprecation_warning(
        from_value="False", to_value="True", deprecated_in="5.3", changed_in="6.0"
    ),
    "dates_as_object",
)
@deprecate_parameters_if_set(
    get_deprecation_warning_remove_dict_multi_table(
        deprecated_in="5.3", changed_in="6.0"
    ),
    "categoricals",
)
@deprecate_parameters_if_set(
    get_deprecation_warning_remove_parameter_multi_table(
        deprecated_in="5.3", removed_in="6.0"
    ),
    "tables",
    "label_filter",
    "concat_partitions_on_primary_index",
)
def read_dataset_as_dataframes(
    dataset_uuid: Optional[str] = None,
    store=None,
    tables: Optional[List[str]] = None,
    columns: Dict[str, List[str]] = None,
    concat_partitions_on_primary_index: bool = False,
    predicate_pushdown_to_io: bool = True,
    categoricals: Dict[str, List[str]] = None,
    label_filter: Callable = None,
    dates_as_object: bool = False,
    predicates: Optional[List[List[Tuple[str, str, Any]]]] = None,
    factory: Optional[DatasetFactory] = None,
    dispatch_by: Optional[List[str]] = None,
) -> List[pd.DataFrame]:
    """
    Read a dataset as a list of dataframes.

    Every element of the list corresponds to a physical partition.

    Parameters
    ----------

    Returns
    -------
    List[pandas.DataFrame]
        Returns a list of pandas.DataFrame. One element per partition

    Examples
    --------
    Dataset in store contains two partitions with two files each

    .. code ::

        >>> import storefact
        >>> from kartothek.io.eager import read_dataset_as_dataframes
        >>> store = storefact.get_store_from_url('s3://bucket_with_dataset')
        >>> dfs = read_dataset_as_dataframes('dataset_uuid', store)
    """
    # Resolve uuid/store/factory into a single dataset factory; metadata is
    # loaded eagerly since callers receive fully hydrated dataframes.
    ds_factory = _ensure_factory(
        dataset_uuid=dataset_uuid,
        store=store,
        factory=factory,
        load_dataset_metadata=True,
    )

    # Delegate to the metapartition-based reader, then strip the
    # MetaPartition wrapper and return only the payload dataframes.
    mps = read_dataset_as_metapartitions(
        tables=tables,
        columns=columns,
        concat_partitions_on_primary_index=concat_partitions_on_primary_index,
        predicate_pushdown_to_io=predicate_pushdown_to_io,
        categoricals=categoricals,
        label_filter=label_filter,
        dates_as_object=dates_as_object,
        predicates=predicates,
        factory=ds_factory,
        dispatch_by=dispatch_by,
        dispatch_metadata=False,
    )
    return [mp.data for mp in mps]
@default_docs
@deprecate_parameters(
    get_parameter_default_value_deprecation_warning(
        from_value="False", to_value="True", deprecated_in="5.3", changed_in="6.0"
    ),
    "dates_as_object",
)
@deprecate_parameters_if_set(
    get_deprecation_warning_remove_parameter_multi_table(
        deprecated_in="5.3", removed_in="6.0"
    ),
    "tables",
    "concat_partitions_on_primary_index",
    "label_filter",
    "dispatch_metadata",
)
def read_dataset_as_metapartitions(
    dataset_uuid=None,
    store=None,
    tables=None,
    columns=None,
    concat_partitions_on_primary_index=False,
    predicate_pushdown_to_io=True,
    categoricals=None,
    label_filter=None,
    dates_as_object=False,
    predicates=None,
    factory=None,
    dispatch_by=None,
    dispatch_metadata=True,
):
    """
    Read a dataset as a list of :class:`kartothek.io_components.metapartition.MetaPartition`.
    Every element of the list corresponds to a physical partition.
    Parameters
    ----------
    Returns
    -------
    List[kartothek.io_components.metapartition.MetaPartition]
        Returns a tuple of the loaded dataframe and the dataset metadata
    Examples
    --------
    Dataset in store contains two partitions with two files each
    .. code ::
        >>> import storefact
        >>> from kartothek.io.eager import read_dataset_as_dataframe
        >>> store = storefact.get_store_from_url('s3://bucket_with_dataset')
        >>> list_mps = read_dataset_as_metapartitions('dataset_uuid', store, 'core')
    """
    dataset_factory = _ensure_factory(
        dataset_uuid=dataset_uuid,
        store=store,
        factory=factory,
        load_dataset_metadata=False,
    )

    # Multi-table datasets are on their way out; warn users who still read one.
    if len(dataset_factory.tables) > 1:
        warnings.warn(
            "Trying to read a dataset with multiple internal tables. This functionality will be removed in the next "
            "major release. If you require a multi tabled data format, we recommend to switch to the kartothek Cube "
            "functionality. "
            "https://kartothek.readthedocs.io/en/stable/guide/cube/kartothek_cubes.html",
            DeprecationWarning,
        )

    # Imported lazily to avoid a circular import between eager and iter.
    from .iter import read_dataset_as_metapartitions__iterator

    return list(
        read_dataset_as_metapartitions__iterator(
            tables=tables,
            columns=columns,
            concat_partitions_on_primary_index=concat_partitions_on_primary_index,
            predicate_pushdown_to_io=predicate_pushdown_to_io,
            categoricals=categoricals,
            label_filter=label_filter,
            dates_as_object=dates_as_object,
            predicates=predicates,
            factory=dataset_factory,
            dispatch_by=dispatch_by,
            dispatch_metadata=dispatch_metadata,
        )
    )
@deprecation.deprecated(
    deprecated_in="5.3",
    removed_in="6.0",
    details=get_generic_function_deprecation_waring(
        function_name="_check_compatible_list"
    ),
)
def _check_compatible_list(table, obj, argument_name=""):
    """
    Normalize a per-table argument to its ``{table: [...]}`` dict form.

    ``None`` passes through, a list is wrapped under ``table``, and a dict is
    accepted as-is provided it already contains ``table``; anything else raises.
    """
    if obj is None:
        return obj
    if isinstance(obj, list):
        return {table: obj}
    if isinstance(obj, dict):
        if table in obj:
            return obj
        raise ValueError(
            "Provided table {} is not compatible with input from argument {}.".format(
                table, argument_name
            )
        )
    raise TypeError(
        "Unknown type encountered for argument {}. Expected `list`, got `{}` instead".format(
            argument_name, type(obj)
        )
    )
@default_docs
@deprecate_parameters(
    get_parameter_default_value_deprecation_warning(
        from_value="False", to_value="True", deprecated_in="5.3", changed_in="6.0"
    ),
    "dates_as_object",
)
@deprecate_parameters_if_set(
    get_deprecation_warning_remove_dict_multi_table(
        deprecated_in="5.3", changed_in="6.0"
    ),
    "categoricals",
)
@deprecate_parameters_if_set(
    get_deprecation_warning_remove_parameter_multi_table(
        deprecated_in="5.3", removed_in="6.0"
    ),
    "table",
    "concat_partitions_on_primary_index",
    "label_filter",
)
def read_table(
    dataset_uuid: Optional[str] = None,
    store=None,
    table: Optional[str] = SINGLE_TABLE,
    columns: Dict[str, List[str]] = None,
    concat_partitions_on_primary_index: bool = False,
    predicate_pushdown_to_io: bool = True,
    categoricals: Dict[str, List[str]] = None,
    label_filter: Callable = None,
    dates_as_object: bool = False,
    predicates: Optional[List[List[Tuple[str, str, Any]]]] = None,
    factory: Optional[DatasetFactory] = None,
) -> pd.DataFrame:
    """
    A utility function to load a single table with multiple partitions as a single dataframe in one go.
    Mostly useful for smaller tables or datasets where all partitions fit into memory.
    The order of partitions is not guaranteed to be stable in the resulting dataframe.
    Parameters
    ----------
    Returns
    -------
    pandas.DataFrame
        Returns a pandas.DataFrame holding the data of the requested columns
    Examples
    --------
    Dataset in store contains two partitions with two files each
    .. code ::
        >>> import storefact
        >>> from kartothek.io.eager import read_table
        >>> store = storefact.get_store_from_url('s3://bucket_with_dataset')
        >>> df = read_table('dataset_uuid', store, 'core')
    """
    if not isinstance(table, str):
        raise TypeError("Argument `table` needs to be a string")

    # `columns`/`categoricals` may be a plain list (single-table shorthand) or a
    # {table: [...]} dict; normalize both to the dict form used below.
    columns = _check_compatible_list(table, columns, "columns")
    categoricals = _check_compatible_list(table, categoricals, "categoricals")
    ds_factory = _ensure_factory(
        dataset_uuid=dataset_uuid,
        store=store,
        factory=factory,
        load_dataset_metadata=False,
    )
    partitions = read_dataset_as_dataframes(
        tables=[table],
        columns=columns,
        concat_partitions_on_primary_index=concat_partitions_on_primary_index,
        predicate_pushdown_to_io=predicate_pushdown_to_io,
        categoricals=categoricals,
        label_filter=label_filter,
        dates_as_object=dates_as_object,
        predicates=predicates,
        factory=ds_factory,
    )

    # An empty frame built from the stored schema guarantees the result carries
    # the correct columns and dtypes even if no partition matched the predicates.
    empty_df = empty_dataframe_from_schema(
        schema=ds_factory.table_meta[table],
        columns=columns[table] if columns is not None else None,
    )
    if categoricals:
        empty_df = empty_df.astype({col: "category" for col in categoricals[table]})
    dfs = [partition_data[table] for partition_data in partitions] + [empty_df]
    # require meta 4 otherwise, can't construct types/columns
    if categoricals:
        # Harmonize the category dtypes across partitions before concatenation.
        dfs = align_categories(dfs, categoricals[table])
    df = pd.concat(dfs, ignore_index=True, sort=False)
    # ensure column order matches the schema-derived empty frame
    if len(empty_df.columns) > 0 and list(empty_df.columns) != list(df.columns):
        df = df.reindex(empty_df.columns, copy=False, axis=1)
    return df
@default_docs
@normalize_args
@deprecate_parameters_if_set(
    DEPRECATION_WARNING_REMOVE_PARAMETER, "output_dataset_uuid", "df_serializer",
)
def commit_dataset(
    store: Optional[StoreInput] = None,
    dataset_uuid: Optional[str] = None,
    new_partitions: Optional[Iterable[MetaPartition]] = None,
    output_dataset_uuid: Optional[str] = None,
    delete_scope: Optional[Iterable[Dict[str, Any]]] = None,
    metadata: Dict = None,
    df_serializer: DataFrameSerializer = None,
    metadata_merger: Callable[[List[Dict]], Dict] = None,
    default_metadata_version: int = DEFAULT_METADATA_VERSION,
    partition_on: Optional[Iterable[str]] = None,
    factory: Optional[DatasetFactory] = None,
    secondary_indices: Optional[Iterable[str]] = None,
):
    """
    Commit new state to an existing dataset. This can be used for three distinct operations
    1. Add previously written partitions to this dataset
    If for some reasons, the existing pipelines are not sufficient but you need more control, you can write the files outside of a kartothek pipeline and commit them whenever you choose to.
    This should be used in combination with
    :func:`~kartothek.io.eager.write_single_partition` and :func:`~kartothek.io.eager.create_empty_dataset_header`.
    .. code::
        import pandas as pd
        from kartothek.io.eager import write_single_partition, commit_dataset
        store = "hfs://my_store"
        # The partition writing can be done concurrently and distributed if wanted.
        # Only the information about what partitions have been written is required for the commit.
        new_partitions = [
            write_single_partition(
                store=store,
                dataset_uuid='dataset_uuid',
                data=pd.DataFrame({'column': [1, 2]}),
            )
        ]
        new_dataset = commit_dataset(
            store=store,
            dataset_uuid='dataset_uuid',
            new_partitions=new_partitions,
        )
    2. Simple delete of partitions
    If you want to remove some partitions this is one of the simples ways of doing so. By simply providing a delete_scope, this removes the references to these files in an atomic commit.
    .. code::
        commit_dataset(
            store=store,
            dataset_uuid='dataset_uuid',
            delete_scope=[
                {
                    "partition_column": "part_value_to_be_removed"
                }
            ],
        )
    3. Add additional metadata
    To add new metadata to an existing dataset
    .. code::
        commit_dataset(
            store=store,
            dataset_uuid='dataset_uuid',
            metadata={"new": "user_metadata"},
        )
    Note::
        If you do not want the new metadata to be merged with the existing one, povide a custom ``metadata_merger``
    Parameters
    ----------
    new_partitions:
        Input partition to be committed.
    """
    # A commit must change something: data, metadata, or a delete scope.
    if not (new_partitions or metadata or delete_scope):
        raise ValueError(
            "Need to provide either new data, new metadata or a delete scope. None of it was provided."
        )

    store = lazy_store(store)
    ds_factory, metadata_version, partition_on = validate_partition_keys(
        dataset_uuid=dataset_uuid,
        store=store,
        ds_factory=factory,
        default_metadata_version=default_metadata_version,
        partition_on=partition_on,
    )

    metapartitions = parse_input_to_metapartition(
        new_partitions, metadata_version=metadata_version
    )
    if secondary_indices:
        metapartitions = metapartitions.build_indices(columns=secondary_indices)

    # Every partition must carry a `files` attribute before it can be committed.
    partitions_with_files = [
        _maybe_infer_files_attribute(mp, dataset_uuid) for mp in metapartitions
    ]

    return update_dataset_from_partitions(
        partitions_with_files,
        store_factory=store,
        dataset_uuid=dataset_uuid,
        ds_factory=ds_factory,
        delete_scope=delete_scope,
        metadata=metadata,
        metadata_merger=metadata_merger,
    )
def _maybe_infer_files_attribute(metapartition, dataset_uuid):
    """
    Return a copy of ``metapartition`` in which every sub-partition carries a
    ``files`` mapping, inferring the expected storage keys from the partition
    label and tables when the mapping is missing.
    """
    result = metapartition.as_sentinel()
    for sub_mp in metapartition:
        if len(sub_mp.files) == 0:
            # Without either `files` or `data` the dataset tables are unknown.
            if sub_mp.data is None or len(sub_mp.data) == 0:
                raise ValueError(
                    "Trying to commit partitions without `data` or `files` information."
                    "Either one is necessary to infer the dataset tables"
                )
            inferred_files = {
                table: get_partition_file_prefix(
                    dataset_uuid=dataset_uuid,
                    partition_label=sub_mp.label,
                    table=table,
                    metadata_version=sub_mp.metadata_version,
                )
                + PARQUET_FILE_SUFFIX  # noqa: W503 line break before binary operator
                for table in sub_mp.data
            }
            sub_mp = sub_mp.copy(files=inferred_files)
        result = result.add_metapartition(sub_mp)
    return result
@default_docs
@normalize_args
@deprecate_parameters_if_set(
    get_parameter_type_change_deprecation_warning(
        from_type="Optional[ParquetSerializer]",
        to_type="Optional[DataFrameSerializer]",
        deprecated_in="5.3",
        changed_in="6.0",
    ),
    "df_serializer",
)
def store_dataframes_as_dataset(
    store: KeyValueStore,
    dataset_uuid: str,
    dfs: List[Union[pd.DataFrame, Dict[str, pd.DataFrame]]],
    metadata: Optional[Dict[str, Dict[str, Any]]] = None,
    partition_on: Optional[List[str]] = None,
    df_serializer: Optional[ParquetSerializer] = None,
    overwrite: bool = False,
    secondary_indices=None,
    metadata_storage_format: str = DEFAULT_METADATA_STORAGE_FORMAT,
    metadata_version: int = DEFAULT_METADATA_VERSION,
):
    """
    Utility function to store a list of dataframes as a partitioned dataset with multiple tables (files).
    Useful for very small datasets where all data fits into memory.
    Parameters
    ----------
    dfs:
        The dataframe(s) to be stored.
    """
    # Accept a bare dataframe/dict for backwards compatibility, but warn.
    if isinstance(dfs, (pd.DataFrame, dict)):
        warnings.warn(
            "Passing a single dataframe instead of an iterable is deprecated and may "
            "be removed in the next major release.",
            DeprecationWarning,
        )
        dfs = [dfs]

    # The eager variant is a thin wrapper around the iterator implementation.
    return store_dataframes_as_dataset__iter(
        dfs,
        store=store,
        dataset_uuid=dataset_uuid,
        metadata=metadata,
        partition_on=partition_on,
        df_serializer=df_serializer,
        overwrite=overwrite,
        secondary_indices=secondary_indices,
        metadata_storage_format=metadata_storage_format,
        metadata_version=metadata_version,
    )
@default_docs
@normalize_args
@deprecate_parameters_if_set(
    get_parameter_generic_replacement_deprecation_warning(
        replacing_parameter="schema", deprecated_in="5.3", changed_in="6.0"
    ),
    "table_meta",
)
def create_empty_dataset_header(
    store,
    dataset_uuid,
    table_meta,
    partition_on=None,
    metadata=None,
    overwrite=False,
    metadata_storage_format=DEFAULT_METADATA_STORAGE_FORMAT,
    metadata_version=DEFAULT_METADATA_VERSION,
):
    """
    Create an dataset header without any partitions. This may be used in combination
    with :func:`~kartothek.io.eager.write_single_partition` to create implicitly partitioned datasets.
    .. note::
        The created dataset will **always** have explicit_partition==False
    .. warning::
        This function should only be used in very rare occasions. Usually you're better off using
        full end-to-end pipelines.
    Parameters
    ----------
    """
    kv_store = lazy_store(store)()

    if not overwrite:
        raise_if_dataset_exists(dataset_uuid=dataset_uuid, store=kv_store)

    # Normalize each schema in place and persist it per table.
    for table, schema in table_meta.items():
        normalized_schema = make_meta(schema, origin=table, partition_keys=partition_on)
        table_meta[table] = normalized_schema
        store_schema_metadata(
            schema=normalized_schema,
            dataset_uuid=dataset_uuid,
            store=kv_store,
            table=table,
        )

    builder = DatasetMetadataBuilder(
        uuid=dataset_uuid,
        metadata_version=metadata_version,
        partition_keys=partition_on,
        explicit_partitions=False,
        table_meta=table_meta,
    )
    for key, value in (metadata or {}).items():
        builder.add_metadata(key, value)

    storage_format = metadata_storage_format.lower()
    if storage_format == "json":
        kv_store.put(*builder.to_json())
    elif storage_format == "msgpack":
        kv_store.put(*builder.to_msgpack())
    else:
        raise ValueError(
            "Unknown metadata storage format encountered: {}".format(
                metadata_storage_format
            )
        )
    return builder.to_dataset()
@default_docs
@normalize_args
@deprecate_parameters_if_set(
    get_parameter_type_change_deprecation_warning(
        from_type="Optional[ParquetSerializer]",
        to_type="Optional[DataFrameSerializer]",
        deprecated_in="5.3",
        changed_in="6.0",
    ),
    "df_serializer",
)
@deprecate_parameters_if_set(
    DEPRECATION_WARNING_REMOVE_PARAMETER, "metadata", "overwrite", "metadata_merger",
)
def write_single_partition(
    store: Optional[KeyValueStore] = None,
    dataset_uuid: Optional[str] = None,
    data=None,
    metadata: Optional[Dict[str, Dict[str, Any]]] = None,
    df_serializer: Optional[ParquetSerializer] = None,
    overwrite: bool = False,
    metadata_merger=None,
    metadata_version: int = DEFAULT_METADATA_VERSION,
    partition_on: Optional[List[str]] = None,
    factory=None,
    secondary_indices=None,
):
    """
    Write the parquet file(s) for a single partition. This will **not** update the dataset header and can therefore
    be used for highly concurrent dataset writes.
    For datasets with explicit partitions, the dataset header can be updated by calling
    :func:`kartothek.io.eager.commit_dataset` with the output of this function.
    .. note::
        It is highly recommended to use the full pipelines whenever possible. This functionality should be
        used with caution and should only be necessary in cases where traditional pipeline scheduling is not an
        option.
    .. note::
        This function requires an existing dataset metadata file and the schemas for the tables to be present.
        Either you have ensured that the dataset always exists though some other means or use
        :func:`create_empty_dataset_header` at the start of your computation to ensure the basic dataset
        metadata is there.
    Parameters
    ----------
    data: Dict
        The input is defined according to :func:`~kartothek.io_components.metapartition.parse_input_to_metapartition`
    Returns
    -------
    An empty :class:`~kartothek.io_components.metapartition.MetaPartition` referencing the new files
    """
    if data is None:
        raise TypeError("The parameter `data` is not optional")

    _, ds_metadata_version, partition_on = validate_partition_keys(
        dataset_uuid=dataset_uuid,
        store=lazy_store(store),
        ds_factory=factory,
        default_metadata_version=metadata_version,
        partition_on=partition_on,
    )

    metapartition = parse_input_to_metapartition(
        obj=data, metadata_version=ds_metadata_version
    )

    if partition_on:
        metapartition = metapartition.partition_on(partition_on)
    if secondary_indices:
        metapartition = metapartition.build_indices(columns=secondary_indices)

    # Check against the stored schemas before any file is written.
    metapartition = metapartition.validate_schema_compatible(
        dataset_uuid=dataset_uuid, store=store
    )
    return metapartition.store_dataframes(
        store=store, dataset_uuid=dataset_uuid, df_serializer=df_serializer
    )
@default_docs
@normalize_args
@deprecate_parameters_if_set(
    get_parameter_type_change_deprecation_warning(
        from_type="Optional[ParquetSerializer]",
        to_type="Optional[DataFrameSerializer]",
        deprecated_in="5.3",
        changed_in="6.0",
    ),
    "df_serializer",
)
@deprecate_parameters_if_set(
    DEPRECATION_WARNING_REMOVE_PARAMETER,
    "central_partition_metadata",
    "load_dynamic_metadata",
)
def update_dataset_from_dataframes(
    df_list: List[Union[pd.DataFrame, Dict[str, pd.DataFrame]]],
    store: Optional[KeyValueStore] = None,
    dataset_uuid: Optional[str] = None,
    delete_scope=None,
    metadata=None,
    df_serializer: Optional[ParquetSerializer] = None,
    metadata_merger: Callable = None,
    central_partition_metadata: bool = True,
    default_metadata_version: int = DEFAULT_METADATA_VERSION,
    partition_on: Optional[List[str]] = None,
    load_dynamic_metadata: bool = True,
    sort_partitions_by: Optional[str] = None,
    secondary_indices: Optional[List[str]] = None,
    factory: Optional[DatasetFactory] = None,
) -> DatasetMetadata:
    """
    Update a kartothek dataset in store at once, using a list of dataframes.
    Useful for datasets which do not fit into memory.
    Parameters
    ----------
    df_list:
        The dataframe(s) to be stored.
    Returns
    -------
    The dataset metadata object (:class:`~kartothek.core.dataset.DatasetMetadata`).
    See Also
    --------
    :ref:`mutating_datasets`
    """
    ds_factory, metadata_version, partition_on = validate_partition_keys(
        dataset_uuid=dataset_uuid,
        store=store,
        ds_factory=factory,
        default_metadata_version=default_metadata_version,
        partition_on=partition_on,
    )

    # Reconcile the requested indices with what the dataset already declares.
    indices = _ensure_compatible_indices(ds_factory, secondary_indices)
    del secondary_indices  # guard against accidental use of the raw argument

    partitions = parse_input_to_metapartition(
        df_list,
        metadata_version=metadata_version,
        expected_secondary_indices=indices,
    )

    if sort_partitions_by:
        partitions = partitions.apply(
            partial(sort_values_categorical, columns=sort_partitions_by)
        )
    if partition_on:
        partitions = partitions.partition_on(partition_on)
    if indices:
        partitions = partitions.build_indices(indices)

    partitions = partitions.store_dataframes(
        store=store, dataset_uuid=dataset_uuid, df_serializer=df_serializer
    )

    return update_dataset_from_partitions(
        partitions,
        store_factory=store,
        dataset_uuid=dataset_uuid,
        ds_factory=ds_factory,
        delete_scope=delete_scope,
        metadata=metadata,
        metadata_merger=metadata_merger,
    )
@default_docs
@normalize_args
def build_dataset_indices(store, dataset_uuid, columns, factory=None):
    """
    Function which builds a :class:`~kartothek.core.index.ExplicitSecondaryIndex`.
    This function loads the dataset, computes the requested indices and writes
    the indices to the dataset. The dataset partitions itself are not mutated.
    Parameters
    ----------
    """
    dataset_factory = _ensure_factory(
        dataset_uuid=dataset_uuid,
        store=store,
        factory=factory,
        load_dataset_metadata=False,
    )

    # Restrict loading to the tables that contain any of the requested columns.
    requested_columns = set(columns)
    cols_to_load = {}
    for table, meta in dataset_factory.table_meta.items():
        overlap = requested_columns & set(meta.names)
        if overlap:
            cols_to_load[table] = overlap

    new_partitions = []
    for metapartition in dispatch_metapartitions_from_factory(dataset_factory):
        metapartition = metapartition.load_dataframes(
            store=dataset_factory.store,
            tables=list(cols_to_load.keys()),
            columns=cols_to_load,
        )
        metapartition = metapartition.build_indices(columns=columns)
        # Free the dataframes immediately; only the index data is needed below.
        metapartition = metapartition.remove_dataframes()
        new_partitions.append(metapartition)

    return update_indices_from_partitions(
        new_partitions, dataset_metadata_factory=dataset_factory
    )
@default_docs
@normalize_args
def garbage_collect_dataset(dataset_uuid=None, store=None, factory=None):
    """
    Remove auxiliary files that are no longer tracked by the dataset.
    These files include indices that are no longer referenced by the metadata
    as well as files in the directories of the tables that are no longer
    referenced. The latter is only applied to static datasets.
    Parameters
    ----------
    """
    dataset_factory = _ensure_factory(
        dataset_uuid=dataset_uuid,
        store=store,
        factory=factory,
        load_dataset_metadata=False,
    )

    # Without a chunk_size the dispatcher yields a single chunk of keys, so the
    # first `next()` already covers everything that needs to be removed.
    gc_chunks = dispatch_files_to_gc(
        dataset_uuid=None, store_factory=None, chunk_size=None, factory=dataset_factory
    )
    return delete_files(next(gc_chunks), store_factory=dataset_factory.store_factory)
def copy_dataset(
    source_dataset_uuid: str,
    store: KeyValueStore,
    target_dataset_uuid: Optional[str] = None,
    target_store: Optional[KeyValueStore] = None,
) -> Dict[str, DatasetMetadata]:
    """
    Copies and optionally renames a dataset, either from one store to another or
    within one store.

    Parameters
    ----------
    source_dataset_uuid: str
        UUID of source dataset
    store: simplekv.KeyValueStore
        Source store
    target_dataset_uuid: Optional[str]
        UUID of target dataset. May be the same as source_dataset_uuid, if store
        and target_store are different. If empty, source_dataset_uuid is used
    target_store: Optional[simplekv.KeyValueStore]
        Target Store. May be the same as store, if source_dataset_uuid and
        target_dataset_uuid are different. If empty, value from parameter store is
        used

    Returns
    -------
    Dict[str, DatasetMetadata]
        Mapping of the rewritten metadata key to the renamed dataset's metadata.

    Raises
    ------
    ValueError
        If source and target UUID are equal while source and target store are
        equal as well (the copy would overwrite itself).
    """
    if target_dataset_uuid is None:
        target_dataset_uuid = source_dataset_uuid
    if target_store is None:
        target_store = store

    # Use logical `and` instead of the previous bitwise `&`: `and` short-circuits
    # and is well-defined even if a store's `__eq__` returns a non-bool object.
    if (source_dataset_uuid == target_dataset_uuid) and (store == target_store):
        raise ValueError(
            "Cannot copy to a dataset with the same UUID within the same store!"
        )

    ds_factory_source = _ensure_factory(
        dataset_uuid=source_dataset_uuid,
        store=store,
        factory=None,
        load_dataset_metadata=True,
    )

    # Create a dict of {source key: target key} entries
    keys = get_dataset_keys(ds_factory_source.dataset_metadata)
    mapped_keys = {
        source_key: source_key.replace(source_dataset_uuid, target_dataset_uuid)
        for source_key in keys
    }

    # Create a dict of metadata which has to be changed. This is only the
    # <uuid>.by-dataset-metadata.json file
    md_transformed = {
        f"{target_dataset_uuid}{METADATA_BASE_SUFFIX}{METADATA_FORMAT_JSON}": DatasetMetadataBuilder.from_dataset(
            ds_factory_source.dataset_metadata
        )
        .modify_uuid(target_dataset_uuid)
        .to_dataset()
    }
    # Copy the keys from one store to another
    copy_rename_keys(mapped_keys, store, target_store, md_transformed)

    return md_transformed
| 31.824405 | 193 | 0.687989 | import warnings
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union, cast
import deprecation
import pandas as pd
from simplekv import KeyValueStore
from kartothek.core.common_metadata import (
empty_dataframe_from_schema,
make_meta,
store_schema_metadata,
)
from kartothek.core.dataset import DatasetMetadata, DatasetMetadataBuilder
from kartothek.core.docs import default_docs
from kartothek.core.factory import DatasetFactory, _ensure_factory
from kartothek.core.naming import (
DEFAULT_METADATA_STORAGE_FORMAT,
DEFAULT_METADATA_VERSION,
METADATA_BASE_SUFFIX,
METADATA_FORMAT_JSON,
PARQUET_FILE_SUFFIX,
get_partition_file_prefix,
)
from kartothek.core.typing import StoreInput
from kartothek.core.utils import lazy_store
from kartothek.io.iter import store_dataframes_as_dataset__iter
from kartothek.io_components.delete import (
delete_common_metadata,
delete_indices,
delete_top_level_metadata,
)
from kartothek.io_components.gc import delete_files, dispatch_files_to_gc
from kartothek.io_components.index import update_indices_from_partitions
from kartothek.io_components.metapartition import (
SINGLE_TABLE,
MetaPartition,
parse_input_to_metapartition,
)
from kartothek.io_components.read import dispatch_metapartitions_from_factory
from kartothek.io_components.update import update_dataset_from_partitions
from kartothek.io_components.utils import (
_ensure_compatible_indices,
align_categories,
normalize_args,
sort_values_categorical,
validate_partition_keys,
)
from kartothek.io_components.write import raise_if_dataset_exists
from kartothek.serialization import DataFrameSerializer
from kartothek.serialization._parquet import ParquetSerializer
from kartothek.utils.ktk_adapters import get_dataset_keys
from kartothek.utils.migration_helpers import (
DEPRECATION_WARNING_REMOVE_PARAMETER,
deprecate_parameters,
deprecate_parameters_if_set,
get_deprecation_warning_remove_dict_multi_table,
get_deprecation_warning_remove_parameter_multi_table,
get_generic_function_deprecation_waring,
get_parameter_default_value_deprecation_warning,
get_parameter_generic_replacement_deprecation_warning,
get_parameter_type_change_deprecation_warning,
)
from kartothek.utils.store import copy_rename_keys
__all__ = (
"delete_dataset",
"read_dataset_as_dataframes",
"read_table",
"commit_dataset",
"store_dataframes_as_dataset",
"create_empty_dataset_header",
"write_single_partition",
"update_dataset_from_dataframes",
"build_dataset_indices",
"garbage_collect_dataset",
"copy_dataset",
)
@default_docs
@normalize_args
def delete_dataset(dataset_uuid=None, store=None, factory=None):
ds_factory = _ensure_factory(
dataset_uuid=dataset_uuid,
load_schema=False,
store=store,
factory=factory,
load_dataset_metadata=False,
)
garbage_collect_dataset(factory=ds_factory)
delete_indices(dataset_factory=ds_factory)
for metapartition in dispatch_metapartitions_from_factory(ds_factory):
metapartition = cast(MetaPartition, metapartition)
metapartition.delete_from_store(dataset_uuid=dataset_uuid, store=store)
delete_common_metadata(dataset_factory=ds_factory)
delete_top_level_metadata(dataset_factory=ds_factory)
@default_docs
@deprecate_parameters(
get_parameter_default_value_deprecation_warning(
from_value="False", to_value="True", deprecated_in="5.3", changed_in="6.0"
),
"dates_as_object",
)
@deprecate_parameters_if_set(
get_deprecation_warning_remove_dict_multi_table(
deprecated_in="5.3", changed_in="6.0"
),
"categoricals",
)
@deprecate_parameters_if_set(
get_deprecation_warning_remove_parameter_multi_table(
deprecated_in="5.3", removed_in="6.0"
),
"tables",
"label_filter",
"concat_partitions_on_primary_index",
)
def read_dataset_as_dataframes(
dataset_uuid: Optional[str] = None,
store=None,
tables: Optional[List[str]] = None,
columns: Dict[str, List[str]] = None,
concat_partitions_on_primary_index: bool = False,
predicate_pushdown_to_io: bool = True,
categoricals: Dict[str, List[str]] = None,
label_filter: Callable = None,
dates_as_object: bool = False,
predicates: Optional[List[List[Tuple[str, str, Any]]]] = None,
factory: Optional[DatasetFactory] = None,
dispatch_by: Optional[List[str]] = None,
) -> List[pd.DataFrame]:
ds_factory = _ensure_factory(
dataset_uuid=dataset_uuid,
store=store,
factory=factory,
load_dataset_metadata=True,
)
mps = read_dataset_as_metapartitions(
tables=tables,
columns=columns,
concat_partitions_on_primary_index=concat_partitions_on_primary_index,
predicate_pushdown_to_io=predicate_pushdown_to_io,
categoricals=categoricals,
label_filter=label_filter,
dates_as_object=dates_as_object,
predicates=predicates,
factory=ds_factory,
dispatch_by=dispatch_by,
dispatch_metadata=False,
)
return [mp.data for mp in mps]
@default_docs
@deprecate_parameters(
get_parameter_default_value_deprecation_warning(
from_value="False", to_value="True", deprecated_in="5.3", changed_in="6.0"
),
"dates_as_object",
)
@deprecate_parameters_if_set(
get_deprecation_warning_remove_parameter_multi_table(
deprecated_in="5.3", removed_in="6.0"
),
"tables",
"concat_partitions_on_primary_index",
"label_filter",
"dispatch_metadata",
)
def read_dataset_as_metapartitions(
dataset_uuid=None,
store=None,
tables=None,
columns=None,
concat_partitions_on_primary_index=False,
predicate_pushdown_to_io=True,
categoricals=None,
label_filter=None,
dates_as_object=False,
predicates=None,
factory=None,
dispatch_by=None,
dispatch_metadata=True,
):
ds_factory = _ensure_factory(
dataset_uuid=dataset_uuid,
store=store,
factory=factory,
load_dataset_metadata=False,
)
if len(ds_factory.tables) > 1:
warnings.warn(
"Trying to read a dataset with multiple internal tables. This functionality will be removed in the next "
"major release. If you require a multi tabled data format, we recommend to switch to the kartothek Cube "
"functionality. "
"https://kartothek.readthedocs.io/en/stable/guide/cube/kartothek_cubes.html",
DeprecationWarning,
)
from .iter import read_dataset_as_metapartitions__iterator
ds_iter = read_dataset_as_metapartitions__iterator(
tables=tables,
columns=columns,
concat_partitions_on_primary_index=concat_partitions_on_primary_index,
predicate_pushdown_to_io=predicate_pushdown_to_io,
categoricals=categoricals,
label_filter=label_filter,
dates_as_object=dates_as_object,
predicates=predicates,
factory=ds_factory,
dispatch_by=dispatch_by,
dispatch_metadata=dispatch_metadata,
)
return list(ds_iter)
@deprecation.deprecated(
deprecated_in="5.3",
removed_in="6.0",
details=get_generic_function_deprecation_waring(
function_name="_check_compatible_list"
),
)
def _check_compatible_list(table, obj, argument_name=""):
if obj is None:
return obj
elif isinstance(obj, dict):
if table not in obj:
raise ValueError(
"Provided table {} is not compatible with input from argument {}.".format(
table, argument_name
)
)
return obj
elif isinstance(obj, list):
return {table: obj}
else:
raise TypeError(
"Unknown type encountered for argument {}. Expected `list`, got `{}` instead".format(
argument_name, type(obj)
)
)
@default_docs
@deprecate_parameters(
get_parameter_default_value_deprecation_warning(
from_value="False", to_value="True", deprecated_in="5.3", changed_in="6.0"
),
"dates_as_object",
)
@deprecate_parameters_if_set(
get_deprecation_warning_remove_dict_multi_table(
deprecated_in="5.3", changed_in="6.0"
),
"categoricals",
)
@deprecate_parameters_if_set(
get_deprecation_warning_remove_parameter_multi_table(
deprecated_in="5.3", removed_in="6.0"
),
"table",
"concat_partitions_on_primary_index",
"label_filter",
)
def read_table(
dataset_uuid: Optional[str] = None,
store=None,
table: Optional[str] = SINGLE_TABLE,
columns: Dict[str, List[str]] = None,
concat_partitions_on_primary_index: bool = False,
predicate_pushdown_to_io: bool = True,
categoricals: Dict[str, List[str]] = None,
label_filter: Callable = None,
dates_as_object: bool = False,
predicates: Optional[List[List[Tuple[str, str, Any]]]] = None,
factory: Optional[DatasetFactory] = None,
) -> pd.DataFrame:
if not isinstance(table, str):
raise TypeError("Argument `table` needs to be a string")
columns = _check_compatible_list(table, columns, "columns")
categoricals = _check_compatible_list(table, categoricals, "categoricals")
ds_factory = _ensure_factory(
dataset_uuid=dataset_uuid,
store=store,
factory=factory,
load_dataset_metadata=False,
)
partitions = read_dataset_as_dataframes(
tables=[table],
columns=columns,
concat_partitions_on_primary_index=concat_partitions_on_primary_index,
predicate_pushdown_to_io=predicate_pushdown_to_io,
categoricals=categoricals,
label_filter=label_filter,
dates_as_object=dates_as_object,
predicates=predicates,
factory=ds_factory,
)
empty_df = empty_dataframe_from_schema(
schema=ds_factory.table_meta[table],
columns=columns[table] if columns is not None else None,
)
if categoricals:
empty_df = empty_df.astype({col: "category" for col in categoricals[table]})
dfs = [partition_data[table] for partition_data in partitions] + [empty_df]
if categoricals:
dfs = align_categories(dfs, categoricals[table])
df = pd.concat(dfs, ignore_index=True, sort=False)
# ensure column order
if len(empty_df.columns) > 0 and list(empty_df.columns) != list(df.columns):
df = df.reindex(empty_df.columns, copy=False, axis=1)
return df
@default_docs
@normalize_args
@deprecate_parameters_if_set(
    DEPRECATION_WARNING_REMOVE_PARAMETER, "output_dataset_uuid", "df_serializer",
)
def commit_dataset(
    store: Optional[StoreInput] = None,
    dataset_uuid: Optional[str] = None,
    new_partitions: Optional[Iterable[MetaPartition]] = None,
    output_dataset_uuid: Optional[str] = None,
    delete_scope: Optional[Iterable[Dict[str, Any]]] = None,
    metadata: Dict = None,
    df_serializer: DataFrameSerializer = None,
    metadata_merger: Callable[[List[Dict]], Dict] = None,
    default_metadata_version: int = DEFAULT_METADATA_VERSION,
    partition_on: Optional[Iterable[str]] = None,
    factory: Optional[DatasetFactory] = None,
    secondary_indices: Optional[Iterable[str]] = None,
):
    """Commit new partitions, metadata changes and/or deletions to a dataset.

    At least one of ``new_partitions``, ``metadata`` or ``delete_scope`` must
    be supplied; otherwise there is nothing to commit.

    Returns
    -------
    The updated dataset metadata object returned by
    ``update_dataset_from_partitions``.

    Raises
    ------
    ValueError
        If no new data, metadata or delete scope is provided.
    """
    if not new_partitions and not metadata and not delete_scope:
        raise ValueError(
            "Need to provide either new data, new metadata or a delete scope. None of it was provided."
        )
    store = lazy_store(store)
    ds_factory, metadata_version, partition_on = validate_partition_keys(
        dataset_uuid=dataset_uuid,
        store=store,
        ds_factory=factory,
        default_metadata_version=default_metadata_version,
        partition_on=partition_on,
    )
    mps = parse_input_to_metapartition(
        new_partitions, metadata_version=metadata_version
    )
    if secondary_indices:
        mps = mps.build_indices(columns=secondary_indices)
    # Partitions committed without explicit `files` get them inferred from
    # their label/table information before the dataset header is updated.
    mps_list = [_maybe_infer_files_attribute(mp, dataset_uuid) for mp in mps]
    dmd = update_dataset_from_partitions(
        mps_list,
        store_factory=store,
        dataset_uuid=dataset_uuid,
        ds_factory=ds_factory,
        delete_scope=delete_scope,
        metadata=metadata,
        metadata_merger=metadata_merger,
    )
    return dmd
def _maybe_infer_files_attribute(metapartition, dataset_uuid):
    """Fill in the ``files`` attribute of every sub-partition that lacks one.

    File keys are derived from the partition label, table name and metadata
    version; sub-partitions already carrying ``files`` pass through unchanged.

    Raises
    ------
    ValueError
        If a sub-partition has neither ``files`` nor ``data`` to infer from.
    """
    result = metapartition.as_sentinel()
    for sub_mp in metapartition:
        if len(sub_mp.files) == 0:
            if sub_mp.data is None or len(sub_mp.data) == 0:
                raise ValueError(
                    "Trying to commit partitions without `data` or `files` information."
                    "Either one is necessary to infer the dataset tables"
                )
            inferred_files = {
                table: get_partition_file_prefix(
                    dataset_uuid=dataset_uuid,
                    partition_label=sub_mp.label,
                    table=table,
                    metadata_version=sub_mp.metadata_version,
                )
                + PARQUET_FILE_SUFFIX
                for table in sub_mp.data
            }
            sub_mp = sub_mp.copy(files=inferred_files)
        result = result.add_metapartition(sub_mp)
    return result
@default_docs
@normalize_args
@deprecate_parameters_if_set(
    get_parameter_type_change_deprecation_warning(
        from_type="Optional[ParquetSerializer]",
        to_type="Optional[DataFrameSerializer]",
        deprecated_in="5.3",
        changed_in="6.0",
    ),
    "df_serializer",
)
def store_dataframes_as_dataset(
    store: KeyValueStore,
    dataset_uuid: str,
    dfs: List[Union[pd.DataFrame, Dict[str, pd.DataFrame]]],
    metadata: Optional[Dict[str, Dict[str, Any]]] = None,
    partition_on: Optional[List[str]] = None,
    df_serializer: Optional[ParquetSerializer] = None,
    overwrite: bool = False,
    secondary_indices=None,
    metadata_storage_format: str = DEFAULT_METADATA_STORAGE_FORMAT,
    metadata_version: int = DEFAULT_METADATA_VERSION,
):
    """Store a list of dataframes as a new dataset.

    Thin wrapper around ``store_dataframes_as_dataset__iter``; a single
    dataframe/dict is still accepted for backwards compatibility but that
    usage is deprecated.
    """
    if isinstance(dfs, (pd.DataFrame, dict)):
        warnings.warn(
            "Passing a single dataframe instead of an iterable is deprecated and may "
            "be removed in the next major release.",
            DeprecationWarning,
        )
        # Wrap the single input so the iterator-based implementation can be used.
        dfs = [dfs]
    return store_dataframes_as_dataset__iter(
        dfs,
        store=store,
        dataset_uuid=dataset_uuid,
        metadata=metadata,
        partition_on=partition_on,
        df_serializer=df_serializer,
        overwrite=overwrite,
        secondary_indices=secondary_indices,
        metadata_storage_format=metadata_storage_format,
        metadata_version=metadata_version,
    )
@default_docs
@normalize_args
@deprecate_parameters_if_set(
    get_parameter_generic_replacement_deprecation_warning(
        replacing_parameter="schema", deprecated_in="5.3", changed_in="6.0"
    ),
    "table_meta",
)
def create_empty_dataset_header(
    store,
    dataset_uuid,
    table_meta,
    partition_on=None,
    metadata=None,
    overwrite=False,
    metadata_storage_format=DEFAULT_METADATA_STORAGE_FORMAT,
    metadata_version=DEFAULT_METADATA_VERSION,
):
    """Create the dataset header (schemas + metadata) without any partitions.

    Raises
    ------
    ValueError
        If ``metadata_storage_format`` is neither ``"json"`` nor ``"msgpack"``.
    """
    store = lazy_store(store)()
    if not overwrite:
        raise_if_dataset_exists(dataset_uuid=dataset_uuid, store=store)
    # NOTE(review): `table_meta` is mutated in place here — the caller's dict
    # ends up holding the normalized schemas. Confirm callers rely on this
    # before changing it.
    for table, schema in table_meta.items():
        table_meta[table] = make_meta(schema, origin=table, partition_keys=partition_on)
        store_schema_metadata(
            schema=table_meta[table],
            dataset_uuid=dataset_uuid,
            store=store,
            table=table,
        )
    dataset_builder = DatasetMetadataBuilder(
        uuid=dataset_uuid,
        metadata_version=metadata_version,
        partition_keys=partition_on,
        explicit_partitions=False,
        table_meta=table_meta,
    )
    if metadata:
        for key, value in metadata.items():
            dataset_builder.add_metadata(key, value)
    # Persist the dataset header in the requested serialization format.
    if metadata_storage_format.lower() == "json":
        store.put(*dataset_builder.to_json())
    elif metadata_storage_format.lower() == "msgpack":
        store.put(*dataset_builder.to_msgpack())
    else:
        raise ValueError(
            "Unknown metadata storage format encountered: {}".format(
                metadata_storage_format
            )
        )
    return dataset_builder.to_dataset()
@default_docs
@normalize_args
@deprecate_parameters_if_set(
    get_parameter_type_change_deprecation_warning(
        from_type="Optional[ParquetSerializer]",
        to_type="Optional[DataFrameSerializer]",
        deprecated_in="5.3",
        changed_in="6.0",
    ),
    "df_serializer",
)
@deprecate_parameters_if_set(
    DEPRECATION_WARNING_REMOVE_PARAMETER, "metadata", "overwrite", "metadata_merger",
)
def write_single_partition(
    store: Optional[KeyValueStore] = None,
    dataset_uuid: Optional[str] = None,
    data=None,
    metadata: Optional[Dict[str, Dict[str, Any]]] = None,
    df_serializer: Optional[ParquetSerializer] = None,
    overwrite: bool = False,
    metadata_merger=None,
    metadata_version: int = DEFAULT_METADATA_VERSION,
    partition_on: Optional[List[str]] = None,
    factory=None,
    secondary_indices=None,
):
    """Write a single partition's dataframes to the store without updating the
    dataset header.

    The input is validated against the stored schema before writing; the
    resulting (stored) ``MetaPartition`` is returned so a later call — e.g.
    ``commit_dataset`` — can register it.

    Raises
    ------
    TypeError
        If ``data`` is not provided.
    """
    if data is None:
        raise TypeError("The parameter `data` is not optional")
    # Resolve the dataset's metadata version and partition keys; the factory
    # itself is not needed afterwards.
    _, ds_metadata_version, partition_on = validate_partition_keys(
        dataset_uuid=dataset_uuid,
        store=lazy_store(store),
        ds_factory=factory,
        default_metadata_version=metadata_version,
        partition_on=partition_on,
    )
    mp = parse_input_to_metapartition(obj=data, metadata_version=ds_metadata_version)
    if partition_on:
        mp = mp.partition_on(partition_on)
    if secondary_indices:
        mp = mp.build_indices(columns=secondary_indices)
    # Schema check must happen before the (side-effecting) store step below.
    mp = mp.validate_schema_compatible(dataset_uuid=dataset_uuid, store=store)
    mp = mp.store_dataframes(
        store=store, dataset_uuid=dataset_uuid, df_serializer=df_serializer
    )
    return mp
@default_docs
@normalize_args
@deprecate_parameters_if_set(
    get_parameter_type_change_deprecation_warning(
        from_type="Optional[ParquetSerializer]",
        to_type="Optional[DataFrameSerializer]",
        deprecated_in="5.3",
        changed_in="6.0",
    ),
    "df_serializer",
)
@deprecate_parameters_if_set(
    DEPRECATION_WARNING_REMOVE_PARAMETER,
    "central_partition_metadata",
    "load_dynamic_metadata",
)
def update_dataset_from_dataframes(
    df_list: List[Union[pd.DataFrame, Dict[str, pd.DataFrame]]],
    store: Optional[KeyValueStore] = None,
    dataset_uuid: Optional[str] = None,
    delete_scope=None,
    metadata=None,
    df_serializer: Optional[ParquetSerializer] = None,
    metadata_merger: Callable = None,
    central_partition_metadata: bool = True,
    default_metadata_version: int = DEFAULT_METADATA_VERSION,
    partition_on: Optional[List[str]] = None,
    load_dynamic_metadata: bool = True,
    sort_partitions_by: Optional[str] = None,
    secondary_indices: Optional[List[str]] = None,
    factory: Optional[DatasetFactory] = None,
) -> DatasetMetadata:
    """Update an existing dataset with new dataframes (and/or deletions).

    The dataframes are parsed, optionally sorted and partitioned, written to
    the store, and finally registered in the dataset header.

    Returns
    -------
    DatasetMetadata
        The updated dataset metadata.
    """
    ds_factory, metadata_version, partition_on = validate_partition_keys(
        dataset_uuid=dataset_uuid,
        store=store,
        ds_factory=factory,
        default_metadata_version=default_metadata_version,
        partition_on=partition_on,
    )
    # Merge requested secondary indices with those already declared on the
    # dataset; use only the merged result from here on.
    inferred_indices = _ensure_compatible_indices(ds_factory, secondary_indices)
    del secondary_indices
    mp = parse_input_to_metapartition(
        df_list,
        metadata_version=metadata_version,
        expected_secondary_indices=inferred_indices,
    )
    if sort_partitions_by:
        mp = mp.apply(partial(sort_values_categorical, columns=sort_partitions_by))
    if partition_on:
        mp = mp.partition_on(partition_on)
    if inferred_indices:
        mp = mp.build_indices(inferred_indices)
    mp = mp.store_dataframes(
        store=store, dataset_uuid=dataset_uuid, df_serializer=df_serializer
    )
    return update_dataset_from_partitions(
        mp,
        store_factory=store,
        dataset_uuid=dataset_uuid,
        ds_factory=ds_factory,
        delete_scope=delete_scope,
        metadata=metadata,
        metadata_merger=metadata_merger,
    )
@default_docs
@normalize_args
def build_dataset_indices(store, dataset_uuid, columns, factory=None):
    """(Re)build secondary indices for *columns* across all dataset partitions.

    Each partition is loaded with only the columns that actually exist in its
    table, the indices are computed, and the dataframes are dropped again
    before the index update is persisted.
    """
    ds_factory = _ensure_factory(
        dataset_uuid=dataset_uuid,
        store=store,
        factory=factory,
        load_dataset_metadata=False,
    )
    # Per table, keep only the requested columns that exist in its schema;
    # tables with no matching column are skipped entirely.
    requested = set(columns)
    cols_to_load = {}
    for table_name, meta in ds_factory.table_meta.items():
        present = requested & set(meta.names)
        if present:
            cols_to_load[table_name] = present
    new_partitions = []
    for mp in dispatch_metapartitions_from_factory(ds_factory):
        loaded = mp.load_dataframes(
            store=ds_factory.store,
            tables=list(cols_to_load.keys()),
            columns=cols_to_load,
        )
        indexed = loaded.build_indices(columns=columns)
        # Free the dataframes; only the index information is needed from here on.
        new_partitions.append(indexed.remove_dataframes())
    return update_indices_from_partitions(
        new_partitions, dataset_metadata_factory=ds_factory
    )
@default_docs
@normalize_args
def garbage_collect_dataset(dataset_uuid=None, store=None, factory=None):
    """Remove files from the store that the dataset no longer references."""
    ds_factory = _ensure_factory(
        dataset_uuid=dataset_uuid,
        store=store,
        factory=factory,
        load_dataset_metadata=False,
    )
    nested_files = dispatch_files_to_gc(
        dataset_uuid=None, store_factory=None, chunk_size=None, factory=ds_factory
    )
    # With chunk_size=None the generator yields exactly one batch of files;
    # deleting that batch is the whole garbage collection.
    file_batch = next(nested_files)
    return delete_files(file_batch, store_factory=ds_factory.store_factory)
def copy_dataset(
    source_dataset_uuid: str,
    store: KeyValueStore,
    target_dataset_uuid: Optional[str] = None,
    target_store: Optional[KeyValueStore] = None,
) -> Dict[str, DatasetMetadata]:
    """Copy a dataset to a new UUID and/or into a different store.

    Parameters
    ----------
    source_dataset_uuid
        UUID of the dataset to copy.
    store
        Store holding the source dataset.
    target_dataset_uuid
        UUID of the copy; defaults to the source UUID (requires a different store).
    target_store
        Store receiving the copy; defaults to the source store (requires a
        different UUID).

    Returns
    -------
    Mapping of the rewritten dataset-metadata key to the transformed
    ``DatasetMetadata`` object.

    Raises
    ------
    ValueError
        If source and target refer to the identical dataset in the identical store.
    """
    if target_dataset_uuid is None:
        target_dataset_uuid = source_dataset_uuid
    if target_store is None:
        target_store = store
    # Fix: use boolean `and` instead of bitwise `&` — it short-circuits and does
    # not depend on the store `==` comparison returning a plain bool.
    if (source_dataset_uuid == target_dataset_uuid) and (store == target_store):
        raise ValueError(
            "Cannot copy to a dataset with the same UUID within the same store!"
        )
    ds_factory_source = _ensure_factory(
        dataset_uuid=source_dataset_uuid,
        store=store,
        factory=None,
        load_dataset_metadata=True,
    )
    # Create a dict of {source key: target key} entries
    keys = get_dataset_keys(ds_factory_source.dataset_metadata)
    mapped_keys = {
        source_key: source_key.replace(source_dataset_uuid, target_dataset_uuid)
        for source_key in keys
    }
    # Create a dict of metadata which has to be changed. This is only the
    # <uuid>.by-dataset-metadata.json file
    md_transformed = {
        f"{target_dataset_uuid}{METADATA_BASE_SUFFIX}{METADATA_FORMAT_JSON}": DatasetMetadataBuilder.from_dataset(
            ds_factory_source.dataset_metadata
        )
        .modify_uuid(target_dataset_uuid)
        .to_dataset()
    }
    # Copy the keys from one store to another
    copy_rename_keys(mapped_keys, store, target_store, md_transformed)
    return md_transformed
| true | true |
f7f48105fe1cdb06aa9f6fc1ec60545a296ae843 | 82,949 | py | Python | meraki/merakiapi.py | storybook808/Meraki-Bulk-Configuration-Tool | 65e240b89f16e02fd767f1e1a1742728c0f2cc0a | [
"MIT"
] | 1 | 2018-06-28T17:00:07.000Z | 2018-06-28T17:00:07.000Z | meraki/merakiapi.py | storybook808/Meraki-Bulk-Configuration-Tool | 65e240b89f16e02fd767f1e1a1742728c0f2cc0a | [
"MIT"
] | null | null | null | meraki/merakiapi.py | storybook808/Meraki-Bulk-Configuration-Tool | 65e240b89f16e02fd767f1e1a1742728c0f2cc0a | [
"MIT"
] | null | null | null | #######################################################################################################################
#
# Cisco Meraki Provisioning API Python 3.x Module
#
# Overview
# The purpose of this Python module is to provide a standard Python module to interact with the Meraki Provisioning API.
# Each method in this function interacts seamlessly with the API and either returns data from the method call or a
# status message indicating the result of the API call
#
# Dependencies
# - Python 3.x
# - 'requests' module
#
#######################################################################################################################
from __future__ import print_function
import requests
import json
from ipaddress import ip_address
import re
import warnings
tzlist = ['Africa/Abidjan',
'Africa/Accra',
'Africa/Addis_Ababa',
'Africa/Algiers',
'Africa/Asmara',
'Africa/Asmera',
'Africa/Bamako',
'Africa/Bangui',
'Africa/Banjul',
'Africa/Bissau',
'Africa/Blantyre',
'Africa/Brazzaville',
'Africa/Bujumbura',
'Africa/Cairo',
'Africa/Casablanca',
'Africa/Ceuta',
'Africa/Conakry',
'Africa/Dakar',
'Africa/Dar_es_Salaam',
'Africa/Djibouti',
'Africa/Douala',
'Africa/El_Aaiun',
'Africa/Freetown',
'Africa/Gaborone',
'Africa/Harare',
'Africa/Johannesburg',
'Africa/Juba',
'Africa/Kampala',
'Africa/Khartoum',
'Africa/Kigali',
'Africa/Kinshasa',
'Africa/Lagos',
'Africa/Libreville',
'Africa/Lome',
'Africa/Luanda',
'Africa/Lubumbashi',
'Africa/Lusaka',
'Africa/Malabo',
'Africa/Maputo',
'Africa/Maseru',
'Africa/Mbabane',
'Africa/Mogadishu',
'Africa/Monrovia',
'Africa/Nairobi',
'Africa/Ndjamena',
'Africa/Niamey',
'Africa/Nouakchott',
'Africa/Ouagadougou',
'Africa/Porto-Novo',
'Africa/Sao_Tome',
'Africa/Timbuktu',
'Africa/Tripoli',
'Africa/Tunis',
'Africa/Windhoek',
'America/Adak',
'America/Anchorage',
'America/Anguilla',
'America/Antigua',
'America/Araguaina',
'America/Argentina/Buenos_Aires',
'America/Argentina/Catamarca',
'America/Argentina/ComodRivadavia',
'America/Argentina/Cordoba',
'America/Argentina/Jujuy',
'America/Argentina/La_Rioja',
'America/Argentina/Mendoza',
'America/Argentina/Rio_Gallegos',
'America/Argentina/Salta',
'America/Argentina/San_Juan',
'America/Argentina/San_Luis',
'America/Argentina/Tucuman',
'America/Argentina/Ushuaia',
'America/Aruba',
'America/Asuncion',
'America/Atikokan',
'America/Atka',
'America/Bahia',
'America/Bahia_Banderas',
'America/Barbados',
'America/Belem',
'America/Belize',
'America/Blanc-Sablon',
'America/Boa_Vista',
'America/Bogota',
'America/Boise',
'America/Buenos_Aires',
'America/Cambridge_Bay',
'America/Campo_Grande',
'America/Cancun',
'America/Caracas',
'America/Catamarca',
'America/Cayenne',
'America/Cayman',
'America/Chicago',
'America/Chihuahua',
'America/Coral_Harbour',
'America/Cordoba',
'America/Costa_Rica',
'America/Creston',
'America/Cuiaba',
'America/Curacao',
'America/Danmarkshavn',
'America/Dawson',
'America/Dawson_Creek',
'America/Denver',
'America/Detroit',
'America/Dominica',
'America/Edmonton',
'America/Eirunepe',
'America/El_Salvador',
'America/Ensenada',
'America/Fort_Nelson',
'America/Fort_Wayne',
'America/Fortaleza',
'America/Glace_Bay',
'America/Godthab',
'America/Goose_Bay',
'America/Grand_Turk',
'America/Grenada',
'America/Guadeloupe',
'America/Guatemala',
'America/Guayaquil',
'America/Guyana',
'America/Halifax',
'America/Havana',
'America/Hermosillo',
'America/Indiana/Indianapolis',
'America/Indiana/Knox',
'America/Indiana/Marengo',
'America/Indiana/Petersburg',
'America/Indiana/Tell_City',
'America/Indiana/Vevay',
'America/Indiana/Vincennes',
'America/Indiana/Winamac',
'America/Indianapolis',
'America/Inuvik',
'America/Iqaluit',
'America/Jamaica',
'America/Jujuy',
'America/Juneau',
'America/Kentucky/Louisville',
'America/Kentucky/Monticello',
'America/Knox_IN',
'America/Kralendijk',
'America/La_Paz',
'America/Lima',
'America/Los_Angeles',
'America/Louisville',
'America/Lower_Princes',
'America/Maceio',
'America/Managua',
'America/Manaus',
'America/Marigot',
'America/Martinique',
'America/Matamoros',
'America/Mazatlan',
'America/Mendoza',
'America/Menominee',
'America/Merida',
'America/Metlakatla',
'America/Mexico_City',
'America/Miquelon',
'America/Moncton',
'America/Monterrey',
'America/Montevideo',
'America/Montreal',
'America/Montserrat',
'America/Nassau',
'America/New_York',
'America/Nipigon',
'America/Nome',
'America/Noronha',
'America/North_Dakota/Beulah',
'America/North_Dakota/Center',
'America/North_Dakota/New_Salem',
'America/Ojinaga',
'America/Panama',
'America/Pangnirtung',
'America/Paramaribo',
'America/Phoenix',
'America/Port_of_Spain',
'America/Port-au-Prince',
'America/Porto_Acre',
'America/Porto_Velho',
'America/Puerto_Rico',
'America/Rainy_River',
'America/Rankin_Inlet',
'America/Recife',
'America/Regina',
'America/Resolute',
'America/Rio_Branco',
'America/Rosario',
'America/Santa_Isabel',
'America/Santarem',
'America/Santiago',
'America/Santo_Domingo',
'America/Sao_Paulo',
'America/Scoresbysund',
'America/Shiprock',
'America/Sitka',
'America/St_Barthelemy',
'America/St_Johns',
'America/St_Kitts',
'America/St_Lucia',
'America/St_Thomas',
'America/St_Vincent',
'America/Swift_Current',
'America/Tegucigalpa',
'America/Thule',
'America/Thunder_Bay',
'America/Tijuana',
'America/Toronto',
'America/Tortola',
'America/Vancouver',
'America/Virgin',
'America/Whitehorse',
'America/Winnipeg',
'America/Yakutat',
'America/Yellowknife',
'Antarctica/Casey',
'Antarctica/Davis',
'Antarctica/DumontDUrville',
'Antarctica/Macquarie',
'Antarctica/Mawson',
'Antarctica/McMurdo',
'Antarctica/Palmer',
'Antarctica/Rothera',
'Antarctica/South_Pole',
'Antarctica/Syowa',
'Antarctica/Troll',
'Antarctica/Vostok',
'Arctic/Longyearbyen',
'Asia/Aden',
'Asia/Almaty',
'Asia/Amman',
'Asia/Anadyr',
'Asia/Aqtau',
'Asia/Aqtobe',
'Asia/Ashgabat',
'Asia/Ashkhabad',
'Asia/Baghdad',
'Asia/Bahrain',
'Asia/Baku',
'Asia/Bangkok',
'Asia/Barnaul',
'Asia/Beirut',
'Asia/Bishkek',
'Asia/Brunei',
'Asia/Calcutta',
'Asia/Chita',
'Asia/Choibalsan',
'Asia/Chongqing',
'Asia/Chungking',
'Asia/Colombo',
'Asia/Dacca',
'Asia/Damascus',
'Asia/Dhaka',
'Asia/Dili',
'Asia/Dubai',
'Asia/Dushanbe',
'Asia/Gaza',
'Asia/Harbin',
'Asia/Hebron',
'Asia/Ho_Chi_Minh',
'Asia/Hong_Kong',
'Asia/Hovd',
'Asia/Irkutsk',
'Asia/Istanbul',
'Asia/Jakarta',
'Asia/Jayapura',
'Asia/Jerusalem',
'Asia/Kabul',
'Asia/Kamchatka',
'Asia/Karachi',
'Asia/Kashgar',
'Asia/Kathmandu',
'Asia/Katmandu',
'Asia/Khandyga',
'Asia/Kolkata',
'Asia/Krasnoyarsk',
'Asia/Kuala_Lumpur',
'Asia/Kuching',
'Asia/Kuwait',
'Asia/Macao',
'Asia/Macau',
'Asia/Magadan',
'Asia/Makassar',
'Asia/Manila',
'Asia/Muscat',
'Asia/Nicosia',
'Asia/Novokuznetsk',
'Asia/Novosibirsk',
'Asia/Omsk',
'Asia/Oral',
'Asia/Phnom_Penh',
'Asia/Pontianak',
'Asia/Pyongyang',
'Asia/Qatar',
'Asia/Qyzylorda',
'Asia/Rangoon',
'Asia/Riyadh',
'Asia/Saigon',
'Asia/Sakhalin',
'Asia/Samarkand',
'Asia/Seoul',
'Asia/Shanghai',
'Asia/Singapore',
'Asia/Srednekolymsk',
'Asia/Taipei',
'Asia/Tashkent',
'Asia/Tbilisi',
'Asia/Tehran',
'Asia/Tel_Aviv',
'Asia/Thimbu',
'Asia/Thimphu',
'Asia/Tokyo',
'Asia/Tomsk',
'Asia/Ujung_Pandang',
'Asia/Ulaanbaatar',
'Asia/Ulan_Bator',
'Asia/Urumqi',
'Asia/Ust-Nera',
'Asia/Vientiane',
'Asia/Vladivostok',
'Asia/Yakutsk',
'Asia/Yekaterinburg',
'Asia/Yerevan',
'Atlantic/Azores',
'Atlantic/Bermuda',
'Atlantic/Canary',
'Atlantic/Cape_Verde',
'Atlantic/Faeroe',
'Atlantic/Faroe',
'Atlantic/Jan_Mayen',
'Atlantic/Madeira',
'Atlantic/Reykjavik',
'Atlantic/South_Georgia',
'Atlantic/St_Helena',
'Atlantic/Stanley',
'Australia/ACT',
'Australia/Adelaide',
'Australia/Brisbane',
'Australia/Broken_Hill',
'Australia/Canberra',
'Australia/Currie',
'Australia/Darwin',
'Australia/Eucla',
'Australia/Hobart',
'Australia/LHI',
'Australia/Lindeman',
'Australia/Lord_Howe',
'Australia/Melbourne',
'Australia/North',
'Australia/NSW',
'Australia/Perth',
'Australia/Queensland',
'Australia/South',
'Australia/Sydney',
'Australia/Tasmania',
'Australia/Victoria',
'Australia/West',
'Australia/Yancowinna',
'Brazil/Acre',
'Brazil/DeNoronha',
'Brazil/East',
'Brazil/West',
'Canada/Atlantic',
'Canada/Central',
'Canada/Eastern',
'Canada/East-Saskatchewan',
'Canada/Mountain',
'Canada/Newfoundland',
'Canada/Pacific',
'Canada/Saskatchewan',
'Canada/Yukon',
'CET',
'Chile/Continental',
'Chile/EasterIsland',
'CST6CDT',
'Cuba',
'EET',
'Egypt',
'Eire',
'EST',
'EST5EDT',
'Etc/GMT',
'Etc/GMT+0',
'Etc/GMT+1',
'Etc/GMT+10',
'Etc/GMT+11',
'Etc/GMT+12',
'Etc/GMT+2',
'Etc/GMT+3',
'Etc/GMT+4',
'Etc/GMT+5',
'Etc/GMT+6',
'Etc/GMT+7',
'Etc/GMT+8',
'Etc/GMT+9',
'Etc/GMT0',
'Etc/GMT-0',
'Etc/GMT-1',
'Etc/GMT-10',
'Etc/GMT-11',
'Etc/GMT-12',
'Etc/GMT-13',
'Etc/GMT-14',
'Etc/GMT-2',
'Etc/GMT-3',
'Etc/GMT-4',
'Etc/GMT-5',
'Etc/GMT-6',
'Etc/GMT-7',
'Etc/GMT-8',
'Etc/GMT-9',
'Etc/Greenwich',
'Etc/UCT',
'Etc/Universal',
'Etc/UTC',
'Etc/Zulu',
'Europe/Amsterdam',
'Europe/Andorra',
'Europe/Astrakhan',
'Europe/Athens',
'Europe/Belfast',
'Europe/Belgrade',
'Europe/Berlin',
'Europe/Bratislava',
'Europe/Brussels',
'Europe/Bucharest',
'Europe/Budapest',
'Europe/Busingen',
'Europe/Chisinau',
'Europe/Copenhagen',
'Europe/Dublin',
'Europe/Gibraltar',
'Europe/Guernsey',
'Europe/Helsinki',
'Europe/Isle_of_Man',
'Europe/Istanbul',
'Europe/Jersey',
'Europe/Kaliningrad',
'Europe/Kiev',
'Europe/Kirov',
'Europe/Lisbon',
'Europe/Ljubljana',
'Europe/London',
'Europe/Luxembourg',
'Europe/Madrid',
'Europe/Malta',
'Europe/Mariehamn',
'Europe/Minsk',
'Europe/Monaco',
'Europe/Moscow',
'Europe/Nicosia',
'Europe/Oslo',
'Europe/Paris',
'Europe/Podgorica',
'Europe/Prague',
'Europe/Riga',
'Europe/Rome',
'Europe/Samara',
'Europe/San_Marino',
'Europe/Sarajevo',
'Europe/Simferopol',
'Europe/Skopje',
'Europe/Sofia',
'Europe/Stockholm',
'Europe/Tallinn',
'Europe/Tirane',
'Europe/Tiraspol',
'Europe/Ulyanovsk',
'Europe/Uzhgorod',
'Europe/Vaduz',
'Europe/Vatican',
'Europe/Vienna',
'Europe/Vilnius',
'Europe/Volgograd',
'Europe/Warsaw',
'Europe/Zagreb',
'Europe/Zaporozhye',
'Europe/Zurich',
'GB',
'GB-Eire',
'GMT',
'GMT+0',
'GMT0',
'GMT-0',
'Greenwich',
'Hongkong',
'HST',
'Iceland',
'Indian/Antananarivo',
'Indian/Chagos',
'Indian/Christmas',
'Indian/Cocos',
'Indian/Comoro',
'Indian/Kerguelen',
'Indian/Mahe',
'Indian/Maldives',
'Indian/Mauritius',
'Indian/Mayotte',
'Indian/Reunion',
'Iran',
'Israel',
'Jamaica',
'Japan',
'Kwajalein',
'Libya',
'MET',
'Mexico/BajaNorte',
'Mexico/BajaSur',
'Mexico/General',
'MST',
'MST7MDT',
'Navajo',
'NZ',
'NZ-CHAT',
'Pacific/Apia',
'Pacific/Auckland',
'Pacific/Bougainville',
'Pacific/Chatham',
'Pacific/Chuuk',
'Pacific/Easter',
'Pacific/Efate',
'Pacific/Enderbury',
'Pacific/Fakaofo',
'Pacific/Fiji',
'Pacific/Funafuti',
'Pacific/Galapagos',
'Pacific/Gambier',
'Pacific/Guadalcanal',
'Pacific/Guam',
'Pacific/Honolulu',
'Pacific/Johnston',
'Pacific/Kiritimati',
'Pacific/Kosrae',
'Pacific/Kwajalein',
'Pacific/Majuro',
'Pacific/Marquesas',
'Pacific/Midway',
'Pacific/Nauru',
'Pacific/Niue',
'Pacific/Norfolk',
'Pacific/Noumea',
'Pacific/Pago_Pago',
'Pacific/Palau',
'Pacific/Pitcairn',
'Pacific/Pohnpei',
'Pacific/Ponape',
'Pacific/Port_Moresby',
'Pacific/Rarotonga',
'Pacific/Saipan',
'Pacific/Samoa',
'Pacific/Tahiti',
'Pacific/Tarawa',
'Pacific/Tongatapu',
'Pacific/Truk',
'Pacific/Wake',
'Pacific/Wallis',
'Pacific/Yap',
'Poland',
'Portugal',
'PRC',
'PST8PDT',
'ROC',
'ROK',
'Singapore',
'Turkey',
'UCT',
'Universal',
'US/Alaska',
'US/Aleutian',
'US/Arizona',
'US/Central',
'US/Eastern',
'US/East-Indiana',
'US/Hawaii',
'US/Indiana-Starke',
'US/Michigan',
'US/Mountain',
'US/Pacific',
'US/Pacific-New',
'US/Samoa',
'UTC',
'WET',
'W-SU',
'Zulu'
]
# Base URL prepended to every Dashboard API v0 endpoint used in this module.
base_url = 'https://dashboard.meraki.com/api/v0'
class Error(Exception):
    """Base exception for every error raised by this module."""
    pass


class ListLengthWarn(Warning):
    """Issued when zipped list lengths mismatch."""
    pass


class IgnoredArgument(Warning):
    """Issued when a supplied argument will be ignored."""
    pass


class OrgPermissionError(Error):
    """Raised when the supplied API key cannot access the given Organization ID."""

    def __init__(self):
        self.default = 'Invalid Organization ID - Current API Key does not have access to this Organization'

    def __str__(self):
        return repr(self.default)


class EmailFormatError(Error):
    """Raised when an e-mail address is not in the name@domain.dom format."""

    def __init__(self):
        self.default = 'Incorrect E-mail Address Format Entered - Must be in the format name@domain.dom'

    def __str__(self):
        return repr(self.default)


class ListError(Error):
    """Raised when an empty list is passed where one is required."""

    def __init__(self, message):
        self.message = message


class DashboardObject(object):
    """Base class for simplified Dashboard objects."""
    pass


class SSID(DashboardObject):
    """Simplified object for downloading and manipulating SSID attributes.

    Refer to https://dashboard.meraki.com/manage/support/api_docs#ssids for
    details on the accepted parameters.
    """

    validparams = ['name', 'enabled', 'authMode', 'encryptionMode', 'psk', 'radiusServers', 'radiusAccountingEnabled',
                   'radiusAccountingServers', 'ipAssignmentMode', 'useVlanTagging', 'concentratorNetworkId', 'vlanID',
                   'defaultVlanId', 'apTagsAndVlanIds', 'walledGardenEnabled', 'walledGardenRanges', 'splashPage',
                   'perClientBandwidthLimitUp', 'perClientBandwidthLimitDown']
    type = 'ssid'

    def __init__(self, ssidnum, **params):
        setattr(self, 'ssidnum', ssidnum)
        for name, value in params.items():
            if name not in self.validparams:
                raise ValueError('Invalid parameter {0}, please refer to https://dashboard.meraki.com/manage/support/'
                                 'api_docs#ssids for valid parameters'.format(str(name)))
            setattr(self, name, value)
def __isjson(myjson):
    #
    # Return True when the passed string parses as JSON, False otherwise.
    # Used to prevent json.loads exceptions elsewhere in the module.
    # (json.JSONDecodeError subclasses ValueError, so this catch covers it.)
    #
    # Fix: dropped the unused `json_object` binding — only parse success
    # matters, not the parsed value.
    #
    try:
        json.loads(myjson)
    except ValueError:
        return False
    return True
def __isvalidtz(tz):
    #
    # Validates if TZ exists in accepted TZ list
    #
    # Fix: the flag-carrying loop over tzlist is equivalent to a simple
    # membership test; behavior (including the raised message) is unchanged.
    #
    if str(tz) not in tzlist:
        raise ValueError(
            'Please enter a valid tz value from https://en.wikipedia.org/wiki/List_of_tz_database_time_zones')
    return None
def __comparelist(*args):
    #
    # Compare the lengths of multiple list arguments against the first list.
    # Raises ListError when a None list is passed; warns (ListLengthWarn) and
    # returns 2 on a length mismatch, otherwise returns 0.
    #
    # Fix: the None check previously ran *after* len(args[0]), so a leading
    # None raised TypeError instead of the documented ListError; also guard
    # against being called with no arguments at all.
    #
    if not args or any(lst is None for lst in args):
        raise ListError('Empty list passed to function')
    length = len(args[0])
    if any(len(lst) != length for lst in args):
        warnings.warn('All lists are not of equal length', ListLengthWarn)
        return 2
    else:
        return 0
def __hasorgaccess(apikey, targetorg):
    #
    # Validate that the API key has access to the passed Organization ID.
    # Returns None on success, raises OrgPermissionError when the org is not
    # in the key's organization list.
    #
    # Fix: removed the `orgs` accumulator (appended to but never read), the
    # unreachable trailing `return None` after the raise, and the no-op
    # `else: pass` branch. Behavior is unchanged, including silently
    # returning None when the Dashboard response is not valid JSON.
    #
    geturl = '{0}/organizations'.format(str(base_url))
    headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    dashboard = requests.get(geturl, headers=headers)
    if __isjson(dashboard.text):
        for org in json.loads(dashboard.text):
            if int(org['id']) == int(targetorg):
                return None
        raise OrgPermissionError
    # NOTE(review): a non-JSON response falls through without raising — this
    # mirrors the original behavior; confirm whether it should raise instead.
    return None
def __validemail(emailaddress):
    """Raise EmailFormatError unless *emailaddress* matches name@domain.dom."""
    pattern = r"[^@]+@[^@]+\.[^@]+"
    if re.match(pattern, emailaddress) is None:
        raise EmailFormatError
def __validip(ip):
    #
    # Validate IP format
    #
    # Accepts anything ipaddress.ip_address can parse (an IPv4 or IPv6
    # literal) and re-raises as a ValueError with a uniform message.
    #
    try:
        ip_address(ip)
    except ValueError:
        raise ValueError('Invalid IP Address')
def __validsubnetip(subnetip):
    """Validate a subnet given as ``#.#.#.#/#`` with a mask length of 1-30.

    Raises ValueError with a specific message for a malformed string, an
    out-of-range mask length, or an invalid address portion.
    """
    if not re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}[/]\d{1,2}$", subnetip):
        raise ValueError('Invalid Subnet IP Address {0} - Address must be formatted as #.#.#.#/#'.format(str(subnetip)))
    # The regex guarantees exactly one '/' separator.
    address, masklen = subnetip.split('/')
    if not 1 <= int(masklen) <= 30:
        raise ValueError('Invalid Subnet Mask Length {0} - Must be between 1 and 30'.format(str(subnetip)))
    try:
        ip_address(address)
    except ValueError:
        raise ValueError('Invalid Subnet IP Address {0}'.format(str(subnetip)))
def __listtotag(taglist):
    """Convert *taglist* to the space-separated tag string Dashboard expects.

    The result always starts with a single space and has a trailing space
    after every tag. Non-list iterables are coerced via ``list()`` first
    (note: a plain string would therefore be split into characters).
    """
    if not isinstance(taglist, list):
        taglist = list(taglist)
    return ' ' + ''.join(tag + ' ' for tag in taglist)
def __returnhandler(statuscode, returntext, objtype, suppressprint):
    #
    # Parses Dashboard return information and returns error data based on status code and error JSON
    #
    # Returns the decoded JSON body, the 'errors' payload, or None depending
    # on the status code; prints a human-readable summary unless
    # suppressprint is True.
    #
    # Fix: the plain '204' branch and the final fallback branch previously
    # printed even when suppressprint was True; they now honour the flag
    # like every other branch.
    #
    validreturn = __isjson(returntext)
    noerr = False
    errmesg = ''
    if validreturn:
        returntext = json.loads(returntext)
        try:
            errmesg = returntext['errors']
        except KeyError:
            noerr = True
        except TypeError:
            noerr = True
    if str(statuscode) == '200' and validreturn:
        if suppressprint is False:
            print('{0} Operation Successful - See returned data for results\n'.format(str(objtype)))
        return returntext
    elif str(statuscode) == '200':
        if suppressprint is False:
            print('{0} Operation Successful\n'.format(str(objtype)))
        return None
    elif str(statuscode) == '201' and validreturn:
        if suppressprint is False:
            print('{0} Added Successfully - See returned data for results\n'.format(str(objtype)))
        return returntext
    elif str(statuscode) == '201':
        if suppressprint is False:
            print('{0} Added Successfully\n'.format(str(objtype)))
        return None
    elif str(statuscode) == '204' and validreturn:
        if suppressprint is False:
            print('{0} Deleted Successfully - See returned data for results\n'.format(str(objtype)))
        return returntext
    elif str(statuscode) == '204':
        if suppressprint is False:
            print('{0} Deleted Successfully\n'.format(str(objtype)))
        return None
    elif str(statuscode) == '400' and validreturn and noerr is False:
        if suppressprint is False:
            print('Bad Request - See returned data for error details\n')
        return errmesg
    elif str(statuscode) == '400' and validreturn and noerr:
        if suppressprint is False:
            print('Bad Request - See returned data for details\n')
        return returntext
    elif str(statuscode) == '400':
        if suppressprint is False:
            print('Bad Request - No additional error data available\n')
    elif str(statuscode) == '401' and validreturn and noerr is False:
        if suppressprint is False:
            print('Unauthorized Access - See returned data for error details\n')
        return errmesg
    elif str(statuscode) == '401' and validreturn:
        if suppressprint is False:
            print('Unauthorized Access')
        return returntext
    elif str(statuscode) == '404' and validreturn and noerr is False:
        if suppressprint is False:
            print('Resource Not Found - See returned data for error details\n')
        return errmesg
    elif str(statuscode) == '404' and validreturn:
        if suppressprint is False:
            print('Resource Not Found')
        return returntext
    elif str(statuscode) == '500':
        if suppressprint is False:
            print('HTTP 500 - Server Error')
        # NOTE: returns the raw (possibly non-JSON) body, as before.
        return returntext
    elif validreturn and noerr is False:
        if suppressprint is False:
            print('HTTP Status Code: {0} - See returned data for error details\n'.format(str(statuscode)))
        return errmesg
    else:
        if suppressprint is False:
            print('HTTP Status Code: {0} - No returned data\n'.format(str(statuscode)))
def myorgaccess(apikey, suppressprint=False):
    """Query Dashboard for the organization IDs the API key has access to."""
    calltype = 'Organization'
    url = '{0}/organizations'.format(str(base_url))
    request_headers = {
        'X-Cisco-Meraki-API-Key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    response = requests.get(url, headers=request_headers)
    # Hand the raw response to the shared handler for parsing and printing.
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def getorg(apikey, orgid, suppressprint=False):
    """Return the details of a single organization as decoded JSON."""
    calltype = 'Organization'
    url = '{0}/organizations/{1}'.format(str(base_url), str(orgid))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    response = requests.get(url, headers=request_headers)
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def getorginventory(apikey, orgid, suppressprint=False):
    """Pull the organization inventory and return the decoded JSON string."""
    # Confirm the API key has admin access, otherwise OrgPermissionError is raised.
    __hasorgaccess(apikey, orgid)
    calltype = 'Inventory'
    url = '{0}/organizations/{1}/inventory'.format(str(base_url), str(orgid))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    response = requests.get(url, headers=request_headers)
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def getnetworkdevices(apikey, networkid, suppressprint=False):
    """Get the device inventory of a network as a decoded JSON string."""
    calltype = 'Network'
    url = '{0}/networks/{1}/devices'.format(str(base_url), str(networkid))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    response = requests.get(url, headers=request_headers)
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def getorgadmins(apikey, orgid, suppressprint=False):
    """Return the administrators of an organization as parsed JSON."""
    # Confirm the API key has admin access; raises on failure
    __hasorgaccess(apikey, orgid)
    calltype = 'Organization'
    endpoint = '{0}/organizations/{1}/admins'.format(str(base_url), str(orgid))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    response = requests.get(endpoint, headers=request_headers)
    # Delegate status/body parsing to the shared return handler
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def getnetworklist(apikey, orgid, suppressprint=False):
    """Return the networks of an organization as parsed JSON."""
    # Confirm the API key has admin access; raises on failure
    __hasorgaccess(apikey, orgid)
    calltype = 'Network'
    endpoint = '{0}/organizations/{1}/networks'.format(str(base_url), str(orgid))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    response = requests.get(endpoint, headers=request_headers)
    # Delegate status/body parsing to the shared return handler
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def getlicensestate(apikey, orgid, suppressprint=False):
    """Return the license state of an organization as parsed JSON."""
    # Confirm the API key has admin access; raises on failure
    __hasorgaccess(apikey, orgid)
    calltype = 'License'
    endpoint = '{0}/organizations/{1}/licenseState'.format(str(base_url), str(orgid))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    response = requests.get(endpoint, headers=request_headers)
    # Delegate status/body parsing to the shared return handler
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def getdevicedetail(apikey, networkid, serialnumber, suppressprint=False):
    """Return the details of a single device in a network as parsed JSON."""
    calltype = 'Device Detail'
    endpoint = '{0}/networks/{1}/devices/{2}'.format(str(base_url), str(networkid), str(serialnumber))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    response = requests.get(endpoint, headers=request_headers)
    # Delegate status/body parsing to the shared return handler
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def getnetworkdetail(apikey, networkid, suppressprint=False):
    """Return the details of a single network as parsed JSON."""
    calltype = 'Network Detail'
    endpoint = '{0}/networks/{1}'.format(str(base_url), str(networkid))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    response = requests.get(endpoint, headers=request_headers)
    # Delegate status/body parsing to the shared return handler
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def getnetworktrafficstats(apikey, networkid, timespan=86400, devicetype='combined', suppressprint=False):
    """Return traffic analysis stats for a network over the given timespan (seconds)."""
    calltype = 'Network Detail'
    endpoint = '{0}/networks/{1}/traffic?timespan={2}&deviceType={3}'.format(
        str(base_url), str(networkid), str(timespan), str(devicetype))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    response = requests.get(endpoint, headers=request_headers)
    # Delegate status/body parsing to the shared return handler
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def getnonmerakivpnpeers(apikey, orgid, suppressprint=False):
    """Return the non-Meraki (third-party) VPN peers of an organization."""
    # Confirm the API key has admin access; raises on failure
    __hasorgaccess(apikey, orgid)
    calltype = 'Non-Meraki VPN'
    endpoint = '{0}/organizations/{1}/thirdPartyVPNPeers'.format(str(base_url), str(orgid))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    response = requests.get(endpoint, headers=request_headers)
    # Delegate status/body parsing to the shared return handler
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def getsnmpsettings(apikey, orgid, suppressprint=False):
    """Return the SNMP settings of an organization as parsed JSON."""
    # Confirm the API key has admin access; raises on failure
    __hasorgaccess(apikey, orgid)
    calltype = 'SNMP Settings'
    endpoint = '{0}/organizations/{1}/snmp'.format(str(base_url), str(orgid))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    response = requests.get(endpoint, headers=request_headers)
    # Delegate status/body parsing to the shared return handler
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def getsamlroles(apikey, orgid, suppressprint=False):
    """Return the SAML roles of an organization as parsed JSON."""
    # Confirm the API key has admin access; raises on failure
    __hasorgaccess(apikey, orgid)
    calltype = 'SAML Roles'
    endpoint = '{0}/organizations/{1}/samlRoles'.format(str(base_url), str(orgid))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    response = requests.get(endpoint, headers=request_headers)
    # Delegate status/body parsing to the shared return handler
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def getsamlroledetail(apikey, orgid, roleid, suppressprint=False):
    """Return the details of a single SAML role as parsed JSON."""
    # Confirm the API key has admin access; raises on failure
    __hasorgaccess(apikey, orgid)
    calltype = 'SAML Role Detail'
    endpoint = '{0}/organizations/{1}/samlRoles/{2}'.format(str(base_url), str(orgid), str(roleid))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    response = requests.get(endpoint, headers=request_headers)
    # Delegate status/body parsing to the shared return handler
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def getswitchstacks(apikey, networkid, suppressprint=False):
    """Return the switch stacks in a network as parsed JSON."""
    calltype = 'Switch Stacks'
    endpoint = '{0}/networks/{1}/switchStacks'.format(str(base_url), str(networkid))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    response = requests.get(endpoint, headers=request_headers)
    # Delegate status/body parsing to the shared return handler
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def getswitchstackmembers(apikey, networkid, stackid, suppressprint=False):
    """Return the members of a single switch stack as parsed JSON."""
    calltype = 'Switch Stack Members'
    endpoint = '{0}/networks/{1}/switchStacks/{2}'.format(str(base_url), str(networkid), str(stackid))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    response = requests.get(endpoint, headers=request_headers)
    # Delegate status/body parsing to the shared return handler
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def getswitchports(apikey, serialnum, suppressprint=False):
    """Return the port configuration of a switch as parsed JSON."""
    calltype = 'Switch Port'
    endpoint = '{0}/devices/{1}/switchPorts'.format(str(base_url), str(serialnum))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    response = requests.get(endpoint, headers=request_headers)
    # Delegate status/body parsing to the shared return handler
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def getswitchportdetail(apikey, serialnum, portnum, suppressprint=False):
    """Return the configuration of a single switch port as parsed JSON."""
    calltype = 'Switch Port Detail'
    endpoint = '{0}/devices/{1}/switchPorts/{2}'.format(str(base_url), str(serialnum), str(portnum))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    response = requests.get(endpoint, headers=request_headers)
    # Delegate status/body parsing to the shared return handler
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def getssids(apikey, networkid, suppressprint=False):
    """Return the SSIDs of a wireless network as parsed JSON."""
    calltype = 'SSID'
    endpoint = '{0}/networks/{1}/ssids'.format(str(base_url), str(networkid))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    response = requests.get(endpoint, headers=request_headers)
    # Delegate status/body parsing to the shared return handler
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def getssiddetail(apikey, networkid, ssidnum, suppressprint=False):
    """Return the configuration of a single SSID as parsed JSON."""
    calltype = 'SSID Detail'
    endpoint = '{0}/networks/{1}/ssids/{2}'.format(str(base_url), str(networkid), str(ssidnum))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    response = requests.get(endpoint, headers=request_headers)
    # Delegate status/body parsing to the shared return handler
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def getvlans(apikey, networkid, suppressprint=False):
    """Return the VLANs of a network as parsed JSON."""
    calltype = 'VLANs'
    endpoint = '{0}/networks/{1}/vlans'.format(str(base_url), str(networkid))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    response = requests.get(endpoint, headers=request_headers)
    # Delegate status/body parsing to the shared return handler
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def getvlandetail(apikey, networkid, vlanid, suppressprint=False):
    """Return the configuration of a single VLAN as parsed JSON."""
    calltype = 'VLAN Detail'
    endpoint = '{0}/networks/{1}/vlans/{2}'.format(str(base_url), str(networkid), str(vlanid))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    response = requests.get(endpoint, headers=request_headers)
    # Delegate status/body parsing to the shared return handler
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def gettemplates(apikey, orgid, suppressprint=False):
    """Return the configuration templates of an organization as parsed JSON."""
    # Confirm the API key has admin access; raises on failure
    __hasorgaccess(apikey, orgid)
    calltype = 'Templates'
    endpoint = '{0}/organizations/{1}/configTemplates'.format(str(base_url), str(orgid))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    response = requests.get(endpoint, headers=request_headers)
    # Delegate status/body parsing to the shared return handler
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def getclients(apikey, serialnum, timestamp=86400, suppressprint=False):
    """Return the clients seen by a device within the given timespan (seconds)."""
    calltype = 'Device Clients'
    endpoint = '{0}/devices/{1}/clients?timespan={2}'.format(str(base_url), str(serialnum), str(timestamp))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    response = requests.get(endpoint, headers=request_headers)
    # Delegate status/body parsing to the shared return handler
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def bindtotemplate(apikey, networkid, templateid, autobind=False, suppressprint=False):
    """Bind a network to a configuration template, optionally auto-binding switch profiles."""
    calltype = 'Template Bind'
    endpoint = '{0}/networks/{1}/bind'.format(str(base_url), str(networkid))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    payload = {
        'configTemplateId': format(str(templateid)),
        'autoBind': autobind
    }
    response = requests.post(endpoint, data=json.dumps(payload), headers=request_headers)
    # Delegate status/body parsing to the shared return handler
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def adddevtonet(apikey, networkid, serial, suppressprint=False):
    """Claim a device, by serial number, into a network."""
    calltype = 'Device'
    endpoint = '{0}/networks/{1}/devices/claim'.format(str(base_url), str(networkid))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    payload = {'serial': format(str(serial))}
    response = requests.post(endpoint, data=json.dumps(payload), headers=request_headers)
    # Delegate status/body parsing to the shared return handler
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def claim(apikey, orgid, serial=None, licensekey=None, licensemode=None, orderid=None, suppressprint=False):
    """Claim a device serial, license key, or order into an organization.

    Exactly one of serial, licensekey, or orderid may be supplied.  When a
    license key is claimed, licensemode must be given as well.

    Raises:
        AttributeError: if more than one identifier is passed, or if only
            one of licensekey/licensemode is supplied.
    """
    calltype = 'Claim'
    # Fixed endpoint: '/organizations/{id}/claim' (plural), consistent with
    # every other organization call in this module and the Dashboard API.
    posturl = '{0}/organizations/{1}/claim'.format(str(base_url), str(orgid))
    headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    # Count identifiers with a dedicated counter.  The original reused the
    # loop variable as the counter and executed `x += 1` on the identifier
    # value itself, which raised TypeError for any string argument.
    identifiers = sum(1 for value in (serial, licensekey, orderid) if value is not None)
    if identifiers > 1:
        raise AttributeError('Multiple identifiers passed, please pass only one of either serial number, license '
                             'key, or order ID')
    if (licensekey is None and licensemode is not None) or (licensemode is None and licensekey is not None):
        raise AttributeError('If claiming a license key both license and licensemode attributes must be passed')
    postdata = {}
    if serial is not None:
        postdata['serial'] = serial
    elif licensekey is not None and licensemode is not None:
        # Fixed: the original assigned `serial` (always None here) to both fields
        postdata['license'] = licensekey
        postdata['licenseMode'] = licensemode
    elif orderid is not None:
        postdata['orderId'] = orderid
    dashboard = requests.post(posturl, data=json.dumps(postdata), headers=headers)
    #
    # Call return handler function to parse Dashboard response
    #
    result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
    return result
def unbindfromtemplate(apikey, networkid, suppressprint=False):
    """Unbind a network from its configuration template."""
    calltype = 'Network Unbind'
    endpoint = '{0}/networks/{1}/unbind'.format(str(base_url), str(networkid))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    response = requests.post(endpoint, headers=request_headers)
    # Delegate status/body parsing to the shared return handler
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def deltemplate(apikey, orgid, templateid, suppressprint=False):
    """Delete a configuration template from an organization."""
    # Confirm the API key has admin access; raises on failure
    __hasorgaccess(apikey, orgid)
    calltype = 'Template'
    endpoint = '{0}/organizations/{1}/configTemplates/{2}'.format(str(base_url), str(orgid), str(templateid))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    response = requests.delete(endpoint, headers=request_headers)
    # Delegate status/body parsing to the shared return handler
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def delsamlrole(apikey, orgid, roleid, suppressprint=False):
    """Delete a SAML role from an organization."""
    # Confirm the API key has admin access; raises on failure
    __hasorgaccess(apikey, orgid)
    calltype = 'SAML Role'
    endpoint = '{0}/organizations/{1}/samlRoles/{2}'.format(str(base_url), str(orgid), str(roleid))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    response = requests.delete(endpoint, headers=request_headers)
    # Delegate status/body parsing to the shared return handler
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def updatevlan(apikey, networkid, vlanid, vlanname=None, mxip=None, subnetip=None, suppressprint=False):
    """Update name, appliance IP, and/or subnet of an existing VLAN."""
    calltype = 'VLAN'
    endpoint = '{0}/networks/{1}/vlans/{2}'.format(str(base_url), str(networkid), str(vlanid))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    # Only send the attributes the caller actually supplied
    payload = {}
    if vlanname is not None:
        payload['name'] = format(str(vlanname))
    if mxip is not None:
        payload['applianceIp'] = format(str(mxip))
    if subnetip is not None:
        payload['subnet'] = format(str(subnetip))
    response = requests.put(endpoint, data=json.dumps(payload), headers=request_headers)
    # Delegate status/body parsing to the shared return handler
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def addvlan(apikey, networkid, vlanid, vlanname, mxip, subnetip, suppressprint=False):
    """Add a new VLAN to a network."""
    calltype = 'VLAN'
    endpoint = '{0}/networks/{1}/vlans'.format(str(base_url), str(networkid))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    payload = {
        'id': format(str(vlanid)),
        'name': format(str(vlanname)),
        'applianceIp': format(str(mxip)),
        'subnet': format(str(subnetip))
    }
    response = requests.post(endpoint, data=json.dumps(payload), headers=request_headers)
    # Delegate status/body parsing to the shared return handler
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def delvlan(apikey, networkid, vlanid, suppressprint=False):
    """Delete a VLAN from a network."""
    calltype = 'VLAN'
    endpoint = '{0}/networks/{1}/vlans/{2}'.format(str(base_url), str(networkid), str(vlanid))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    response = requests.delete(endpoint, headers=request_headers)
    # Delegate status/body parsing to the shared return handler
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def addadmin(apikey, orgid, email, name, orgaccess=None, tags=None, tagaccess=None, networks=None,
             netaccess=None, suppressprint=False):
    """Add a new dashboard administrator to an organization.

    Access must be granted via at least one of orgaccess, tags (with
    tagaccess), or networks (with netaccess).  tags/tagaccess and
    networks/netaccess are parallel lists and must match in length.
    Returns the parsed Dashboard response, or None on argument errors.
    """
    #
    # Confirm API Key has Admin Access Otherwise Raise Error
    #
    __hasorgaccess(apikey, orgid)
    calltype = 'Administrator'
    posturl = '{0}/organizations/{1}/admins'.format(str(base_url), str(orgid))
    headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    if orgaccess is None and tags is None and networks is None:
        print("Administrator accounts must be granted access to either an Organization, Networks, or Tags")
        return None
    # Validate the tag/tagaccess pair and build the per-tag access list
    posttags = []
    if tags is not None and tagaccess is None:
        print("If tags are defined you must define matching access arguments.\nFor example, tags = ['tag1', 'tag2'], "
              "must have matching access arguments: tagaccess = 'full', 'read-only'")
        return None
    elif tagaccess is not None and tags is None:
        print("If tag access levels are defined you must define matching tag arguments\nFor example, tags = "
              "['tag1', 'tag2'] must have matching access arguments: tagaccess = 'full', 'read-only'")
        return None
    elif tags is not None and tagaccess is not None:
        if len(tags) != len(tagaccess):
            print("The number of tags and access arguments must match.\n")
            print("For example, tags = ['tag1', 'tag2'] must have matching access arguments: tagaccess = "
                  "['full', 'read-only']")
            return None
        posttags = [{'tag': t, 'access': a} for t, a in zip(tags, tagaccess)]
    # Validate the networks/netaccess pair and build the per-network access list
    postnets = []
    if networks is not None and netaccess is None:
        print("If networks are defined you must define matching access arguments\nFor example networks = "
              "['net1', 'net2'] must have matching access arguments: netaccess = 'full', 'read-only'")
        return None
    elif netaccess is not None and networks is None:
        print("If network access levels are defined you must define matching network arguments\nFor example, networks"
              " = ['net1', 'net2'] must have matching access arguments: netaccess = 'full', 'read-only'")
        return None
    elif networks is not None and netaccess is not None:
        if len(networks) != len(netaccess):
            print("The number of networks and access arguments must match.\n")
            print("For example, networks = ['net1', 'net2'] must have matching access arguments: netaccess = "
                  "['full', 'read-only']")
            return None
        postnets = [{'id': n, 'access': a} for n, a in zip(networks, netaccess)]
    # Build the payload once instead of duplicating it for every tag/network combination
    postdata = {
        'name': format(str(name)),
        'email': format(str(email)),
        'orgAccess': orgaccess
    }
    if posttags:
        postdata['tags'] = posttags
    if postnets:
        postdata['networks'] = postnets
    dashboard = requests.post(posturl, data=json.dumps(postdata), headers=headers)
    #
    # Call return handler function to parse Dashboard response
    #
    result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
    return result
def deladmin(apikey, orgid, adminid, suppressprint=False):
    """Delete a dashboard administrator from an organization."""
    # Confirm the API key has admin access; raises on failure
    __hasorgaccess(apikey, orgid)
    calltype = 'Administrator'
    endpoint = '{0}/organizations/{1}/admins/{2}'.format(str(base_url), str(orgid), str(adminid))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    response = requests.delete(endpoint, headers=request_headers)
    # Delegate status/body parsing to the shared return handler
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def addnetwork(apikey, orgid, name, nettype, tags, tz, suppressprint=False):
    """Create a new network in an organization."""
    # Confirm the API key has admin access; raises on failure
    __hasorgaccess(apikey, orgid)
    calltype = 'Network'
    endpoint = '{0}/organizations/{1}/networks'.format(str(base_url), str(orgid))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    # Validate the time zone before touching the API
    __isvalidtz(tz)
    payload = {
        'name': format(str(name)),
        'type': format(str(nettype)),
        'tags': format(str(tags)),
        'timeZone': format(str(tz))
    }
    response = requests.post(endpoint, data=json.dumps(payload), headers=request_headers)
    # Delegate status/body parsing to the shared return handler
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def delnetwork(apikey, networkid, suppressprint=False):
    """Delete a network from its organization."""
    calltype = 'Network'
    endpoint = '{0}/networks/{1}'.format(str(base_url), str(networkid))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    response = requests.delete(endpoint, headers=request_headers)
    # Delegate status/body parsing to the shared return handler
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def updateadmin(apikey, orgid, adminid, email, name=None, orgaccess=None, tags=None, tagaccess=None,
                networks=None, netaccess=None, suppressprint=False):
    """Update an existing dashboard administrator.

    At least one of name, orgaccess, tags (with tagaccess), or networks
    (with netaccess) must be supplied.  tags/tagaccess and
    networks/netaccess are parallel lists and must match in length.
    Returns the parsed Dashboard response, or None on argument errors.

    Bug fix: the original built no payload at all for an orgaccess-only
    update without a name change (it PUT an empty list); the payload is now
    assembled incrementally so every valid combination is sent.
    """
    #
    # Confirm API Key has Admin Access Otherwise Raise Error
    #
    __hasorgaccess(apikey, orgid)
    calltype = 'Administrator'
    puturl = '{0}/organizations/{1}/admins/{2}'.format(str(base_url), str(orgid), str(adminid))
    headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    if orgaccess is None and tags is None and networks is None and name is None:
        print("Administrator account updates must include Organization, Networks, or Tags permission changes or an "
              "updated name attribute")
        return None
    # Validate the tag/tagaccess pair and build the per-tag access list
    puttags = []
    if tags is not None and tagaccess is None:
        print("If tags are defined you must define matching access arguments.\nFor example, tags = ['tag1', 'tag2'], "
              "must have matching access arguments: tagaccess = 'full', 'read-only'")
        return None
    elif tagaccess is not None and tags is None:
        print("If tag access levels are defined you must define matching tag arguments\nFor example, tags = "
              "['tag1', 'tag2'] must have matching access arguments: tagaccess = 'full', 'read-only'")
        return None
    elif tags is not None and tagaccess is not None:
        if len(tags) != len(tagaccess):
            print("The number of tags and access arguments must match.\n")
            print("For example, tags = ['tag1', 'tag2'] must have matching access arguments: tagaccess = "
                  "['full', 'read-only']")
            return None
        puttags = [{'tag': t, 'access': a} for t, a in zip(tags, tagaccess)]
    # Validate the networks/netaccess pair and build the per-network access list
    putnets = []
    if networks is not None and netaccess is None:
        print("If networks are defined you must define matching access arguments\nFor example networks = "
              "['net1', 'net2'] must have matching access arguments: netaccess = 'full', 'read-only'")
        return None
    elif netaccess is not None and networks is None:
        print("If network access levels are defined you must define matching network arguments\nFor example, networks"
              " = ['net1', 'net2'] must have matching access arguments: netaccess = 'full', 'read-only'")
        return None
    elif networks is not None and netaccess is not None:
        if len(networks) != len(netaccess):
            print("The number of networks and access arguments must match.\n")
            print("For example, networks = ['net1', 'net2'] must have matching access arguments: netaccess = "
                  "['full', 'read-only']")
            return None
        putnets = [{'id': n, 'access': a} for n, a in zip(networks, netaccess)]
    # Assemble the payload incrementally, including only supplied attributes
    putdata = {
        'email': format(str(email)),
        'orgAccess': orgaccess
    }
    if name is not None:
        putdata['name'] = format(str(name))
    if puttags:
        putdata['tags'] = puttags
    if putnets:
        putdata['networks'] = putnets
    dashboard = requests.put(puturl, data=json.dumps(putdata), headers=headers)
    #
    # Call return handler function to parse Dashboard response
    #
    result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
    return result
def getvpnsettings(apikey, networkid, suppressprint=False):
    """Return the site-to-site (AutoVPN) settings of a network as parsed JSON."""
    calltype = 'AutoVPN'
    endpoint = '{0}/networks/{1}/siteToSiteVpn'.format(str(base_url), str(networkid))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    response = requests.get(endpoint, headers=request_headers)
    # Delegate status/body parsing to the shared return handler
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def updatevpnsettings(apikey, networkid, mode='None', subnets=None, usevpn=None, hubnetworks=None, defaultroute=None,
                      suppressprint=False):
    """Update the site-to-site (AutoVPN) settings of a network.

    subnets/usevpn and hubnetworks/defaultroute are parallel lists; each
    pair is validated via __comparelist before use.  Every subnet string is
    validated via __validsubnetip.
    """
    calltype = 'AutoVPN'
    puturl = '{0}/networks/{1}/siteToSiteVpn'.format(str(base_url), str(networkid))
    headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    __comparelist(hubnetworks, defaultroute)
    if hubnetworks is not None and defaultroute is not None:
        hubmodes = zip(hubnetworks, defaultroute)
    else:
        hubmodes = []
    __comparelist(subnets, usevpn)
    vpnsubnets = list(zip(subnets, usevpn))
    hubs = [{'hubId': h, 'useDefaultRoute': d} for h, d in hubmodes]
    # Use a fresh name for the output list instead of shadowing the
    # `subnets` parameter as the original did
    subnetlist = []
    for s, i in vpnsubnets:
        __validsubnetip(s)
        subnetlist.append({'localSubnet': s, 'useVpn': i})
    # Removed stray debug print() that leaked the full payload to stdout
    putdata = json.dumps({'mode': mode, 'hubs': hubs, 'subnets': subnetlist})
    dashboard = requests.put(puturl, data=putdata, headers=headers)
    #
    # Call return handler function to parse Dashboard response
    #
    result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
    return result
def updatenonmerakivpn(apikey, orgid, names, ips, secrets, remotenets, tags=None, suppressprint=False):
    """Replace ALL non-Meraki VPN peers of an organization.

    This destructively overwrites every existing peer; to change one peer,
    download the current list and re-upload the full modified set.
    names, ips, secrets, and remotenets are parallel lists; tags may be
    omitted, in which case every peer is made available to all networks.

    Raises:
        TypeError: if any peer argument is not a list.
    """
    #
    # Confirm API Key has Admin Access Otherwise Raise Error
    #
    __hasorgaccess(apikey, orgid)
    calltype = 'Non-Meraki VPN'
    puturl = '{0}/organizations/{1}/thirdPartyVPNPeers'.format(str(base_url), str(orgid))
    headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    if isinstance(names, list) and isinstance(ips, list) and isinstance(secrets, list)\
            and isinstance(remotenets, list) and (tags is None or isinstance(tags, list)):
        # Fixed precedence bug: the original divided only len(remotenets) by 4,
        # so the length-mismatch warning was wrong for most inputs.
        if (len(names) + len(ips) + len(secrets) + len(remotenets)) / 4 != len(names):
            warnings.warn('Peers will be added up to the length of the shortest list passed', ListLengthWarn)
        if tags is None:
            # Default every peer to availability on all networks
            tags = [['all'] for _ in names]
        for n in remotenets:
            if isinstance(n, list):
                for sn in n:
                    __validsubnetip(sn)
            else:
                __validsubnetip(n)
        putdata = [
            {'name': n, 'publicIp': i, 'privateSubnets': r, 'secret': s, 'tags': t}
            for n, i, s, r, t in zip(names, ips, secrets, remotenets, tags)
        ]
    else:
        raise TypeError('All peer arguments must be passed as lists, tags argument may be excluded')
    dashboard = requests.put(puturl, data=json.dumps(putdata), headers=headers)
    #
    # Call return handler function to parse Dashboard response
    #
    result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
    return result
def getnonmerakivpn(apikey, orgid, suppressprint=False):
    """Return the non-Meraki (third-party) VPN peers of an organization."""
    calltype = 'Non-Meraki VPN'
    endpoint = '{0}/organizations/{1}/thirdPartyVPNPeers'.format(str(base_url), str(orgid))
    request_headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    response = requests.get(endpoint, headers=request_headers)
    # Delegate status/body parsing to the shared return handler
    return __returnhandler(response.status_code, response.text, calltype, suppressprint)
def appendnonmerakivpn(apikey, orgid, names, ips, secrets, remotenets, tags=None, suppressprint=False):
    """Append new non-Meraki VPN peers to an organization's existing list.

    Downloads the current peer list and re-uploads it with the new peers
    prepended, so existing peers are preserved.  names, ips, secrets, and
    remotenets are parallel lists; tags may be omitted, in which case every
    new peer is made available to all networks.

    Raises:
        TypeError: if any peer argument is not a list.
    """
    #
    # Confirm API Key has Admin Access Otherwise Raise Error
    #
    __hasorgaccess(apikey, orgid)
    calltype = 'Non-Meraki VPN'
    puturl = '{0}/organizations/{1}/thirdPartyVPNPeers'.format(str(base_url), str(orgid))
    geturl = '{0}/organizations/{1}/thirdPartyVPNPeers'.format(str(base_url), str(orgid))
    headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    # Fetch the current peers so the append is non-destructive
    currentpeers = json.loads(requests.get(geturl, headers=headers).text)
    if any(isinstance(el, list) for el in remotenets) is False:
        remotenets = [remotenets]
        warnings.warn('Variable remotenets was not passed as list of lists, it has been converted', ListLengthWarn)
    if isinstance(names, list) and isinstance(ips, list) and isinstance(secrets, list)\
            and isinstance(remotenets, list) and (tags is None or isinstance(tags, list)):
        # Fixed precedence bug: the original divided only len(remotenets) by 4,
        # so the length-mismatch warning was wrong for most inputs.
        if (len(names) + len(ips) + len(secrets) + len(remotenets)) / 4 != len(names):
            warnings.warn('Peers will be added up to the length of the shortest list passed', ListLengthWarn)
        if tags is None:
            # Default every new peer to availability on all networks
            tags = [['all'] for _ in names]
        for n in remotenets:
            if isinstance(n, list):
                for sn in n:
                    __validsubnetip(sn)
            else:
                __validsubnetip(n)
        putdata = [
            {'name': n, 'publicIp': i, 'privateSubnets': r, 'secret': s, 'tags': t}
            for n, i, s, r, t in zip(names, ips, secrets, remotenets, tags)
        ]
        # Carry over every pre-existing peer unchanged
        for x in currentpeers:
            putdata.append({
                'name': x['name'],
                'publicIp': x['publicIp'],
                'privateSubnets': x['privateSubnets'],
                'secret': x['secret'],
                'tags': x['tags']
            })
    else:
        raise TypeError('All peer arguments must be passed as lists, tags argument may be excluded')
    dashboard = requests.put(puturl, data=json.dumps(putdata), headers=headers)
    #
    # Call return handler function to parse Dashboard response
    #
    result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
    return result
def updatesnmpsettings(apikey, orgid, v2c=False, v3=False, v3authmode='SHA', v3authpw=None, v3privmode='AES128',
                       v3privpw=None, allowedips=None, suppressprint=False):
    """Update an organization's SNMP settings.

    v2c/v3 enable the respective SNMP versions; the v3* parameters set the
    SNMPv3 authentication/privacy modes and passwords (passwords required,
    minimum 8 characters, when v3 is enabled).  allowedips is a single IP
    or a list of IPs permitted to poll the SNMP server; None clears the
    allowed list.  Returns the parsed Dashboard response.

    Raises ValueError on invalid modes, passwords or IPs, and
    OrgPermissionError if the API key cannot access the organization.
    """
    #
    # Confirm API Key has Admin Access Otherwise Raise Error
    #
    __hasorgaccess(apikey, orgid)
    calltype = 'SNMP'
    puturl = '{0}/organizations/{1}/snmp'.format(str(base_url), str(orgid))
    headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    putdata = {}
    if v3authmode not in ['SHA', 'MD5']:
        raise ValueError('Valid authentication modes are "SHA" or "MD5"')
    if v3privmode not in ['DES', 'AES128']:
        raise ValueError('Valid privacy modes are "DES" and "AES128"')
    if v3 and (v3authpw is None or v3privpw is None):
        raise ValueError('If SNMPv3 is enabled a authentication and privacy password must be provided')
    elif v3 and (len(v3authpw) < 8 or len(v3privpw) < 8):
        raise ValueError('Authentication and privacy passwords must be a minimum of 8 characters')
    elif v3:
        putdata['v3AuthMode'] = v3authmode
        putdata['v3AuthPass'] = v3authpw
        putdata['v3PrivMode'] = v3privmode
        putdata['v3PrivPass'] = v3privpw
    putdata['v2cEnabled'] = v2c
    putdata['v3Enabled'] = v3
    if allowedips is not None:
        if isinstance(allowedips, list):
            # Validate each entry, then join into a single ':'-separated
            # string (the separator the rest of this module has always used).
            allowiplist = str(allowedips[0])
            __validip(allowiplist)
            for i in allowedips[1:]:
                __validip(str(i))
                # BUGFIX: coerce each entry with str() before concatenating;
                # the original appended the raw item, which raises TypeError
                # for non-string entries (e.g. ip_address objects).
                allowiplist = allowiplist + ':' + str(i)
        else:
            __validip(str(allowedips))
            allowiplist = str(allowedips)
        putdata['peerIps'] = allowiplist
    else:
        putdata['peerIps'] = None
    putdata = json.dumps(putdata)
    dashboard = requests.put(puturl, data=putdata, headers=headers)
    #
    # Call return handler function to parse Dashboard response
    #
    result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
    return result
def removedevfromnet(apikey, networkid, serial, suppressprint=False):
    """Remove the device with the given serial number from a network.

    Returns the parsed Dashboard response.
    """
    calltype = 'Device'
    headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    posturl = '{0}/networks/{1}/devices/{2}/remove'.format(str(base_url), str(networkid), str(serial))
    dashboard = requests.post(posturl, headers=headers)
    # Parse the Dashboard response via the shared return handler.
    return __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
def addorg(apikey, neworgname, suppressprint=False):
    """Create a new organization named *neworgname*.

    Returns the parsed Dashboard response.
    """
    calltype = 'Organization'
    posturl = '{0}/organizations/'.format(str(base_url))
    headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    payload = json.dumps({'name': format(str(neworgname))})
    dashboard = requests.post(posturl, data=payload, headers=headers)
    # Parse the Dashboard response via the shared return handler.
    return __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
def cloneorg(apikey, orgid, neworgname, suppressprint=False):
    """Clone an existing organization into a new one named *neworgname*.

    Raises OrgPermissionError if the API key cannot access orgid.
    Returns the parsed Dashboard response.
    """
    __hasorgaccess(apikey, orgid)
    calltype = 'Organization Clone'
    headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    posturl = '{0}/organizations/{1}/clone'.format(str(base_url), str(orgid))
    payload = json.dumps({'name': format(str(neworgname))})
    dashboard = requests.post(posturl, data=payload, headers=headers)
    # Parse the Dashboard response via the shared return handler.
    return __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
def renameorg(apikey, orgid, neworgname, suppressprint=False):
    """Rename an existing organization to *neworgname*.

    Raises OrgPermissionError if the API key cannot access orgid.
    Returns the parsed Dashboard response.
    """
    __hasorgaccess(apikey, orgid)
    calltype = 'Organization Rename'
    headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    puturl = '{0}/organizations/{1}'.format(str(base_url), str(orgid))
    payload = json.dumps({'name': format(str(neworgname))})
    dashboard = requests.put(puturl, data=payload, headers=headers)
    # Parse the Dashboard response via the shared return handler.
    return __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
def updatenetwork(apikey, networkid, name, tz, tags, suppressprint=False):
    """Update a network's name, time zone and/or tags.

    Falsy arguments are skipped, leaving that attribute unchanged on the
    network.  tz is validated against the module's IANA time-zone list.
    Returns the parsed Dashboard response.
    """
    calltype = 'Network'
    # BUGFIX: the network-update endpoint is PUT /networks/{networkId};
    # the previous URL targeted /organizations/{id} with a network id,
    # which is the wrong resource.
    puturl = '{0}/networks/{1}'.format(str(base_url), str(networkid))
    headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    putdata = {}
    if name:
        putdata['name'] = name
    if tz:
        # Raises ValueError for an invalid time zone.
        __isvalidtz(tz)
        putdata['timeZone'] = format(str(tz))
    if tags:
        putdata['tags'] = __listtotag(tags)
    dashboard = requests.put(puturl, data=json.dumps(putdata), headers=headers)
    #
    # Call return handler function to parse Dashboard response
    #
    result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
    return result
def updatedevice(apikey, networkid, sn, name, tags, lat, lng, address, suppressprint=False):
    """Update a device's name, tags, coordinates and/or address.

    Falsy name/tags/address are skipped (attribute left unchanged).
    lat and lng must be supplied together; pass None for both to leave
    the location unchanged.  Returns the parsed Dashboard response.
    """
    calltype = 'Device'
    puturl = '{0}/networks/{1}/devices/{2}'.format(str(base_url), str(networkid), str(sn))
    headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    putdata = {}
    if name:
        putdata['name'] = name
    if tags:
        putdata['tags'] = __listtotag(tags)
    # BUGFIX: compare against None instead of truthiness so the valid
    # coordinates 0 (equator / prime meridian) are not treated as missing.
    if lat is not None and lng is None:
        raise ValueError('If latitude is entered a longitude value must also be entered')
    elif lng is not None and lat is None:
        raise ValueError('If longitude is entered a latitude value must also be entered')
    elif lat is not None and lng is not None:
        putdata['lat'] = lat
        putdata['lng'] = lng
    if address:
        putdata['address'] = address
    dashboard = requests.put(puturl, data=json.dumps(putdata), headers=headers)
    #
    # Call return handler function to parse Dashboard response
    #
    result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
    return result
def updatessid(apikey, networkid, ssidnum, name, enabled, authmode, encryptionmode, psk, suppressprint=False):
    """Update an SSID's name, enabled state and authentication settings.

    authmode must be 'psk' or 'open'.  With 'psk', encryptionmode and psk
    are both required (psk must be >= 8 characters when encryptionmode is
    'wpa'); with 'open', any encryptionmode/psk arguments are ignored with
    a warning.  enabled must be a real boolean.  Returns the parsed
    Dashboard response.
    """
    calltype = 'SSID'
    puturl = '{0}/networks/{1}/ssids/{2}'.format(str(base_url), str(networkid), str(ssidnum))
    headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    putdata = {}
    if name:
        putdata['name'] = str(name)
    # BUGFIX: the original test "enabled and (enabled is not False or not True)"
    # is true for every truthy value, so enabled=True always raised.  Check
    # for an actual boolean instead.
    if not isinstance(enabled, bool):
        raise ValueError("Enabled must be a boolean variable")
    else:
        putdata['enabled'] = str(enabled)
    if authmode not in ['psk', 'open']:
        raise ValueError("Authentication mode must be psk or open")
    elif authmode == 'psk' and (not encryptionmode or not psk):
        raise ValueError("If authentication mode is set to psk, encryption mode and psk must also be passed")
    elif authmode == 'open' and (encryptionmode or psk):
        warnings.warn(IgnoredArgument("If authentication mode is open, encryption mode and psk will be ignored"))
    elif authmode:
        putdata['authMode'] = str(authmode)
    # BUGFIX: use != rather than the identity test "is not" on string
    # literals (implementation-dependent; SyntaxWarning on modern Pythons).
    if encryptionmode and (authmode != 'psk' or not psk or not authmode):
        raise ValueError("If encryption mode is passed, authentication mode must be psk and psk must also be passed")
    elif encryptionmode:
        putdata['encryptionMode'] = str(encryptionmode)
    if psk and (authmode != 'psk' or not encryptionmode or not authmode):
        raise ValueError("If psk is passed, authentication mode and encryption mode must also be passed")
    # BUGFIX: guard with "psk and" so len(None) is never evaluated when no
    # psk was supplied (authmode='open', psk=None previously raised TypeError).
    elif psk and len(psk) < 8 and encryptionmode == 'wpa':
        raise ValueError("If encryption mode is wpa, the psk must be a minimum of 8 characters")
    elif psk:
        putdata['psk'] = str(psk)
    dashboard = requests.put(puturl, data=json.dumps(putdata), headers=headers)
    #
    # Call return handler function to parse Dashboard response
    #
    result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
    return result
def updateswitchport(apikey, serialnum, portnum, name, tags, enabled, porttype, vlan, voicevlan, allowedvlans, poe,
                     isolation, rstp, stpguard, accesspolicynum, suppressprint=False):
    """Update the configuration of a single switch port.

    Only parameters that were actually supplied are included in the PUT
    body, so unspecified settings are left unchanged on the port.
    porttype must be 'access' or 'trunk' (either case); stpguard must be
    'disabled', 'root guard' or 'BPDU guard'.  Returns the parsed
    Dashboard response.
    """
    calltype = 'Switch Port'
    puturl = '{0}/devices/{1}/switchPorts/{2}'.format(str(base_url), str(serialnum), str(portnum))
    headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    putdata = {}
    if name:
        putdata['name'] = str(name)
    if tags:
        putdata['tags'] = __listtotag(tags)
    # BUGFIX: enabled/poe/isolation/rstp were previously written into the
    # payload unconditionally, so a caller passing None ("leave unchanged")
    # sent the string "None" (or null) to the API.  Include them only when
    # a value was given.
    if enabled is not None:
        putdata['enabled'] = str(enabled)
    if porttype and porttype not in ['access', 'trunk', 'TRUNK', 'ACCESS']:
        raise ValueError("Type must be either 'access' or 'trunk'")
    elif porttype:
        putdata['type'] = str(porttype)
    if vlan:
        putdata['vlan'] = str(vlan)
    if voicevlan:
        putdata['voiceVlan'] = voicevlan
    if allowedvlans:
        putdata['allowedVlans'] = allowedvlans
    if poe is not None:
        putdata['poeEnabled'] = str(poe)
    if isolation is not None:
        putdata['isolation'] = isolation
    if rstp is not None:
        putdata['rstpEnabled'] = rstp
    if stpguard and stpguard not in ['disabled', 'root guard', 'BPDU guard']:
        raise ValueError("Valid values for STP Guard are 'disabled', 'root guard', or 'BPDU Guard'")
    elif stpguard:
        putdata['stpGuard'] = stpguard
    if accesspolicynum:
        putdata['accessPolicyNumber'] = accesspolicynum
    dashboard = requests.put(puturl, data=json.dumps(putdata), headers=headers)
    #
    # Call return handler function to parse Dashboard response
    #
    result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
    return result
def addsamlrole(apikey, orgid, rolename, orgaccess, tags, tagaccess, networks, netaccess, suppressprint=False):
    """Create a SAML role in an organization.

    Permissions may be granted organization-wide (orgaccess: 'read-only',
    'full' or 'none'), per tag (tags + tagaccess, parallel lists) and/or
    per network (networks + netaccess, parallel lists).  At least one
    scope is required; unequal parallel lists are zipped down to the
    shorter list with a warning.  Returns the parsed Dashboard response.
    """
    #
    # Confirm API Key has Admin Access Otherwise Raise Error
    #
    __hasorgaccess(apikey, orgid)
    calltype = 'SAML Role'
    posturl = '{0}/organizations/{1}/samlRoles'.format(str(base_url), str(orgid))
    headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    if not orgaccess and not tags and not networks:
        raise AttributeError("At least one of organization access, tag based access, or network based access must be "
                             "defined")
    if orgaccess and orgaccess not in ['read-only', 'full', 'none']:
        raise ValueError("Organization access must be either 'read-only' or 'full' or 'none'")
    if (tags and not tagaccess) or (tagaccess and not tags):
        raise AttributeError("Both tags and tag access lists must be passed if tag based permissions are defined")
    taglist = bool(tags and tagaccess)
    posttags = []
    if taglist:
        tagcompare = __comparelist(tags, tagaccess)
        if tagcompare == 2:
            warnings.warn(ListLengthWarn("Tags and tag access list are not the same length, lists will be joined to "
                                         "the shortest length list"))
        if tagcompare in (0, 2):
            # zip() stops at the shorter list, implementing the warning above.
            posttags = [{'tag': t, 'access': ta} for t, ta in zip(tags, tagaccess)]
    if (networks and not netaccess) or (netaccess and not networks):
        raise AttributeError("Both network and network access lists must be passed if network based permissions "
                             "are defined")
    netlist = bool(networks and netaccess)
    postnets = []
    if netlist:
        netcompare = __comparelist(networks, netaccess)
        if netcompare == 2:
            warnings.warn(ListLengthWarn("Networks and tag access list are not the same length, lists will be joined to"
                                         " the shortest length list"))
        if netcompare in (0, 2):
            postnets = [{'id': n, 'access': na} for n, na in zip(networks, netaccess)]
    if not rolename:
        raise ValueError("Role name must be passed for role creation")
    postdata = {'role': str(rolename)}
    if orgaccess:
        postdata['orgAccess'] = str(orgaccess)
    if taglist:
        postdata['tags'] = posttags
    if netlist:
        postdata['networks'] = postnets
    dashboard = requests.post(posturl, data=json.dumps(postdata), headers=headers)
    #
    # Call return handler function to parse Dashboard response
    #
    result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
    return result
def updatesamlrole(apikey, orgid, roleid, rolename, orgaccess, tags, tagaccess, networks, netaccess,
                   suppressprint=False):
    """Update an existing SAML role.

    Only fields that are supplied (truthy) are sent to the Dashboard.
    tags/tagaccess and networks/netaccess are parallel lists; unequal
    lengths are zipped down to the shorter list with a warning.
    Returns the parsed Dashboard response.
    """
    #
    # Confirm API Key has Admin Access Otherwise Raise Error
    #
    __hasorgaccess(apikey, orgid)
    calltype = 'SAML Role'
    puturl = '{0}/organizations/{1}/samlRoles/{2}'.format(str(base_url), str(orgid), str(roleid))
    headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    if orgaccess and orgaccess not in ['read-only', 'full', 'none']:
        # BUGFIX: the message previously ended with an unclosed quote ('none).
        raise ValueError("Organization access must be either 'read-only' or 'full' or 'none'")
    puttags = []
    taglist = False
    if (tags and not tagaccess) or (tagaccess and not tags):
        raise AttributeError("Both tags and tag access lists must be passed if tag based permissions are defined")
    elif tags and tagaccess:
        taglist = True
    if taglist is True:
        tagcompare = __comparelist(tags, tagaccess)
        if tagcompare == 2:
            warnings.warn(ListLengthWarn("Tags and tag access list are not the same length, lists will be joined to "
                                         "the shortest length list"))
        if tagcompare in (0, 2):
            # zip() stops at the shorter list, implementing the warning above.
            puttags = [{'tag': t, 'access': ta} for t, ta in zip(tags, tagaccess)]
    putnets = []
    netlist = False
    if (networks and not netaccess) or (netaccess and not networks):
        raise AttributeError("Both network and network access lists must be passed if network based permissions "
                             "are defined")
    elif networks and netaccess:
        netlist = True
    if netlist is True:
        netcompare = __comparelist(networks, netaccess)
        if netcompare == 2:
            warnings.warn(ListLengthWarn("Networks and tag access list are not the same length, lists will be joined to"
                                         " the shortest length list"))
        if netcompare in (0, 2):
            putnets = [{'id': n, 'access': na} for n, na in zip(networks, netaccess)]
    roledata = {}
    if rolename:
        roledata['role'] = str(rolename)
    if orgaccess:
        roledata['orgAccess'] = str(orgaccess)
    if taglist is True:
        roledata['tags'] = puttags
    if netlist is True:
        roledata['networks'] = putnets
    # BUGFIX: removed a leftover debug print() and the unused "putdata"
    # wrapper list; the request body has always been roledata itself.
    dashboard = requests.put(puturl, data=json.dumps(roledata), headers=headers)
    #
    # Call return handler function to parse Dashboard response
    #
    result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
    return result
'Europe/Zurich',
'GB',
'GB-Eire',
'GMT',
'GMT+0',
'GMT0',
'GMT-0',
'Greenwich',
'Hongkong',
'HST',
'Iceland',
'Indian/Antananarivo',
'Indian/Chagos',
'Indian/Christmas',
'Indian/Cocos',
'Indian/Comoro',
'Indian/Kerguelen',
'Indian/Mahe',
'Indian/Maldives',
'Indian/Mauritius',
'Indian/Mayotte',
'Indian/Reunion',
'Iran',
'Israel',
'Jamaica',
'Japan',
'Kwajalein',
'Libya',
'MET',
'Mexico/BajaNorte',
'Mexico/BajaSur',
'Mexico/General',
'MST',
'MST7MDT',
'Navajo',
'NZ',
'NZ-CHAT',
'Pacific/Apia',
'Pacific/Auckland',
'Pacific/Bougainville',
'Pacific/Chatham',
'Pacific/Chuuk',
'Pacific/Easter',
'Pacific/Efate',
'Pacific/Enderbury',
'Pacific/Fakaofo',
'Pacific/Fiji',
'Pacific/Funafuti',
'Pacific/Galapagos',
'Pacific/Gambier',
'Pacific/Guadalcanal',
'Pacific/Guam',
'Pacific/Honolulu',
'Pacific/Johnston',
'Pacific/Kiritimati',
'Pacific/Kosrae',
'Pacific/Kwajalein',
'Pacific/Majuro',
'Pacific/Marquesas',
'Pacific/Midway',
'Pacific/Nauru',
'Pacific/Niue',
'Pacific/Norfolk',
'Pacific/Noumea',
'Pacific/Pago_Pago',
'Pacific/Palau',
'Pacific/Pitcairn',
'Pacific/Pohnpei',
'Pacific/Ponape',
'Pacific/Port_Moresby',
'Pacific/Rarotonga',
'Pacific/Saipan',
'Pacific/Samoa',
'Pacific/Tahiti',
'Pacific/Tarawa',
'Pacific/Tongatapu',
'Pacific/Truk',
'Pacific/Wake',
'Pacific/Wallis',
'Pacific/Yap',
'Poland',
'Portugal',
'PRC',
'PST8PDT',
'ROC',
'ROK',
'Singapore',
'Turkey',
'UCT',
'Universal',
'US/Alaska',
'US/Aleutian',
'US/Arizona',
'US/Central',
'US/Eastern',
'US/East-Indiana',
'US/Hawaii',
'US/Indiana-Starke',
'US/Michigan',
'US/Mountain',
'US/Pacific',
'US/Pacific-New',
'US/Samoa',
'UTC',
'WET',
'W-SU',
'Zulu'
]
base_url = 'https://dashboard.meraki.com/api/v0'
class Error(Exception):
pass
class ListLengthWarn(Warning):
pass
class IgnoredArgument(Warning):
pass
class OrgPermissionError(Error):
def __init__(self):
self.default = 'Invalid Organization ID - Current API Key does not have access to this Organization'
def __str__(self):
return repr(self.default)
class EmailFormatError(Error):
'Incorrect E-mail Address Format Entered - Must be in the format name@domain.dom'
def __str__(self):
return repr(self.default)
class ListError(Error):
def __init__(self, message):
self.message = message
class DashboardObject(object):
pass
class SSID(DashboardObject):
nabled', 'authMode', 'encryptionMode', 'psk', 'radiusServers', 'radiusAccountingEnabled',
'radiusAccountingServers', 'ipAssignmentMode', 'useVlanTagging', 'concentratorNetworkId', 'vlanID',
'defaultVlanId', 'apTagsAndVlanIds', 'walledGardenEnabled', 'walledGardenRanges', 'splashPage',
'perClientBandwidthLimitUp', 'perClientBandwidthLimitDown']
type = 'ssid'
def __init__(self, ssidnum, **params):
self.__setattr__('ssidnum', ssidnum)
for p in params.keys():
if p in self.validparams:
self.__setattr__(p, params[p])
else:
raise ValueError('Invalid parameter {0}, please refer to https://dashboard.meraki.com/manage/support/'
'api_docs#ssids for valid parameters'.format(str(p)))
def __isjson(myjson):
try:
json_object = json.loads(myjson)
except ValueError:
return False
return True
def __isvalidtz(tz):
validtz = False
for zone in tzlist:
if validtz is False and format(str(tz)) == zone:
validtz = True
break
else:
validtz = False
if validtz is False:
raise ValueError(
'Please enter a valid tz value from https://en.wikipedia.org/wiki/List_of_tz_database_time_zones')
return None
def __comparelist(*args):
length = len(args[0])
if any(lst is None for lst in args):
raise ListError('Empty list passed to function')
if any(len(lst) != length for lst in args):
warnings.warn('All lists are not of equal length', ListLengthWarn)
return 2
else:
return 0
def __hasorgaccess(apikey, targetorg):
geturl = '{0}/organizations'.format(str(base_url))
headers = {
'x-cisco-meraki-api-key': format(str(apikey)),
'Content-Type': 'application/json'
}
dashboard = requests.get(geturl, headers=headers)
currentorgs = json.loads(dashboard.text)
orgs = []
validjson = __isjson(dashboard.text)
if validjson is True:
for org in currentorgs:
if int(org['id']) == int(targetorg):
orgs.append(org['id'])
return None
else:
pass
raise OrgPermissionError
return None
def __validemail(emailaddress):
if not re.match(r"[^@]+@[^@]+\.[^@]+", emailaddress):
raise EmailFormatError
def __validip(ip):
try:
ip_address(ip)
except ValueError:
raise ValueError('Invalid IP Address')
def __validsubnetip(subnetip):
if not re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}[/]\d{1,2}$", subnetip):
raise ValueError('Invalid Subnet IP Address {0} - Address must be formatted as #.#.#.#/#'.format(str(subnetip)))
else:
ip, netmask = str.split(subnetip, '/')
if int(netmask) < 1 or int(netmask) > 30:
raise ValueError('Invalid Subnet Mask Length {0} - Must be between 1 and 30'.format(str(subnetip)))
try:
ip_address(ip)
except ValueError:
raise ValueError('Invalid Subnet IP Address {0}'.format(str(subnetip)))
def __listtotag(taglist):
liststr = ' '
if not isinstance(taglist, list):
taglist = list(taglist)
for t in taglist:
liststr = liststr + t + ' '
return liststr
def __returnhandler(statuscode, returntext, objtype, suppressprint):
validreturn = __isjson(returntext)
noerr = False
errmesg = ''
if validreturn:
returntext = json.loads(returntext)
try:
errmesg = returntext['errors']
except KeyError:
noerr = True
except TypeError:
noerr = True
if str(statuscode) == '200' and validreturn:
if suppressprint is False:
print('{0} Operation Successful - See returned data for results\n'.format(str(objtype)))
return returntext
elif str(statuscode) == '200':
if suppressprint is False:
print('{0} Operation Successful\n'.format(str(objtype)))
return None
elif str(statuscode) == '201' and validreturn:
if suppressprint is False:
print('{0} Added Successfully - See returned data for results\n'.format(str(objtype)))
return returntext
elif str(statuscode) == '201':
if suppressprint is False:
print('{0} Added Successfully\n'.format(str(objtype)))
return None
elif str(statuscode) == '204' and validreturn:
if suppressprint is False:
print('{0} Deleted Successfully - See returned data for results\n'.format(str(objtype)))
return returntext
elif str(statuscode) == '204':
print('{0} Deleted Successfully\n'.format(str(objtype)))
return None
elif str(statuscode) == '400' and validreturn and noerr is False:
if suppressprint is False:
print('Bad Request - See returned data for error details\n')
return errmesg
elif str(statuscode) == '400' and validreturn and noerr:
if suppressprint is False:
print('Bad Request - See returned data for details\n')
return returntext
elif str(statuscode) == '400':
if suppressprint is False:
print('Bad Request - No additional error data available\n')
elif str(statuscode) == '401' and validreturn and noerr is False:
if suppressprint is False:
print('Unauthorized Access - See returned data for error details\n')
return errmesg
elif str(statuscode) == '401' and validreturn:
if suppressprint is False:
print('Unauthorized Access')
return returntext
elif str(statuscode) == '404' and validreturn and noerr is False:
if suppressprint is False:
print('Resource Not Found - See returned data for error details\n')
return errmesg
elif str(statuscode) == '404' and validreturn:
if suppressprint is False:
print('Resource Not Found')
return returntext
elif str(statuscode) == '500':
if suppressprint is False:
print('HTTP 500 - Server Error')
return returntext
elif validreturn and noerr is False:
if suppressprint is False:
print('HTTP Status Code: {0} - See returned data for error details\n'.format(str(statuscode)))
return errmesg
else:
print('HTTP Status Code: {0} - No returned data\n'.format(str(statuscode)))
def myorgaccess(apikey, suppressprint=False):
#
calltype = 'Organization'
geturl = '{0}/organizations'.format(str(base_url))
headers = {
'X-Cisco-Meraki-API-Key': format(str(apikey)),
'Content-Type': 'application/json'
}
dashboard = requests.get(geturl, headers=headers)
#
# Call return handler function to parse Dashboard response
#
result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
return result
def getorg(apikey, orgid, suppressprint=False):
calltype = 'Organization'
geturl = '{0}/organizations/{1}'.format(str(base_url), str(orgid))
headers = {
'x-cisco-meraki-api-key': format(str(apikey)),
'Content-Type': 'application/json'
}
dashboard = requests.get(geturl, headers=headers)
#
# Call return handler function to parse Dashboard response
#
result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
return result
def getorginventory(apikey, orgid, suppressprint=False):
#
# Pull organization inventory and return decoded JSON string
#
#
# Confirm API Key has Admin Access Otherwise Raise Error
#
__hasorgaccess(apikey, orgid)
calltype = 'Inventory'
geturl = '{0}/organizations/{1}/inventory'.format(str(base_url), str(orgid))
headers = {
'x-cisco-meraki-api-key': format(str(apikey)),
'Content-Type': 'application/json'
}
dashboard = requests.get(geturl, headers=headers)
#
# Call return handler function to parse Dashboard response
#
result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
return result
def getnetworkdevices(apikey, networkid, suppressprint=False):
#
# Get network inventory and return as decoded JSON string
#
calltype = 'Network'
geturl = '{0}/networks/{1}/devices'.format(str(base_url), str(networkid))
headers = {
'x-cisco-meraki-api-key': format(str(apikey)),
'Content-Type': 'application/json'
}
dashboard = requests.get(geturl, headers=headers)
#
# Call return handler function to parse Dashboard response
#
result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
return result
def getorgadmins(apikey, orgid, suppressprint=False):
#
# Get administrators for organization and return decoded JSON string
#
#
# Confirm API Key has Admin Access Otherwise Raise Error
#
__hasorgaccess(apikey, orgid)
calltype = 'Organization'
geturl = '{0}/organizations/{1}/admins'.format(str(base_url), str(orgid))
headers = {
'x-cisco-meraki-api-key': format(str(apikey)),
'Content-Type': 'application/json'
}
dashboard = requests.get(geturl, headers=headers)
#
# Call return handler function to parse Dashboard response
#
result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
return result
def getnetworklist(apikey, orgid, suppressprint=False):
#
# Confirm API Key has Admin Access Otherwise Raise Error
#
__hasorgaccess(apikey, orgid)
calltype = 'Network'
geturl = '{0}/organizations/{1}/networks'.format(str(base_url), str(orgid))
headers = {
'x-cisco-meraki-api-key': format(str(apikey)),
'Content-Type': 'application/json'
}
dashboard = requests.get(geturl, headers=headers)
#
# Call return handler function to parse Dashboard response
#
result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
return result
def getlicensestate(apikey, orgid, suppressprint=False):
#
# Confirm API Key has Admin Access Otherwise Raise Error
#
__hasorgaccess(apikey, orgid)
calltype = 'License'
geturl = '{0}/organizations/{1}/licenseState'.format(str(base_url), str(orgid))
headers = {
'x-cisco-meraki-api-key': format(str(apikey)),
'Content-Type': 'application/json'
}
dashboard = requests.get(geturl, headers=headers)
#
# Call return handler function to parse Dashboard response
#
result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
return result
def getdevicedetail(apikey, networkid, serialnumber, suppressprint=False):
calltype = 'Device Detail'
geturl = '{0}/networks/{1}/devices/{2}'.format(str(base_url), str(networkid), str(serialnumber))
headers = {
'x-cisco-meraki-api-key': format(str(apikey)),
'Content-Type': 'application/json'
}
dashboard = requests.get(geturl, headers=headers)
#
# Call return handler function to parse Dashboard response
#
result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
return result
def getnetworkdetail(apikey, networkid, suppressprint=False):
calltype = 'Network Detail'
geturl = '{0}/networks/{1}'.format(str(base_url), str(networkid))
headers = {
'x-cisco-meraki-api-key': format(str(apikey)),
'Content-Type': 'application/json'
}
dashboard = requests.get(geturl, headers=headers)
#
# Call return handler function to parse Dashboard response
#
result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
return result
def getnetworktrafficstats(apikey, networkid, timespan=86400, devicetype='combined', suppressprint=False):
calltype = 'Network Detail'
geturl = '{0}/networks/{1}/traffic?timespan={2}&deviceType={3}'.format(str(base_url), str(networkid), str(timespan),
str(devicetype))
headers = {
'x-cisco-meraki-api-key': format(str(apikey)),
'Content-Type': 'application/json'
}
dashboard = requests.get(geturl, headers=headers)
#
# Call return handler function to parse Dashboard response
#
result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
return result
def getnonmerakivpnpeers(apikey, orgid, suppressprint=False):
#
# Confirm API Key has Admin Access Otherwise Raise Error
#
__hasorgaccess(apikey, orgid)
calltype = 'Non-Meraki VPN'
geturl = '{0}/organizations/{1}/thirdPartyVPNPeers'.format(str(base_url), str(orgid))
headers = {
'x-cisco-meraki-api-key': format(str(apikey)),
'Content-Type': 'application/json'
}
dashboard = requests.get(geturl, headers=headers)
#
# Call return handler function to parse Dashboard response
#
result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
return result
def getsnmpsettings(apikey, orgid, suppressprint=False):
#
# Confirm API Key has Admin Access Otherwise Raise Error
#
__hasorgaccess(apikey, orgid)
calltype = 'SNMP Settings'
geturl = '{0}/organizations/{1}/snmp'.format(str(base_url), str(orgid))
headers = {
'x-cisco-meraki-api-key': format(str(apikey)),
'Content-Type': 'application/json'
}
dashboard = requests.get(geturl, headers=headers)
#
# Call return handler function to parse Dashboard response
#
result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
return result
def getsamlroles(apikey, orgid, suppressprint=False):
#
# Confirm API Key has Admin Access Otherwise Raise Error
#
__hasorgaccess(apikey, orgid)
calltype = 'SAML Roles'
geturl = '{0}/organizations/{1}/samlRoles'.format(str(base_url), str(orgid))
headers = {
'x-cisco-meraki-api-key': format(str(apikey)),
'Content-Type': 'application/json'
}
dashboard = requests.get(geturl, headers=headers)
#
# Call return handler function to parse Dashboard response
#
result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
return result
def getsamlroledetail(apikey, orgid, roleid, suppressprint=False):
#
# Confirm API Key has Admin Access Otherwise Raise Error
#
__hasorgaccess(apikey, orgid)
calltype = 'SAML Role Detail'
geturl = '{0}/organizations/{1}/samlRoles/{2}'.format(str(base_url), str(orgid), str(roleid))
headers = {
'x-cisco-meraki-api-key': format(str(apikey)),
'Content-Type': 'application/json'
}
dashboard = requests.get(geturl, headers=headers)
#
# Call return handler function to parse Dashboard response
#
result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
return result
def getswitchstacks(apikey, networkid, suppressprint=False):
calltype = 'Switch Stacks'
geturl = '{0}/networks/{1}/switchStacks'.format(str(base_url), str(networkid))
headers = {
'x-cisco-meraki-api-key': format(str(apikey)),
'Content-Type': 'application/json'
}
dashboard = requests.get(geturl, headers=headers)
#
# Call return handler function to parse Dashboard response
#
result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
return result
def getswitchstackmembers(apikey, networkid, stackid, suppressprint=False):
calltype = 'Switch Stack Members'
geturl = '{0}/networks/{1}/switchStacks/{2}'.format(str(base_url), str(networkid), str(stackid))
headers = {
'x-cisco-meraki-api-key': format(str(apikey)),
'Content-Type': 'application/json'
}
dashboard = requests.get(geturl, headers=headers)
#
# Call return handler function to parse Dashboard response
#
result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
return result
def getswitchports(apikey, serialnum, suppressprint=False):
calltype = 'Switch Port'
geturl = '{0}/devices/{1}/switchPorts'.format(str(base_url), str(serialnum))
headers = {
'x-cisco-meraki-api-key': format(str(apikey)),
'Content-Type': 'application/json'
}
dashboard = requests.get(geturl, headers=headers)
#
# Call return handler function to parse Dashboard response
#
result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
return result
def getswitchportdetail(apikey, serialnum, portnum, suppressprint=False):
calltype = 'Switch Port Detail'
geturl = '{0}/devices/{1}/switchPorts/{2}'.format(str(base_url), str(serialnum), str(portnum))
headers = {
'x-cisco-meraki-api-key': format(str(apikey)),
'Content-Type': 'application/json'
}
dashboard = requests.get(geturl, headers=headers)
#
# Call return handler function to parse Dashboard response
#
result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
return result
def getssids(apikey, networkid, suppressprint=False):
calltype = 'SSID'
geturl = '{0}/networks/{1}/ssids'.format(str(base_url), str(networkid))
headers = {
'x-cisco-meraki-api-key': format(str(apikey)),
'Content-Type': 'application/json'
}
dashboard = requests.get(geturl, headers=headers)
#
# Call return handler function to parse Dashboard response
#
result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
return result
def getssiddetail(apikey, networkid, ssidnum, suppressprint=False):
calltype = 'SSID Detail'
geturl = '{0}/networks/{1}/ssids/{2}'.format(str(base_url), str(networkid), str(ssidnum))
headers = {
'x-cisco-meraki-api-key': format(str(apikey)),
'Content-Type': 'application/json'
}
dashboard = requests.get(geturl, headers=headers)
#
# Call return handler function to parse Dashboard response
#
result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
return result
def getvlans(apikey, networkid, suppressprint=False):
    """List all VLANs configured on a network."""
    calltype = 'VLANs'
    geturl = str(base_url) + '/networks/' + str(networkid) + '/vlans'
    headers = {'x-cisco-meraki-api-key': str(apikey), 'Content-Type': 'application/json'}
    dashboard = requests.get(geturl, headers=headers)
    # Hand the raw Dashboard response to the shared return handler for parsing.
    return __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
def getvlandetail(apikey, networkid, vlanid, suppressprint=False):
    """Return the configuration of a single VLAN on a network."""
    calltype = 'VLAN Detail'
    geturl = str(base_url) + '/networks/' + str(networkid) + '/vlans/' + str(vlanid)
    headers = {'x-cisco-meraki-api-key': str(apikey), 'Content-Type': 'application/json'}
    dashboard = requests.get(geturl, headers=headers)
    # Hand the raw Dashboard response to the shared return handler for parsing.
    return __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
def gettemplates(apikey, orgid, suppressprint=False):
    """List the configuration templates of an organization."""
    # Confirm the API key has admin access to this organization, else raise.
    __hasorgaccess(apikey, orgid)
    calltype = 'Templates'
    geturl = str(base_url) + '/organizations/' + str(orgid) + '/configTemplates'
    headers = {'x-cisco-meraki-api-key': str(apikey), 'Content-Type': 'application/json'}
    dashboard = requests.get(geturl, headers=headers)
    # Hand the raw Dashboard response to the shared return handler for parsing.
    return __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
def getclients(apikey, serialnum, timestamp=86400, suppressprint=False):
    """List clients seen by a device within the given timespan (seconds)."""
    calltype = 'Device Clients'
    geturl = str(base_url) + '/devices/' + str(serialnum) + '/clients?timespan=' + str(timestamp)
    headers = {'x-cisco-meraki-api-key': str(apikey), 'Content-Type': 'application/json'}
    dashboard = requests.get(geturl, headers=headers)
    # Hand the raw Dashboard response to the shared return handler for parsing.
    return __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
def bindtotemplate(apikey, networkid, templateid, autobind=False, suppressprint=False):
    """Bind a network to a configuration template, optionally auto-binding switches."""
    calltype = 'Template Bind'
    posturl = str(base_url) + '/networks/' + str(networkid) + '/bind'
    headers = {'x-cisco-meraki-api-key': str(apikey), 'Content-Type': 'application/json'}
    payload = json.dumps({'configTemplateId': str(templateid), 'autoBind': autobind})
    dashboard = requests.post(posturl, data=payload, headers=headers)
    # Hand the raw Dashboard response to the shared return handler for parsing.
    return __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
def adddevtonet(apikey, networkid, serial, suppressprint=False):
    """Claim a device (by serial number) into a network."""
    calltype = 'Device'
    posturl = str(base_url) + '/networks/' + str(networkid) + '/devices/claim'
    headers = {'x-cisco-meraki-api-key': str(apikey), 'Content-Type': 'application/json'}
    payload = json.dumps({'serial': str(serial)})
    dashboard = requests.post(posturl, data=payload, headers=headers)
    # Hand the raw Dashboard response to the shared return handler for parsing.
    return __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
def claim(apikey, orgid, serial=None, licensekey=None, licensemode=None, orderid=None, suppressprint=False):
    """Claim a device serial, a license key, or an order into an organization.

    Exactly one of serial, licensekey, or orderid may be supplied.  When a
    license key is claimed, licensemode must accompany it (and vice versa).
    Raises AttributeError on inconsistent arguments.
    """
    calltype = 'Claim'
    posturl = '{0}/organization/{1}/claim'.format(str(base_url), str(orgid))
    headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    # Bug fix: the old loop rebound its counter to the loop variable, so the
    # multiple-identifier check never worked (and could raise TypeError).
    passed = sum(arg is not None for arg in (serial, licensekey, orderid))
    if passed > 1:
        raise AttributeError('Mutiple identifiers passed, please pass only one of either serial number, license key, '
                             'or order ID')
    if (licensekey is None and licensemode is not None) or (licensemode is None and licensekey is not None):
        raise AttributeError('If claiming a license key both license and licensemode attributes must be passed')
    postdata = {}
    if serial is not None:
        postdata['serial'] = serial
    elif licensekey is not None and licensemode is not None:
        # Bug fix: the serial argument was previously sent for both fields.
        postdata['license'] = licensekey
        postdata['licenseMode'] = licensemode
    elif orderid is not None:
        postdata['orderId'] = orderid
    dashboard = requests.post(posturl, data=json.dumps(postdata), headers=headers)
    #
    # Call return handler function to parse Dashboard response
    #
    result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
    return result
def unbindfromtemplate(apikey, networkid, suppressprint=False):
    """Unbind a network from its configuration template."""
    calltype = 'Network Unbind'
    posturl = str(base_url) + '/networks/' + str(networkid) + '/unbind'
    headers = {'x-cisco-meraki-api-key': str(apikey), 'Content-Type': 'application/json'}
    dashboard = requests.post(posturl, headers=headers)
    # Hand the raw Dashboard response to the shared return handler for parsing.
    return __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
def deltemplate(apikey, orgid, templateid, suppressprint=False):
    """Delete a configuration template from an organization."""
    # Confirm the API key has admin access to this organization, else raise.
    __hasorgaccess(apikey, orgid)
    calltype = 'Template'
    delurl = str(base_url) + '/organizations/' + str(orgid) + '/configTemplates/' + str(templateid)
    headers = {'x-cisco-meraki-api-key': str(apikey), 'Content-Type': 'application/json'}
    dashboard = requests.delete(delurl, headers=headers)
    # Hand the raw Dashboard response to the shared return handler for parsing.
    return __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
def delsamlrole(apikey, orgid, roleid, suppressprint=False):
    """Delete a SAML role from an organization."""
    # Confirm the API key has admin access to this organization, else raise.
    __hasorgaccess(apikey, orgid)
    calltype = 'SAML Role'
    delurl = str(base_url) + '/organizations/' + str(orgid) + '/samlRoles/' + str(roleid)
    headers = {'x-cisco-meraki-api-key': str(apikey), 'Content-Type': 'application/json'}
    dashboard = requests.delete(delurl, headers=headers)
    # Hand the raw Dashboard response to the shared return handler for parsing.
    return __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
def updatevlan(apikey, networkid, vlanid, vlanname=None, mxip=None, subnetip=None, suppressprint=False):
    """Update name, appliance IP and/or subnet of an existing VLAN."""
    calltype = 'VLAN'
    puturl = str(base_url) + '/networks/' + str(networkid) + '/vlans/' + str(vlanid)
    headers = {'x-cisco-meraki-api-key': str(apikey), 'Content-Type': 'application/json'}
    # Only send the attributes the caller actually supplied.
    body = {}
    if vlanname is not None:
        body['name'] = str(vlanname)
    if mxip is not None:
        body['applianceIp'] = str(mxip)
    if subnetip is not None:
        body['subnet'] = str(subnetip)
    dashboard = requests.put(puturl, data=json.dumps(body), headers=headers)
    # Hand the raw Dashboard response to the shared return handler for parsing.
    return __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
def addvlan(apikey, networkid, vlanid, vlanname, mxip, subnetip, suppressprint=False):
    """Create a VLAN on a network with the given id, name, appliance IP and subnet."""
    calltype = 'VLAN'
    posturl = str(base_url) + '/networks/' + str(networkid) + '/vlans'
    headers = {'x-cisco-meraki-api-key': str(apikey), 'Content-Type': 'application/json'}
    body = json.dumps({
        'id': str(vlanid),
        'name': str(vlanname),
        'applianceIp': str(mxip),
        'subnet': str(subnetip)
    })
    dashboard = requests.post(posturl, data=body, headers=headers)
    # Hand the raw Dashboard response to the shared return handler for parsing.
    return __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
def delvlan(apikey, networkid, vlanid, suppressprint=False):
    """Delete a VLAN from a network."""
    calltype = 'VLAN'
    delurl = str(base_url) + '/networks/' + str(networkid) + '/vlans/' + str(vlanid)
    headers = {'x-cisco-meraki-api-key': str(apikey), 'Content-Type': 'application/json'}
    dashboard = requests.delete(delurl, headers=headers)
    # Hand the raw Dashboard response to the shared return handler for parsing.
    return __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
def addadmin(apikey, orgid, email, name, orgaccess=None, tags=None, tagaccess=None, networks=None,
             netaccess=None, suppressprint=False):
    """Create a dashboard administrator with org-, tag- and/or network-level rights.

    tags/tagaccess and networks/netaccess are parallel lists; both halves of a
    pair must be supplied together and have equal length.  Returns None after
    printing a message when the arguments are inconsistent.
    """
    # Confirm the API key has admin access to this organization, else raise.
    __hasorgaccess(apikey, orgid)
    calltype = 'Administrator'
    posturl = '{0}/organizations/{1}/admins'.format(str(base_url), str(orgid))
    headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    if orgaccess is None and tags is None and networks is None:
        print("Administrator accounts must be granted access to either an Organization, Networks, or Tags")
        return None
    # Tag-based permissions: both lists must be present and equal in length.
    posttags = []
    if tags is not None and tagaccess is None:
        print("If tags are defined you must define matching access arguments.\nFor example, tags = ['tag1', 'tag2'], "
              "must have matching access arguments: tagaccess = 'full', 'read-only'")
        return None
    elif tagaccess is not None and tags is None:
        print("If tag access levels are defined you must define matching tag arguments\nFor example, tags = "
              "['tag1', 'tag2'] must have matching access arguments: tagaccess = 'full', 'read-only'")
        return None
    elif tags is not None and tagaccess is not None:
        if len(tags) != len(tagaccess):
            print("The number of tags and access arguments must match.\n")
            print("For example, tags = ['tag1', 'tag2'] must have matching access arguments: tagaccess = "
                  "['full', 'read-only']")
            return None
        for tag, access in zip(tags, tagaccess):
            posttags.append({'tag': tag, 'access': access})
    # Network-based permissions: both lists must be present and equal in length.
    postnets = []
    if networks is not None and netaccess is None:
        print("If networks are defined you must define matching access arguments\nFor example networks = "
              "['net1', 'net2'] must have matching access arguments: netaccess = 'full', 'read-only'")
        return None
    elif netaccess is not None and networks is None:
        print("If network access levels are defined you must define matching network arguments\nFor example, networks"
              " = ['net1', 'net2'] must have matching access arguments: netaccess = 'full', 'read-only'")
        return None
    elif networks is not None and netaccess is not None:
        if len(networks) != len(netaccess):
            print("The number of networks and access arguments must match.\n")
            print("For example, networks = ['net1', 'net2'] must have matching access arguments: netaccess = "
                  "['full', 'read-only']")
            return None
        for net, access in zip(networks, netaccess):
            postnets.append({'id': net, 'access': access})
    # Build the payload; tag/network permissions are included only when set.
    postdata = {
        'name': format(str(name)),
        'email': format(str(email)),
        'orgAccess': orgaccess
    }
    if posttags:
        postdata['tags'] = posttags
    if postnets:
        postdata['networks'] = postnets
    dashboard = requests.post(posturl, data=json.dumps(postdata), headers=headers)
    # Hand the raw Dashboard response to the shared return handler for parsing.
    return __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
def deladmin(apikey, orgid, adminid, suppressprint=False):
    """Delete a dashboard administrator from an organization."""
    # Confirm the API key has admin access to this organization, else raise.
    __hasorgaccess(apikey, orgid)
    calltype = 'Administrator'
    delurl = str(base_url) + '/organizations/' + str(orgid) + '/admins/' + str(adminid)
    headers = {'x-cisco-meraki-api-key': str(apikey), 'Content-Type': 'application/json'}
    dashboard = requests.delete(delurl, headers=headers)
    # Hand the raw Dashboard response to the shared return handler for parsing.
    return __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
def addnetwork(apikey, orgid, name, nettype, tags, tz, suppressprint=False):
    """Create a new network in an organization with the given type, tags and time zone."""
    # Confirm the API key has admin access to this organization, else raise.
    __hasorgaccess(apikey, orgid)
    calltype = 'Network'
    posturl = str(base_url) + '/organizations/' + str(orgid) + '/networks'
    headers = {'x-cisco-meraki-api-key': str(apikey), 'Content-Type': 'application/json'}
    # Validate the time zone string before building the payload.
    __isvalidtz(tz)
    body = json.dumps({
        'name': str(name),
        'type': str(nettype),
        'tags': str(tags),
        'timeZone': str(tz)
    })
    dashboard = requests.post(posturl, data=body, headers=headers)
    # Hand the raw Dashboard response to the shared return handler for parsing.
    return __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
def delnetwork(apikey, networkid, suppressprint=False):
    """Delete a network."""
    calltype = 'Network'
    delurl = str(base_url) + '/networks/' + str(networkid)
    headers = {'x-cisco-meraki-api-key': str(apikey), 'Content-Type': 'application/json'}
    dashboard = requests.delete(delurl, headers=headers)
    # Hand the raw Dashboard response to the shared return handler for parsing.
    return __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
def updateadmin(apikey, orgid, adminid, email, name=None, orgaccess=None, tags=None, tagaccess=None,
                networks=None, netaccess=None, suppressprint=False):
    """Update an existing dashboard administrator's name and/or permissions.

    At least one of name, orgaccess, tags or networks must be supplied.
    tags/tagaccess and networks/netaccess are parallel lists; both halves of a
    pair must be supplied together and have equal length.

    Bug fix: an update passing only orgaccess (no name, tags or networks)
    previously serialized an empty JSON array and silently dropped the
    permission change; the payload is now always a populated object.
    """
    #
    # Confirm API Key has Admin Access Otherwise Raise Error
    #
    __hasorgaccess(apikey, orgid)
    calltype = 'Administrator'
    puturl = '{0}/organizations/{1}/admins/{2}'.format(str(base_url), str(orgid), str(adminid))
    headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    if orgaccess is None and tags is None and networks is None and name is None:
        print("Administrator account updates must include Organization, Networks, or Tags permission changes or an "
              "updated name attribute")
        return None
    # Tag-based permissions: both lists must be present and equal in length.
    puttags = []
    if tags is not None and tagaccess is None:
        print("If tags are defined you must define matching access arguments.\nFor example, tags = ['tag1', 'tag2'], "
              "must have matching access arguments: tagaccess = 'full', 'read-only'")
        return None
    elif tagaccess is not None and tags is None:
        print("If tag access levels are defined you must define matching tag arguments\nFor example, tags = "
              "['tag1', 'tag2'] must have matching access arguments: tagaccess = 'full', 'read-only'")
        return None
    elif tags is not None and tagaccess is not None:
        if len(tags) != len(tagaccess):
            print("The number of tags and access arguments must match.\n")
            print("For example, tags = ['tag1', 'tag2'] must have matching access arguments: tagaccess = "
                  "['full', 'read-only']")
            return None
        for tag, access in zip(tags, tagaccess):
            puttags.append({'tag': tag, 'access': access})
    # Network-based permissions: both lists must be present and equal in length.
    putnets = []
    if networks is not None and netaccess is None:
        print("If networks are defined you must define matching access arguments\nFor example networks = "
              "['net1', 'net2'] must have matching access arguments: netaccess = 'full', 'read-only'")
        return None
    elif netaccess is not None and networks is None:
        print("If network access levels are defined you must define matching network arguments\nFor example, networks"
              " = ['net1', 'net2'] must have matching access arguments: netaccess = 'full', 'read-only'")
        return None
    elif networks is not None and netaccess is not None:
        if len(networks) != len(netaccess):
            print("The number of networks and access arguments must match.\n")
            print("For example, networks = ['net1', 'net2'] must have matching access arguments: netaccess = "
                  "['full', 'read-only']")
            return None
        for net, access in zip(networks, netaccess):
            putnets.append({'id': net, 'access': access})
    # Build the update payload; optional attributes are included only when set.
    putdata = {
        'email': format(str(email)),
        'orgAccess': orgaccess
    }
    if name is not None:
        putdata['name'] = format(str(name))
    if puttags:
        putdata['tags'] = puttags
    if putnets:
        putdata['networks'] = putnets
    dashboard = requests.put(puturl, data=json.dumps(putdata), headers=headers)
    #
    # Call return handler function to parse Dashboard response
    #
    result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
    return result
def getvpnsettings(apikey, networkid, suppressprint=False):
    """Return a network's site-to-site AutoVPN settings."""
    calltype = 'AutoVPN'
    geturl = str(base_url) + '/networks/' + str(networkid) + '/siteToSiteVpn'
    headers = {'x-cisco-meraki-api-key': str(apikey), 'Content-Type': 'application/json'}
    dashboard = requests.get(geturl, headers=headers)
    # Hand the raw Dashboard response to the shared return handler for parsing.
    return __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
def updatevpnsettings(apikey, networkid, mode='None', subnets=None, usevpn=None, hubnetworks=None, defaultroute=None,
                      suppressprint=False):
    """Update a network's site-to-site AutoVPN settings.

    hubnetworks/defaultroute and subnets/usevpn are parallel lists that are
    zipped into the hub and subnet structures the Dashboard API expects.
    Each local subnet is validated before the payload is sent.

    Fix: removed a leftover debug print that echoed the full payload to stdout.
    """
    calltype = 'AutoVPN'
    puturl = '{0}/networks/{1}/siteToSiteVpn'.format(str(base_url), str(networkid))
    headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    # Validate the parallel hub lists, then pair them up.
    __comparelist(hubnetworks, defaultroute)
    if hubnetworks is not None and defaultroute is not None:
        hubmodes = zip(hubnetworks, defaultroute)
    else:
        hubmodes = []
    # Validate the parallel subnet lists, then pair them up.
    __comparelist(subnets, usevpn)
    vpnsubnets = list(zip(subnets, usevpn))
    hubs = []
    for h, d in hubmodes:
        hubs.append({'hubId': h, 'useDefaultRoute': d})
    subnets = []
    for s, i in vpnsubnets:
        __validsubnetip(s)
        subnets.append({'localSubnet': s, 'useVpn': i})
    putdata = {'mode': mode, 'hubs': hubs, 'subnets': subnets}
    putdata = json.dumps(putdata)
    dashboard = requests.put(puturl, data=putdata, headers=headers)
    #
    # Call return handler function to parse Dashboard response
    #
    result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
    return result
def updatenonmerakivpn(apikey, orgid, names, ips, secrets, remotenets, tags=None, suppressprint=False):
    """Replace ALL non-Meraki VPN peers for an organization.

    This call destructively overwrites every existing peer.  To add or modify
    a single peer, download the current peer information first and re-upload
    the complete modified array of all peers.

    All peer arguments must be lists; tags may be omitted, in which case every
    peer is made available to all networks.

    Bug fix: the list-length warning previously computed
    ``len(a) + len(b) + len(c) + len(d) / 4`` (dividing only the last term),
    so mismatched lists were usually not reported; the lengths are now
    compared directly.
    """
    #
    # Confirm API Key has Admin Access Otherwise Raise Error
    #
    __hasorgaccess(apikey, orgid)
    calltype = 'Non-Meraki VPN'
    puturl = '{0}/organizations/{1}/thirdPartyVPNPeers'.format(str(base_url), str(orgid))
    headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    #
    # Will only upload peer information if lists are passed to the function, otherwise will fail. If tags argument is
    # None will assume all peers should be available to all networks.
    #
    if isinstance(names, list) and isinstance(ips, list) and isinstance(secrets, list)\
            and isinstance(remotenets, list) and (tags is None or isinstance(tags, list)):
        if not (len(names) == len(ips) == len(secrets) == len(remotenets)):
            warnings.warn('Peers will be added up to the length of the shortest list passed', ListLengthWarn)
        if tags is None:
            tags = [['all'] for _ in names]
        # Validate every remote subnet (flat entries or nested lists).
        for n in remotenets:
            if isinstance(n, list):
                for sn in n:
                    __validsubnetip(sn)
            else:
                __validsubnetip(n)
        putdata = []
        for n, i, s, r, t in zip(names, ips, secrets, remotenets, tags):
            putdata.append({'name': n, 'publicIp': i, 'privateSubnets': r, 'secret': s, 'tags': t})
    else:
        raise TypeError('All peer arguments must be passed as lists, tags argument may be excluded')
    putdata = json.dumps(putdata)
    dashboard = requests.put(puturl, data=putdata, headers=headers)
    #
    # Call return handler function to parse Dashboard response
    #
    result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
    return result
def getnonmerakivpn(apikey, orgid, suppressprint=False):
    """Return the organization's non-Meraki VPN peer list."""
    calltype = 'Non-Meraki VPN'
    geturl = str(base_url) + '/organizations/' + str(orgid) + '/thirdPartyVPNPeers'
    headers = {'x-cisco-meraki-api-key': str(apikey), 'Content-Type': 'application/json'}
    dashboard = requests.get(geturl, headers=headers)
    # Hand the raw Dashboard response to the shared return handler for parsing.
    return __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
def appendnonmerakivpn(apikey, orgid, names, ips, secrets, remotenets, tags=None, suppressprint=False):
    """Append non-Meraki VPN peers to an organization's existing peer list.

    The Dashboard API replaces the full peer list on every PUT, so this
    function downloads the current peers first and re-uploads them together
    with the new entries.  All peer arguments must be lists; tags may be
    omitted, in which case every new peer is made available to all networks.

    Bug fix: the list-length warning previously computed
    ``len(a) + len(b) + len(c) + len(d) / 4`` (dividing only the last term),
    so mismatched lists were usually not reported; the lengths are now
    compared directly.
    """
    #
    # Confirm API Key has Admin Access Otherwise Raise Error
    #
    __hasorgaccess(apikey, orgid)
    calltype = 'Non-Meraki VPN'
    puturl = '{0}/organizations/{1}/thirdPartyVPNPeers'.format(str(base_url), str(orgid))
    geturl = '{0}/organizations/{1}/thirdPartyVPNPeers'.format(str(base_url), str(orgid))
    headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    # Fetch the current peer list so existing peers survive the overwrite.
    currentpeers = json.loads(requests.get(geturl, headers=headers).text)
    # Normalize remotenets to a list of lists so single-peer calls work.
    if any(isinstance(el, list) for el in remotenets) is False:
        remotenets = [remotenets]
        warnings.warn('Variable remotenets was not passed as list of lists, it has been converted', ListLengthWarn)
    if isinstance(names, list) and isinstance(ips, list) and isinstance(secrets, list)\
            and isinstance(remotenets, list) and (tags is None or isinstance(tags, list)):
        if not (len(names) == len(ips) == len(secrets) == len(remotenets)):
            warnings.warn('Peers will be added up to the length of the shortest list passed', ListLengthWarn)
        if tags is None:
            tags = [['all'] for _ in names]
        # Validate every remote subnet (flat entries or nested lists).
        for n in remotenets:
            if isinstance(n, list):
                for sn in n:
                    __validsubnetip(sn)
            else:
                __validsubnetip(n)
        putdata = []
        for n, i, s, r, t in zip(names, ips, secrets, remotenets, tags):
            putdata.append({'name': n, 'publicIp': i, 'privateSubnets': r, 'secret': s, 'tags': t})
        # Re-append every peer that already exists in the organization.
        for x in currentpeers:
            putdata.append({'name': x['name'], 'publicIp': x['publicIp'], 'privateSubnets': x['privateSubnets'],
                            'secret': x['secret'], 'tags': x['tags']})
    else:
        raise TypeError('All peer arguments must be passed as lists, tags argument may be excluded')
    putdata = json.dumps(putdata)
    dashboard = requests.put(puturl, data=putdata, headers=headers)
    #
    # Call return handler function to parse Dashboard response
    #
    result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
    return result
def updatesnmpsettings(apikey, orgid, v2c=False, v3=False, v3authmode='SHA', v3authpw=None, v3privmode='AES128',
                       v3privpw=None, allowedips=None, suppressprint=False):
    """Update an organization's SNMP settings.

    v3authmode must be 'SHA' or 'MD5'; v3privmode must be 'DES' or 'AES128'.
    When v3 is enabled both passwords are required and must be at least 8
    characters.  allowedips may be a single IP or a list of IPs; the API
    expects them joined with ':'.

    Bug fix: each allowed IP is now coerced to str before concatenation, so a
    list of non-string entries no longer raises TypeError.
    """
    #
    # Confirm API Key has Admin Access Otherwise Raise Error
    #
    __hasorgaccess(apikey, orgid)
    calltype = 'SNMP'
    puturl = '{0}/organizations/{1}/snmp'.format(str(base_url), str(orgid))
    headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    putdata = {}
    if v3authmode not in ['SHA', 'MD5']:
        raise ValueError('Valid authentication modes are "SHA" or "MD5"')
    if v3privmode not in ['DES', 'AES128']:
        raise ValueError('Valid privacy modes are "DES" and "AES128"')
    if v3 and (v3authpw is None or v3privpw is None):
        raise ValueError('If SNMPv3 is enabled a authentication and privacy password must be provided')
    elif v3 and (len(v3authpw) < 8 or len(v3privpw) < 8):
        raise ValueError('Authentication and privacy passwords must be a minimum of 8 characters')
    elif v3:
        putdata['v3AuthMode'] = v3authmode
        putdata['v3AuthPass'] = v3authpw
        putdata['v3PrivMode'] = v3privmode
        putdata['v3PrivPass'] = v3privpw
    putdata['v2cEnabled'] = v2c
    putdata['v3Enabled'] = v3
    if allowedips is not None:
        if isinstance(allowedips, list):
            # Validate every entry and join the list with ':' as the API expects.
            allowiplist = str(allowedips[0])
            __validip(allowiplist)
            if len(allowedips) > 1:
                for i in allowedips[1:]:
                    __validip(str(i))
                    allowiplist = allowiplist + ':' + str(i)
        else:
            __validip(str(allowedips))
            allowiplist = str(allowedips)
        putdata['peerIps'] = allowiplist
    else:
        putdata['peerIps'] = None
    putdata = json.dumps(putdata)
    dashboard = requests.put(puturl, data=putdata, headers=headers)
    #
    # Call return handler function to parse Dashboard response
    #
    result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
    return result
def removedevfromnet(apikey, networkid, serial, suppressprint=False):
    """Remove a device (by serial number) from a network."""
    calltype = 'Device'
    posturl = str(base_url) + '/networks/' + str(networkid) + '/devices/' + str(serial) + '/remove'
    headers = {'x-cisco-meraki-api-key': str(apikey), 'Content-Type': 'application/json'}
    dashboard = requests.post(posturl, headers=headers)
    # Hand the raw Dashboard response to the shared return handler for parsing.
    return __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
def addorg(apikey, neworgname, suppressprint=False):
    """Create a new organization with the given name."""
    calltype = 'Organization'
    posturl = str(base_url) + '/organizations/'
    headers = {'x-cisco-meraki-api-key': str(apikey), 'Content-Type': 'application/json'}
    payload = json.dumps({'name': str(neworgname)})
    dashboard = requests.post(posturl, data=payload, headers=headers)
    # Hand the raw Dashboard response to the shared return handler for parsing.
    return __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
def cloneorg(apikey, orgid, neworgname, suppressprint=False):
    """Clone an existing organization into a new one with the given name."""
    # Confirm the API key has admin access to this organization, else raise.
    __hasorgaccess(apikey, orgid)
    calltype = 'Organization Clone'
    posturl = str(base_url) + '/organizations/' + str(orgid) + '/clone'
    headers = {'x-cisco-meraki-api-key': str(apikey), 'Content-Type': 'application/json'}
    payload = json.dumps({'name': str(neworgname)})
    dashboard = requests.post(posturl, data=payload, headers=headers)
    # Hand the raw Dashboard response to the shared return handler for parsing.
    return __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
def renameorg(apikey, orgid, neworgname, suppressprint=False):
    """Rename an existing organization."""
    # Confirm the API key has admin access to this organization, else raise.
    __hasorgaccess(apikey, orgid)
    calltype = 'Organization Rename'
    puturl = str(base_url) + '/organizations/' + str(orgid)
    headers = {'x-cisco-meraki-api-key': str(apikey), 'Content-Type': 'application/json'}
    payload = json.dumps({'name': str(neworgname)})
    dashboard = requests.put(puturl, data=payload, headers=headers)
    # Hand the raw Dashboard response to the shared return handler for parsing.
    return __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
def updatenetwork(apikey, networkid, name, tz, tags, suppressprint=False):
    """Update a network's name, time zone and/or tags.

    Bug fix: the PUT previously targeted ``/organizations/{networkid}``; the
    correct Dashboard endpoint for updating a network is
    ``/networks/{networkid}``.
    """
    calltype = 'Network'
    puturl = '{0}/networks/{1}'.format(str(base_url), str(networkid))
    headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    # Only send the attributes the caller actually supplied.
    putdata = {}
    if name:
        putdata['name'] = name
    if tz:
        __isvalidtz(tz)
        putdata['timeZone'] = format(str(tz))
    if tags:
        putdata['tags'] = __listtotag(tags)
    dashboard = requests.put(puturl, data=json.dumps(putdata), headers=headers)
    #
    # Call return handler function to parse Dashboard response
    #
    result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
    return result
def updatedevice(apikey, networkid, sn, name, tags, lat, lng, address, suppressprint=False):
    """Update a device's name, tags, location and/or street address."""
    calltype = 'Device'
    puturl = str(base_url) + '/networks/' + str(networkid) + '/devices/' + str(sn)
    headers = {'x-cisco-meraki-api-key': str(apikey), 'Content-Type': 'application/json'}
    body = {}
    if name:
        body['name'] = name
    if tags:
        body['tags'] = __listtotag(tags)
    # Latitude and longitude must be supplied together.
    if lat and not lng:
        raise ValueError('If latitude is entered a longitude value must also be entered')
    if lng and not lat:
        raise ValueError('If longitude is entered a latitude value must also be entered')
    if lat and lng:
        body['lat'] = lat
        body['lng'] = lng
    if address:
        body['address'] = address
    dashboard = requests.put(puturl, data=json.dumps(body), headers=headers)
    # Hand the raw Dashboard response to the shared return handler for parsing.
    return __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
def updatessid(apikey, networkid, ssidnum, name, enabled, authmode, encryptionmode, psk, suppressprint=False):
    """Update an SSID's name, enabled state and authentication settings.

    authmode must be 'psk' or 'open'.  psk mode additionally requires both
    encryptionmode and psk; in open mode those arguments are ignored with a
    warning.  Raises ValueError on inconsistent arguments.

    Bug fixes: the boolean check on ``enabled`` previously raised for every
    truthy value (including True); string comparisons used ``is not`` instead
    of ``!=``; and ``len(psk)`` could be evaluated when psk was None.
    """
    calltype = 'SSID'
    puturl = '{0}/networks/{1}/ssids/{2}'.format(str(base_url), str(networkid), str(ssidnum))
    headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    putdata = {}
    if name:
        putdata['name'] = str(name)
    # Validate that enabled is actually a bool (None is allowed and passed through).
    if enabled is not None and not isinstance(enabled, bool):
        raise ValueError("Enabled must be a boolean variable")
    else:
        putdata['enabled'] = str(enabled)
    if authmode not in ['psk', 'open']:
        raise ValueError("Authentication mode must be psk or open")
    elif authmode == 'psk' and (not encryptionmode or not psk):
        raise ValueError("If authentication mode is set to psk, encryption mode and psk must also be passed")
    elif authmode == 'open' and (encryptionmode or psk):
        warnings.warn(IgnoredArgument("If authentication mode is open, encryption mode and psk will be ignored"))
    elif authmode:
        putdata['authMode'] = str(authmode)
    if encryptionmode and (authmode != 'psk' or not psk):
        raise ValueError("If encryption mode is passed, authentication mode must be psk and psk must also be passed")
    elif encryptionmode:
        putdata['encryptionMode'] = str(encryptionmode)
    if psk and (authmode != 'psk' or not encryptionmode):
        raise ValueError("If psk is passed, authentication mode and encryption mode must also be passed")
    elif psk and encryptionmode == 'wpa' and len(psk) < 8:
        raise ValueError("If encryption mode is wpa, the psk must be a minimum of 8 characters")
    elif psk:
        putdata['psk'] = str(psk)
    dashboard = requests.put(puturl, data=json.dumps(putdata), headers=headers)
    #
    # Call return handler function to parse Dashboard response
    #
    result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
    return result
def updateswitchport(apikey, serialnum, portnum, name, tags, enabled, porttype, vlan, voicevlan, allowedvlans, poe,
                     isolation, rstp, stpguard, accesspolicynum, suppressprint=False):
    """Update the configuration of a single switch port."""
    calltype = 'Switch Port'
    puturl = str(base_url) + '/devices/' + str(serialnum) + '/switchPorts/' + str(portnum)
    headers = {'x-cisco-meraki-api-key': str(apikey), 'Content-Type': 'application/json'}
    body = {}
    if name:
        body['name'] = str(name)
    if tags:
        body['tags'] = __listtotag(tags)
    # enabled, PoE, isolation and RSTP are always sent, matching prior behavior.
    body['enabled'] = str(enabled)
    if porttype and porttype not in ['access', 'trunk', 'TRUNK', 'ACCESS']:
        raise ValueError("Type must be either 'access' or 'trunk'")
    elif porttype:
        body['type'] = str(porttype)
    if vlan:
        body['vlan'] = str(vlan)
    if voicevlan:
        body['voiceVlan'] = voicevlan
    if allowedvlans:
        body['allowedVlans'] = allowedvlans
    body['poeEnabled'] = str(poe)
    body['isolation'] = isolation
    body['rstpEnabled'] = rstp
    if stpguard and stpguard not in ['disabled', 'root guard', 'BPDU guard']:
        raise ValueError("Valid values for STP Guard are 'disabled', 'root guard', or 'BPDU Guard'")
    elif stpguard:
        body['stpGuard'] = stpguard
    if accesspolicynum:
        body['accessPolicyNumber'] = accesspolicynum
    dashboard = requests.put(puturl, data=json.dumps(body), headers=headers)
    # Hand the raw Dashboard response to the shared return handler for parsing.
    return __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
def addsamlrole(apikey, orgid, rolename, orgaccess, tags, tagaccess, networks, netaccess, suppressprint=False):
    """Create a new SAML role in an organization.

    Tag- and network-based permissions are given as two parallel lists (names
    and access levels). If the two lists differ in length a ListLengthWarn is
    emitted and they are zipped down to the shorter one.

    Args:
        apikey: Dashboard API key; must have admin access to the organization.
        orgid: Organization ID to create the role in.
        rolename: Name for the new SAML role (required).
        orgaccess: Organization-wide access: 'read-only', 'full' or 'none'.
        tags: List of tag names, paired element-wise with tagaccess.
        tagaccess: List of access levels, one per tag.
        networks: List of network IDs, paired element-wise with netaccess.
        netaccess: List of access levels, one per network.
        suppressprint: Pass-through flag for __returnhandler output.

    Returns:
        The parsed Dashboard API response from __returnhandler.
    """
    #
    # Confirm API Key has Admin Access Otherwise Raise Error
    #
    __hasorgaccess(apikey, orgid)
    calltype = 'SAML Role'
    posturl = '{0}/organizations/{1}/samlRoles'.format(str(base_url), str(orgid))
    headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    if not orgaccess and not tags and not networks:
        raise AttributeError("At least one of organization access, tag based access, or network based access must be "
                             "defined")
    if orgaccess and orgaccess not in ['read-only', 'full', 'none']:
        raise ValueError("Organization access must be either 'read-only' or 'full' or 'none'")
    posttags = []
    taglist = False
    if (tags and not tagaccess) or (tagaccess and not tags):
        raise AttributeError("Both tags and tag access lists must be passed if tag based permissions are defined")
    elif tags and tagaccess:
        taglist = True
    if taglist is True:
        tagcompare = __comparelist(tags, tagaccess)
        if tagcompare == 2:
            warnings.warn(ListLengthWarn("Tags and tag access list are not the same length, lists will be joined to "
                                         "the shortest length list"))
        if tagcompare in (0, 2):
            # zip() truncates to the shorter list, implementing the warning above.
            for t, ta in zip(tags, tagaccess):
                posttags.append({'tag': t, 'access': ta})
    postnets = []
    netlist = False
    if (networks and not netaccess) or (netaccess and not networks):
        raise AttributeError("Both network and network access lists must be passed if network based permissions "
                             "are defined")
    elif networks and netaccess:
        netlist = True
    if netlist is True:
        netcompare = __comparelist(networks, netaccess)
        if netcompare == 2:
            # Message fixed: it previously said "tag access list" (copy-paste error).
            warnings.warn(ListLengthWarn("Networks and network access list are not the same length, lists will be "
                                         "joined to the shortest length list"))
        if netcompare in (0, 2):
            for n, na in zip(networks, netaccess):
                postnets.append({'id': n, 'access': na})
    postdata = {}
    if not rolename:
        raise ValueError("Role name must be passed for role creation")
    else:
        postdata['role'] = str(rolename)
    if orgaccess:
        postdata['orgAccess'] = str(orgaccess)
    if taglist is True:
        postdata['tags'] = posttags
    if netlist is True:
        postdata['networks'] = postnets
    dashboard = requests.post(posturl, data=json.dumps(postdata), headers=headers)
    #
    # Call return handler function to parse Dashboard response
    #
    result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
    return result
def updatesamlrole(apikey, orgid, roleid, rolename, orgaccess, tags, tagaccess, networks, netaccess,
                   suppressprint=False):
    """Update an existing SAML role in an organization.

    Only the attributes that are passed with truthy values are included in the
    PUT payload. Tag/network permissions are parallel lists; mismatched list
    lengths trigger a ListLengthWarn and are zipped to the shorter length.

    Args:
        apikey: Dashboard API key; must have admin access to the organization.
        orgid: Organization ID the role belongs to.
        roleid: ID of the SAML role to update.
        rolename: New role name (optional).
        orgaccess: 'read-only', 'full' or 'none' organization-wide access.
        tags: List of tag names, paired element-wise with tagaccess.
        tagaccess: List of access levels, one per tag.
        networks: List of network IDs, paired element-wise with netaccess.
        netaccess: List of access levels, one per network.
        suppressprint: Pass-through flag for __returnhandler output.

    Returns:
        The parsed Dashboard API response from __returnhandler.
    """
    #
    # Confirm API Key has Admin Access Otherwise Raise Error
    #
    __hasorgaccess(apikey, orgid)
    calltype = 'SAML Role'
    puturl = '{0}/organizations/{1}/samlRoles/{2}'.format(str(base_url), str(orgid), str(roleid))
    headers = {
        'x-cisco-meraki-api-key': format(str(apikey)),
        'Content-Type': 'application/json'
    }
    if orgaccess and orgaccess not in ['read-only', 'full', 'none']:
        # Message fixed: the closing quote around 'none' was missing.
        raise ValueError("Organization access must be either 'read-only' or 'full' or 'none'")
    puttags = []
    taglist = False
    if (tags and not tagaccess) or (tagaccess and not tags):
        raise AttributeError("Both tags and tag access lists must be passed if tag based permissions are defined")
    elif tags and tagaccess:
        taglist = True
    if taglist is True:
        tagcompare = __comparelist(tags, tagaccess)
        if tagcompare == 2:
            warnings.warn(ListLengthWarn("Tags and tag access list are not the same length, lists will be joined to "
                                         "the shortest length list"))
        if tagcompare in (0, 2):
            # zip() truncates to the shorter list, implementing the warning above.
            for t, ta in zip(tags, tagaccess):
                puttags.append({'tag': t, 'access': ta})
    putnets = []
    netlist = False
    if (networks and not netaccess) or (netaccess and not networks):
        raise AttributeError("Both network and network access lists must be passed if network based permissions "
                             "are defined")
    elif networks and netaccess:
        netlist = True
    if netlist is True:
        netcompare = __comparelist(networks, netaccess)
        if netcompare == 2:
            # Message fixed: it previously said "tag access list" (copy-paste error).
            warnings.warn(ListLengthWarn("Networks and network access list are not the same length, lists will be "
                                         "joined to the shortest length list"))
        if netcompare in (0, 2):
            for n, na in zip(networks, netaccess):
                putnets.append({'id': n, 'access': na})
    roledata = {}
    if rolename:
        roledata['role'] = str(rolename)
    if orgaccess:
        roledata['orgAccess'] = str(orgaccess)
    if taglist is True:
        roledata['tags'] = puttags
    if netlist is True:
        roledata['networks'] = putnets
    # Removed a leftover debug print() and the unused `putdata = [roledata]`
    # list; the request has always been sent with `roledata` itself.
    dashboard = requests.put(puturl, data=json.dumps(roledata), headers=headers)
    result = __returnhandler(dashboard.status_code, dashboard.text, calltype, suppressprint)
    return result
f7f481bc75236984cfe082cef91a414189fb19b0 | 3,509 | py | Python | evaluation/parameterstudy.py | ChristianSchorr/InertialFlowCutter | baac26aa394e6bb58ed43d122b820dd963cfb303 | [
"BSD-3-Clause"
] | 14 | 2019-07-03T00:20:01.000Z | 2022-03-15T02:28:58.000Z | evaluation/parameterstudy.py | ChristianSchorr/InertialFlowCutter | baac26aa394e6bb58ed43d122b820dd963cfb303 | [
"BSD-3-Clause"
] | 9 | 2019-07-04T08:32:16.000Z | 2021-07-09T09:19:47.000Z | evaluation/parameterstudy.py | ChristianSchorr/InertialFlowCutter | baac26aa394e6bb58ed43d122b820dd963cfb303 | [
"BSD-3-Clause"
] | 9 | 2019-07-03T00:20:53.000Z | 2022-03-22T11:41:25.000Z | import configurable_inertialflowcutter_order as ifc
import pandas as pd
import numpy as np
import re
import subprocess
import os
# --- Parameter study configuration -----------------------------------------
# Root folder of the experiment data; empty string means the current directory.
experiments_folder = ""
graph = "col"  # TODO: replace again with "europe" for the full-size experiment
graph_path = experiments_folder + graph + "/"
metric_path = graph_path + "travel_time"
# Precomputed query source/target node-id files for the query benchmark.
query_sources = experiments_folder + graph + ".q.s"
query_targets = experiments_folder + graph + ".q.t"
# Locations of the compiled InertialFlowCutter binaries.
binary_path = "./../build/"
order_console = binary_path + "console"
customization_binary = binary_path + "customize"
query_binary = binary_path + "query"
def config_contained(config, results):
    """Return True if *config* already appears as a row in *results*.

    The config namedtuple is turned into a one-row DataFrame and inner-merged
    with the results frame; a non-empty merge means every field matched.
    """
    as_frame = pd.DataFrame([config._asdict()])
    return not as_frame.merge(results).empty
def config_to_string(config):
    """Render a config tuple as a dot-separated string (used for file names)."""
    return ".".join(str(field) for field in config)
def order_path(config):
    """Path of the node-order file produced for *config*."""
    return "{0}parameterstudy/{1}.{2}.order".format(
        experiments_folder, graph, config_to_string(config))
def log_path(config):
    """Path of the log file written next to the order file for *config*."""
    return "{0}.log".format(order_path(config))
def parse_order_log(config):
    """Parse the order tool's log for *config* into a {name: value} dict.

    Each log line has the form "some name : 123" or "some name : 1.5"; spaces
    in the name become underscores. Any "running time" entry is renamed to
    'order_running_time' and converted from microseconds to seconds.

    Raises:
        ValueError: if a log line does not match the expected format.
    """
    row_dict = {}
    # Fixed: the log file was opened without ever being closed (leak);
    # a with-block now guarantees closure.
    with open(log_path(config)) as log:
        for line in log:
            m = re.match(r"^\s*([a-zA-Z_ ]+) : ([0-9.]+)[^0-9]*$", line)
            if m is None:
                # Explicit raise instead of a bare assert (asserts vanish under -O).
                raise ValueError("unexpected log line: {!r}".format(line))
            name = m.group(1).replace(" ", "_")
            value = m.group(2)
            value = float(value) if "." in value else int(value)
            if "running_time" in name:
                name = "order_running_time"
                value /= 1000000  # microseconds -> seconds
            row_dict[name] = value
    return row_dict
def run_customizations(config):
    """Run the customization binary nine times and return the median time.

    The binary prints its elapsed time on stdout; each measurement is divided
    by 1000 (yielding milliseconds) before taking the median.
    """
    args = [customization_binary, graph_path + "first_out", graph_path + "head",
            order_path(config), metric_path, str(1)]
    measurements = [
        float(subprocess.check_output(args, universal_newlines=True)) / 1000  # in ms
        for _ in range(9)
    ]
    return np.median(np.array(measurements))
def run_queries(config):
    """Run the query binary once and return its reported time as a float."""
    cmd = [query_binary, graph_path + "first_out", graph_path + "head",
           order_path(config), metric_path, query_sources, query_targets]
    return float(subprocess.check_output(cmd, universal_newlines=True))
def main():
    """Run every config that has no results yet and append them to the CSV.

    Reads parameterstudy_configs.csv, computes order/customization/query
    measurements for each config not already present in parameterstudy.csv,
    and rewrites the (sorted) results file.
    """
    configs = pd.read_csv(experiments_folder + "parameterstudy_configs.csv")
    # Create an empty results file on the first run so it can always be read.
    if not os.path.isfile(experiments_folder + "parameterstudy.csv"):
        x = pd.DataFrame(columns=["geo_distance_cutters","hop_distance_cutters","initial_assimilated_fraction","bulk_step_fraction","bulk_assimilation_order_threshold","bulk_assimilation_threshold"])
        x.to_csv(experiments_folder + "parameterstudy.csv", index=False)
    results = pd.read_csv(experiments_folder + "parameterstudy.csv")
    for config in configs.itertuples(index=False):
        if not config_contained(config, results):
            print("computing order with config", config)
            ifc.save_inertialflowcutter_cch_order(config, order_console, graph_path, order_path(config), log_path(config))
            row_dict = config._asdict()
            row_dict.update(parse_order_log(config))
            print("running customization")
            row_dict["median_customization_time"] = run_customizations(config)
            print("running queries")
            row_dict["avg_query_time"] = run_queries(config)
            print(row_dict)
            # DataFrame.append was deprecated and removed in pandas 2.0;
            # pd.concat is the supported equivalent.
            results = pd.concat([results, pd.DataFrame([row_dict])], ignore_index=True)
    results.sort_values(list(configs.columns), ascending=[True] * len(configs.columns), inplace=True)
    results.to_csv(experiments_folder + "parameterstudy.csv", index=False)  # careful: overwrites the results file
# Script entry point: run the parameter study when executed directly.
if __name__ == '__main__':
    main()
| 39.426966 | 199 | 0.680536 | import configurable_inertialflowcutter_order as ifc
import pandas as pd
import numpy as np
import re
import subprocess
import os
# --- Parameter study configuration -----------------------------------------
# Root folder of the experiment data; empty string means the current directory.
experiments_folder = ""
graph = "col"
graph_path = experiments_folder + graph + "/"
metric_path = graph_path + "travel_time"
# Precomputed query source/target node-id files for the query benchmark.
query_sources = experiments_folder + graph + ".q.s"
query_targets = experiments_folder + graph + ".q.t"
# Locations of the compiled InertialFlowCutter binaries.
binary_path = "./../build/"
order_console = binary_path + "console"
customization_binary = binary_path + "customize"
query_binary = binary_path + "query"
def config_contained(config, results):
    # A config counts as already processed when inner-merging its one-row
    # frame with the results frame yields at least one fully matching row.
    cpd = pd.DataFrame([config._asdict()])
    return len(cpd.merge(results)) > 0
def config_to_string(config):
    # Dot-join all config fields; used to build unique per-config file names.
    return '.'.join(map(str,config))
def order_path(config):
    # Path of the node-order file produced for this config.
    return experiments_folder + "parameterstudy/" + graph + "." + config_to_string(config) + ".order"
def log_path(config):
    # Log file written next to the order file.
    return order_path(config) + ".log"
def parse_order_log(config):
    """Parse the order tool's log for *config* into a {name: value} dict.

    Each log line has the form "some name : 123" or "some name : 1.5"; spaces
    in the name become underscores. Any "running time" entry is renamed to
    'order_running_time' and converted from microseconds to seconds.

    Raises:
        ValueError: if a log line does not match the expected format.
    """
    row_dict = {}
    # Fixed: the log file was opened without ever being closed (leak);
    # a with-block now guarantees closure.
    with open(log_path(config)) as log:
        for line in log:
            m = re.match(r"^\s*([a-zA-Z_ ]+) : ([0-9.]+)[^0-9]*$", line)
            if m is None:
                # Explicit raise instead of a bare assert (asserts vanish under -O).
                raise ValueError("unexpected log line: {!r}".format(line))
            name = m.group(1).replace(" ", "_")
            value = m.group(2)
            value = float(value) if "." in value else int(value)
            if "running_time" in name:
                name = "order_running_time"
                value /= 1000000  # microseconds -> seconds
            row_dict[name] = value
    return row_dict
def run_customizations(config):
    # Run the customization binary nine times; its stdout is the elapsed time,
    # scaled by 1/1000 per measurement. Return the median of the nine runs.
    args = [customization_binary, graph_path + "first_out", graph_path + "head", order_path(config), metric_path, str(1)]
    runtimes = []
    for i in range(9):
        t = subprocess.check_output(args, universal_newlines=True)
        runtimes.append(float(t) / 1000)
    return np.median(np.array(runtimes))
def run_queries(config):
    # Run the query binary once; it prints its measured time to stdout.
    args = [query_binary, graph_path + "first_out", graph_path + "head", order_path(config), metric_path, query_sources, query_targets]
    t = subprocess.check_output(args, universal_newlines=True)
    return float(t)
def main():
    """Run every config that has no results yet and append them to the CSV.

    Reads parameterstudy_configs.csv, computes order/customization/query
    measurements for each config not already present in parameterstudy.csv,
    and rewrites the (sorted) results file.
    """
    configs = pd.read_csv(experiments_folder + "parameterstudy_configs.csv")
    # Create an empty results file on the first run so it can always be read.
    if not os.path.isfile(experiments_folder + "parameterstudy.csv"):
        x = pd.DataFrame(columns=["geo_distance_cutters","hop_distance_cutters","initial_assimilated_fraction","bulk_step_fraction","bulk_assimilation_order_threshold","bulk_assimilation_threshold"])
        x.to_csv(experiments_folder + "parameterstudy.csv", index=False)
    results = pd.read_csv(experiments_folder + "parameterstudy.csv")
    for config in configs.itertuples(index=False):
        if not config_contained(config, results):
            print("computing order with config", config)
            ifc.save_inertialflowcutter_cch_order(config, order_console, graph_path, order_path(config), log_path(config))
            row_dict = config._asdict()
            row_dict.update(parse_order_log(config))
            print("running customization")
            row_dict["median_customization_time"] = run_customizations(config)
            print("running queries")
            row_dict["avg_query_time"] = run_queries(config)
            print(row_dict)
            # DataFrame.append was deprecated and removed in pandas 2.0;
            # pd.concat is the supported equivalent.
            results = pd.concat([results, pd.DataFrame([row_dict])], ignore_index=True)
    results.sort_values(list(configs.columns), ascending=[True] * len(configs.columns), inplace=True)
    results.to_csv(experiments_folder + "parameterstudy.csv", index=False)  # careful: overwrites the results file
# Script entry point: run the parameter study when executed directly.
if __name__ == '__main__':
    main()
| true | true |
f7f4820a4bb7a25a72c3ec091c69573d08929fa5 | 124 | py | Python | main.py | ReanGD/docker-x11-chrome | 13bace121b3580b57228ead0adb3aea6759f7cc5 | [
"Apache-2.0"
] | null | null | null | main.py | ReanGD/docker-x11-chrome | 13bace121b3580b57228ead0adb3aea6759f7cc5 | [
"Apache-2.0"
] | null | null | null | main.py | ReanGD/docker-x11-chrome | 13bace121b3580b57228ead0adb3aea6759f7cc5 | [
"Apache-2.0"
] | null | null | null | # import one_neuron
import digits
import digits2
import utils
# one_neuron.run()
# digits.run()
digits2.run()
# utils.run() | 13.777778 | 19 | 0.741935 |
import digits
import digits2
import utils
digits2.run()
| true | true |
f7f482792f236c165342ca6efea07fc1651d1130 | 25,632 | py | Python | tensorflow/python/eager/wrap_function.py | abhaikollara/tensorflow | 4f96df3659696990cb34d0ad07dc67843c4225a9 | [
"Apache-2.0"
] | 27 | 2020-02-29T04:13:22.000Z | 2022-02-07T21:54:50.000Z | tensorflow/python/eager/wrap_function.py | abhaikollara/tensorflow | 4f96df3659696990cb34d0ad07dc67843c4225a9 | [
"Apache-2.0"
] | 6 | 2022-01-15T07:17:47.000Z | 2022-02-14T15:28:22.000Z | tensorflow/python/eager/wrap_function.py | abhaikollara/tensorflow | 4f96df3659696990cb34d0ad07dc67843c4225a9 | [
"Apache-2.0"
] | 10 | 2020-12-15T03:55:24.000Z | 2021-12-17T23:14:11.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unidiomatic-typecheck
"""Prototype decorator for defining legacy-graph-mode functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import weakref
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import struct_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import lift_to_graph
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import nested_structure_coder
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
class VariableHolder(object):
  """Owns the variables created while tracing a wrapped python function."""

  def __init__(self, fn=None, share_variables=False):
    self._fn = fn
    self._share_variables = share_variables
    self._variables_by_name = data_structures.Mapping()

  @property
  def variables(self):
    """Mapping from variable name to the tracked variable object."""
    return self._variables_by_name

  def variable_creator_scope(self, next_creator, **kwargs):
    """Creates variables & adds them to collections to match legacy code."""
    requested_collections = kwargs.pop("collections", None)

    # Resolve the full name the variable will receive, to use it as the key.
    with ops.name_scope(kwargs.get("name", None), "Variable") as name:
      variable_name = ops.name_from_scope_name(name)
      kwargs["name"] = name

    variable = None
    if self._share_variables:
      variable = self._variables_by_name.get(variable_name, None)
    if variable is None:
      variable = next_creator(**kwargs)
    self._variables_by_name[variable_name] = variable

    if requested_collections is None:
      requested_collections = [ops.GraphKeys.GLOBAL_VARIABLES]
    if (variable.trainable and
        ops.GraphKeys.TRAINABLE_VARIABLES not in requested_collections):
      requested_collections = (
          list(requested_collections) + [ops.GraphKeys.TRAINABLE_VARIABLES])
    ops.add_to_collections(requested_collections, variable)

    return variable

  def __call__(self, *args, **kwargs):
    return self.call_with_variable_creator_scope(self._fn)(*args, **kwargs)

  def call_with_variable_creator_scope(self, fn):
    """Returns `fn` wrapped to create its variables through this holder."""

    def wrapped(*args, **kwargs):
      with variable_scope.variable_creator_scope(self.variable_creator_scope):
        return fn(*args, **kwargs)

    return wrapped
def _get_element_from_tensor_info(tensor_info, graph):
  """Simplified copy of the deprecated `get_tensor_from_tensor_info`."""
  encoding = tensor_info.WhichOneof("encoding")
  if encoding == "name":
    # May resolve to an Operation as well as a Tensor in some cases;
    # TensorInfo is a bit of a misnomer then.
    return graph.as_graph_element(tensor_info.name)
  if encoding == "coo_sparse":
    coo = tensor_info.coo_sparse
    return sparse_tensor.SparseTensor(
        graph.get_tensor_by_name(coo.indices_tensor_name),
        graph.get_tensor_by_name(coo.values_tensor_name),
        graph.get_tensor_by_name(coo.dense_shape_tensor_name))
  if encoding == "composite_tensor":
    spec_proto = struct_pb2.StructuredValue(
        type_spec_value=tensor_info.composite_tensor.type_spec)
    spec = nested_structure_coder.StructureCoder().decode_proto(spec_proto)
    components = [
        graph.get_tensor_by_name(component.name)
        for component in tensor_info.composite_tensor.components
    ]
    return spec._from_components(components)  # pylint: disable=protected-access
  raise ValueError("Invalid TensorInfo.encoding: %s" % encoding)
def _lift_single_variable(old_variable, graph, variable_holder):
  """Lifts `old_variable` out of the `FuncGraph` `graph`.

  Creates a new uninitialized variable in the outer context with the same
  shape/dtype/name/handle data and registers the old handle as a capture of
  `graph`, so the wrapped function reads the outer variable.

  Args:
    old_variable: Variable created inside the FuncGraph to be lifted.
    graph: The FuncGraph that currently contains `old_variable`.
    variable_holder: VariableHolder that records the lifted variable.

  Returns:
    The new outer-context variable standing in for `old_variable`.
  """
  new_variable = resource_variable_ops.UninitializedVariable(
      shape=old_variable.shape,
      dtype=old_variable.dtype,
      name=old_variable.op.name,
      trainable=old_variable.trainable,
      extra_handle_data=old_variable.handle)
  new_variable._initializer_op = old_variable._initializer_op  # pylint: disable=protected-access
  graph.add_capture(new_variable.handle, old_variable.handle)
  # Now that we've added the new variable to graph.captures,
  # graph.capture will use that cached value and do some post-processing
  # on the capture like recording it on the tape.
  graph.capture(new_variable.handle)
  # pylint: disable=protected-access
  variable_name = new_variable.name.split(":")[0]
  variable_holder._variables_by_name[variable_name] = new_variable
  graph._weak_variables.append(weakref.ref(new_variable))
  # pylint: enable=protected-access
  graph.watch_variable(new_variable)
  return new_variable
def _lift_unlifted_variables(graph, variable_holder):
  """Finds resource variables and lifts them into the outer context.

  When we import a GraphDef inside a wrap_function, no Python graph building
  code runs. This means we get VarHandleOps which create variable resources,
  but no corresponding Python objects. Leaving them like this works but gives
  the user no way to interact with or modify the variables outside the graph.

  This method searches for variables and lifts them out as regular variable
  objects when possible, indicating to the FuncGraph that they are captures.

  Args:
    graph: The FuncGraph to lift variables from.
    variable_holder: A VariableHolder to record the lifted variables in.
  """
  with graph.as_default():
    global_collection_variables = ops.get_collection(
        ops.GraphKeys.GLOBAL_VARIABLES)
    local_collection_variables = ops.get_collection(
        ops.GraphKeys.LOCAL_VARIABLES)
    existing_captures = {id(c) for c in graph.internal_captures}
    lifted_variables = {}

    def _should_lift_variable(v):
      # Only graph-mode resource variables created while building this
      # function and not already captured are lifted.
      return ((v._in_graph_mode  # pylint: disable=protected-access
               and v.graph.building_function)
              and isinstance(v, resource_variable_ops.BaseResourceVariable)
              and id(v.handle) not in existing_captures)

    for old_variable in global_collection_variables:
      if _should_lift_variable(old_variable):
        new_variable = _lift_single_variable(
            old_variable, graph, variable_holder)
        lifted_variables[id(old_variable)] = new_variable
        existing_captures.add(id(old_variable.handle))

    # Local variables are lifted the same way, but must then be moved from
    # the outer graph's global collection to its local collection.
    for old_variable in local_collection_variables:
      if _should_lift_variable(old_variable):
        new_variable = _lift_single_variable(
            old_variable, graph, variable_holder)
        lifted_variables[id(old_variable)] = new_variable
        existing_captures.add(id(old_variable.handle))
        if new_variable._in_graph_mode:  # pylint: disable=protected-access
          outer_graph = new_variable.graph
          # Variables are added to the global collection by default. In this
          # case we only want the variable in the local collection, so we'll pop
          # it out.
          global_collection = outer_graph.get_collection_ref(
              ops.GraphKeys.GLOBAL_VARIABLES)
          global_collection.remove(new_variable)
          outer_graph.add_to_collection(
              ops.GraphKeys.LOCAL_VARIABLES, new_variable)

    # Update the FuncGraph's collections, partly for the user and partly so this
    # function is idempotent when it runs again in prune() calls.
    for collection_name in [
        ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.LOCAL_VARIABLES
    ]:
      mutable_collection = ops.get_collection_ref(collection_name)
      for index, current in enumerate(mutable_collection):
        mutable_collection[index] = lifted_variables.get(id(current), current)
        if not resource_variable_ops.is_resource_variable(
            mutable_collection[index]):
          logging.log_first_n(
              logging.WARN,
              "Unable to create a python object for variable {} because it is "
              "a reference variable. It may not be visible to training APIs. "
              "If this is a problem, consider rebuilding the SavedModel after "
              "running tf.compat.v1.enable_resource_variables().".format(
                  mutable_collection[index]),
              5)
# TODO(allenl): make this trackable
class WrappedFunction(function.ConcreteFunction):
  """Wraps a tf V1 piece of code in a function."""

  def __init__(self, fn_graph, variable_holder, attrs=None, signature=None):
    self._variable_holder = variable_holder
    _lift_unlifted_variables(fn_graph, variable_holder)
    # We call __init__ after lifting variables so that the function's signature
    # properly reflects the new captured inputs.
    for f in fn_graph.as_graph_def().library.function:
      context.context().add_function_def(f)
    super(WrappedFunction, self).__init__(
        fn_graph, attrs=attrs, signature=signature)

  def prune(self, feeds, fetches, name=None, input_signature=None):
    """Extract a subgraph of this function's underlying graph.

    Wraps the subgraph in a new `WrappedFunction` object.

    Args:
      feeds: Input tensors to the subgraph to extract, as `Tensor` objects.
      fetches: Possibly-nested Python data structure containing information
        about outputs of the target subgraph. Each entry can either be a
        `Tensor` object (for data outputs), an `Operation` object (for control
        outputs), or a `TensorInfo` proto. Any additional shape/dtype
        information provided in a `TensorInfo` and not present in the original
        graph will be added to the returned subgraph.
      name: (optional) Name to give to the underlying `FuncGraph` of the
        returned object. If no name is provided, the graph's name will be
        `"pruned"`.
      input_signature: (optional) possibly-nested Python data structure
        containing `TensorSpec` objects, with which to populate the returned
        functions's `FuncGraph`'s `structured_input_signature` field.

    Returns:
      A new `WrappedFunction` object containing a copy of the portion of this
      object's graph that goes from `feeds` to `fetches`.
    """
    # TODO(b/129646028): Add support for CompositeTensors.
    name = name or "pruned"
    flat_feeds = nest.flatten(feeds, expand_composites=True)
    flat_feeds = [self.graph.as_graph_element(t) for t in flat_feeds]
    for f in flat_feeds:
      if not isinstance(f, ops.Tensor):
        raise ValueError("Feeds must be tensors.")
    # Ignoring all feeds that are captures allows prune to be called
    # using wrapped_func.inputs even when it uses variables
    internal_captures = {id(c) for c in self.graph.internal_captures}
    flat_feeds = [f for f in flat_feeds if id(f) not in internal_captures]
    operation_fetches = []
    tensor_fetches = []
    tensor_infos = []

    def _fetch_preprocesing_callback(fetch):
      """Extract out lists of ops, tensors, and tensor type info.

      Turns TensorInfos into Tensors in the original `fetches` structure.
      Also extracts ops from `fetches`.

      Args:
        fetch: The fetch to preprocess: Tensor, TensorInfo, or Operation, or
          string identifying a Tensor or Operation.

      Returns:
        `fetch` converted to a Tensor.
      """
      if isinstance(fetch, ops.Operation):
        operation_fetches.append(fetch)
        return fetch
      elif isinstance(fetch, meta_graph_pb2.TensorInfo):
        tensor_infos.append(fetch)
        decoded = _get_element_from_tensor_info(fetch, self._func_graph)
        if (tensor_util.is_tensor(decoded) or
            isinstance(decoded, composite_tensor.CompositeTensor)):
          tensor_fetches.append(decoded)
        else:
          operation_fetches.append(decoded)
        return decoded
      elif isinstance(fetch, (ops.Tensor, composite_tensor.CompositeTensor)):
        tensor_fetches.append(fetch)
        return fetch
      else:
        # Strings (and anything else) are resolved through the graph and
        # re-dispatched on the resulting element.
        graph_element = self.graph.as_graph_element(fetch)
        return _fetch_preprocesing_callback(graph_element)

    fetches = nest.map_structure(_fetch_preprocesing_callback, fetches)
    # Expand composite tensors into their component dense Tensors.
    tensor_fetches = nest.flatten(tensor_fetches, expand_composites=True)
    for f in (flat_feeds + tensor_fetches + operation_fetches):
      if f.graph is not self._func_graph:
        raise ValueError("Can only prune function whose feeds and fetches "
                         "are from this graph (%s). Input %s is from graph %s" %
                         (self._func_graph, f, f.graph))
    with self._func_graph.as_default():
      pruned_graph = func_graph.FuncGraph(name)
    lift_map = lift_to_graph.lift_to_graph(
        operation_fetches + tensor_fetches,
        pruned_graph,
        sources=flat_feeds + self.graph.internal_captures)

    # Note that we add the component tensors of any composite tensors to the
    # returned function's outputs list; the list must contain these component
    # tensors, or the function's sparse outputs won't work properly.
    pruned_graph.outputs.extend(lift_map[x] for x in tensor_fetches)
    pruned_graph.control_outputs.extend(
        [lift_map[operation] for operation in operation_fetches])
    pruned_graph.inputs.extend(lift_map[x] for x in flat_feeds)
    for external_capture, internal_capture in self.graph.captures:
      pruned_graph.add_capture(external_capture, lift_map[internal_capture])
    # Apply any extra shape information carried by the TensorInfo protos.
    for ti in tensor_infos:
      if ti.WhichOneof("encoding") == "name":  # Dense tensors only
        t = pruned_graph.as_graph_element(ti.name)
        if tensor_util.is_tensor(t):
          t.set_shape(tensor_shape.TensorShape(ti.tensor_shape))
    # pylint: disable=protected-access
    for f in self.graph._functions.values():
      pruned_graph._add_function(f)
    # pylint: enable=protected-access
    pruned_graph.variables = self.graph.variables

    def _structured_output_mapping(fetched):
      """Callback for `nest.map_structure()`: map fetches, dropping ops."""
      lifted = lift_map[fetched]
      if isinstance(lifted, ops.Operation):
        return None
      return lifted

    # expand_composites=True here causes composite tensors to be expanded
    # into their component dense Tensors, mapped to the new graph, and then
    # reconstituted into their original composite form.
    pruned_graph.structured_outputs = nest.map_structure(
        _structured_output_mapping, fetches, expand_composites=True)
    pruned_graph.structured_input_signature = input_signature
    pruned_fn = WrappedFunction(
        pruned_graph, variable_holder=self._variable_holder)
    pruned_fn._num_positional_args = len(flat_feeds)  # pylint: disable=protected-access
    # TODO(kathywu): Enable keyword arguments if an input signature is specified
    pruned_fn._arg_keywords = [tensor.op.name for tensor in flat_feeds]  # pylint: disable=protected-access
    return pruned_fn
def _filter_returned_ops(fn):
  """Wraps `fn` so `tf.Operation`s in its return value become `None`.

  Args:
    fn: a function

  Returns:
    A tuple of (
      Wrapped function that returns `None` in place of any ops,
      dict that maps the index in the flat output structure to the returned op
    )
  """
  returned_ops = {}

  def wrap_and_filter_returned_ops(*args, **kwargs):
    outputs = fn(*args, **kwargs)
    flat_outputs = nest.flatten(outputs)
    for index, value in enumerate(flat_outputs):
      if isinstance(value, ops.Operation):
        returned_ops[index] = value
        flat_outputs[index] = None
    return nest.pack_sequence_as(outputs, flat_outputs)

  return wrap_and_filter_returned_ops, returned_ops
class WrappedGraph(object):
  """Class for wrapping multiple TF 1.X functions in a single graph.

  Maintains a dictionary mapping names to wrapped functions. See
  `tf.compat.v1.wrap_function` to learn more about wrapping V1 functions.

  Functions wrapped using this class have access to variables and collections
  created in other wrapped functions, using the standard TF 1.X API (
  `tf.compat.v1.get_variable` or
  `tf.compat.v1.get_default_graph().get_collection(...)`)

  Outside a function, variables and collections may be accessed using the
  `variables` and `graph` properties.

  Example:

  ```
  def add_v1(x):
    with tf.compat.v1.variable_scope('vars', reuse=tf.compat.v1.AUTO_REUSE):
      v = tf.compat.v1.get_variable('v', shape=[], dtype=tf.int32)
    return v + x

  def increment_var_v1(x):
    with tf.compat.v1.variable_scope('vars', reuse=tf.compat.v1.AUTO_REUSE):
      v = tf.compat.v1.get_variable('v', shape=[], dtype=tf.int32)
    return v.assign_add(x)

  g = WrappedGraph()
  add = g.wrap_function(add_v1, [tf.TensorSpec([], tf.int32)])
  increment_var = g.wrap_function(increment_var_v1,
                                  [tf.TensorSpec([], tf.int32)])

  assert len(g.variables) == 1
  assert g.variables[0].numpy() == 0
  increment_var(tf.constant(5))
  assert g.variables[0].numpy() == 5
  ```
  """

  def __init__(self, variable_holder=None, **kwargs):
    self._variable_holder = (
        variable_holder or VariableHolder(share_variables=True))
    name = kwargs.pop("name", "wrapped_function_graph")
    # Always start with empty collections, unless otherwise specified. Setting
    # `collections=None` will copy the collections from the outer graph.
    collections = kwargs.pop("collections", {})
    self.graph = func_graph.FuncGraph(name, collections=collections, **kwargs)
    self._wrapped_function = WrappedFunction(self.graph, self._variable_holder)
    self._functions = {}

  @property
  def functions(self):
    """Dict mapping saved name to its wrapped, eager-compatible function."""
    return self._functions

  @property
  def variables(self):
    """Variables created by any function wrapped in this graph."""
    return self._variable_holder.variables

  def wrap_function(self, fn, signature, name=None):
    """Wraps a TF 1.X function and returns an eager-compatible function.

    All functions wrapped in the same `WrappedGraph` will have access to the
    same graph (`tf.compat.v1.get_default_graph` to get the graph object
    within a function, or `WrappedGraph.graph` to get the graph outside a
    function). Variables created within the function will be added to the
    `variables` list.

    Function inputs: All inputs to the function must be tensors (nested ok),
    with their shapes and dtypes defined in the `signature` argument.

    Function outputs:

      * The 1.X function may return tensors, variables, and ops. The wrapped
        eager-compatible function will always return tensors in the same nested
        structure.
      * Variables are replaced with a tensor containing the latest read values.
      * Returned ops are executed, and replaced with None.
      * The order of op execution and variable reads in the return is
        nondeterministic. For example:

        ```
        def update_var(x):
          v = tf.Variable(0)
          op = tf.compat.v1.assign(v, x).op
          return v, op

        g = WrappedGraph()
        fn = g.wrap_function(update_var)
        read_value, _ = fn(tf.constant(3))
        print(read_value.numpy())  # could be 0 or 3
        print(g.variables[0].numpy()) # always 3
        ```

    To ensure that ops in the function are executed (e.g. ops added to the
    `tf.GraphKeys.UPDATE_OPS` collection), include them in the function returns.

    Args:
      fn: a 1.X tensorflow function.
      signature: a possibly nested sequence of `TensorSpecs` specifying the
        shapes and dtypes of the arguments.
      name: an optional string name for the function. The function will be saved
        with key `name` in the `functions` dictionary.

    Returns:
      An eager-compatible function.
    """
    return self._wrap_function(fn, signature=signature, name=name)

  def _wrap_function(self,
                     fn,
                     args=None,
                     kwargs=None,
                     signature=None,
                     name=None):
    """Internal wrap function method with extended func_graph arguments."""
    fn_with_filter_and_scope, returned_ops = _filter_returned_ops(
        self._variable_holder.call_with_variable_creator_scope(fn))
    func_graph.func_graph_from_py_func(
        None,  # Name is unused.
        fn_with_filter_and_scope,
        args=args,
        kwargs=kwargs,
        signature=signature,
        add_control_dependencies=False,
        func_graph=self.graph)
    # This code relies on questionable behavior from `func_graph_from_py_func`.
    # If an existing FuncGraph is passed into the `func_graph` arg, the inputs
    # and structured outputs are overwritten. Pretty sure this is a bug,
    # because structured outputs doesn't match up with the outputs...
    fn_inputs = self.graph.inputs[:-len(self.graph.captures)]
    # Return filtered ops to the flattened outputs.
    flat_fn_outputs = nest.flatten(self.graph.structured_outputs)
    for index, op in returned_ops.items():
      flat_fn_outputs[index] = op
    fn_outputs = nest.pack_sequence_as(self.graph.structured_outputs,
                                       flat_fn_outputs)
    name = name or fn.__name__
    wrapped_function = self._wrapped_function.prune(
        fn_inputs, fn_outputs, name, self.graph.structured_input_signature)
    self._functions[name] = wrapped_function
    return wrapped_function
@tf_export(v1=["wrap_function"])
def wrap_function(fn, signature, name=None):
"""Wraps the TF 1.x function fn into a graph function.
The python function `fn` will be called once with symbolic arguments specified
in the `signature`, traced, and turned into a graph function. Any variables
created by `fn` will be owned by the object returned by `wrap_function`. The
resulting graph function can be called with tensors which match the
signature.
```python
def f(x, do_add):
v = tf.Variable(5.0)
if do_add:
op = v.assign_add(x)
else:
op = v.assign_sub(x)
with tf.control_dependencies([op]):
return v.read_value()
f_add = tf.compat.v1.wrap_function(f, [tf.TensorSpec((), tf.float32), True])
assert float(f_add(1.0)) == 6.0
assert float(f_add(1.0)) == 7.0
# Can call tf.compat.v1.wrap_function again to get a new trace, a new set
# of variables, and possibly different non-template arguments.
f_sub= tf.compat.v1.wrap_function(f, [tf.TensorSpec((), tf.float32), False])
assert float(f_sub(1.0)) == 4.0
assert float(f_sub(1.0)) == 3.0
```
Both `tf.compat.v1.wrap_function` and `tf.function` create a callable
TensorFlow graph. But while `tf.function` runs all stateful operations
(e.g. `tf.print`) and sequences operations to provide the same semantics as
eager execution, `wrap_function` is closer to the behavior of `session.run` in
TensorFlow 1.x. It will not run any operations unless they are required to
compute the function's outputs, either through a data dependency or a control
dependency. Nor will it sequence operations.
Unlike `tf.function`, `wrap_function` will only trace the Python function
once. As with placeholders in TF 1.x, shapes and dtypes must be provided to
`wrap_function`'s `signature` argument.
Since it is only traced once, variables and state may be created inside the
function and owned by the function wrapper object.
Args:
fn: python function to be wrapped
signature: the placeholder and python arguments to be passed to the wrapped
function
name: Optional. The name of the function.
Returns:
the wrapped graph function.
"""
holder = VariableHolder(fn)
func_graph_name = "wrapped_function"
if name is not None:
func_graph_name = "wrapped_function_" + name
return WrappedFunction(
func_graph.func_graph_from_py_func(
func_graph_name,
holder,
args=None,
kwargs=None,
signature=signature,
add_control_dependencies=False,
collections={}),
variable_holder=holder,
signature=signature)
def function_from_graph_def(graph_def, inputs, outputs):
"""Creates a ConcreteFunction from a GraphDef.
Args:
graph_def: A GraphDef to make a function out of.
inputs: A Tensor name or nested structure of names in `graph_def` which
should be inputs to the function.
outputs: A Tensor name or nested structure of names in `graph_def` which
should be outputs of the function.
Returns:
A ConcreteFunction.
"""
def _imports_graph_def():
importer.import_graph_def(graph_def, name="")
wrapped_import = wrap_function(_imports_graph_def, [])
import_graph = wrapped_import.graph
return wrapped_import.prune(
nest.map_structure(import_graph.as_graph_element, inputs),
nest.map_structure(import_graph.as_graph_element, outputs))
| 40.301887 | 107 | 0.71676 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import weakref
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import struct_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import lift_to_graph
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import nested_structure_coder
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
class VariableHolder(object):
def __init__(self, fn=None, share_variables=False):
self._fn = fn
self._share_variables = share_variables
self._variables_by_name = data_structures.Mapping()
@property
def variables(self):
return self._variables_by_name
def variable_creator_scope(self, next_creator, **kwargs):
collections = kwargs.pop("collections", None)
v = None
with ops.name_scope(kwargs.get("name", None), "Variable") as name:
variable_name = ops.name_from_scope_name(name)
kwargs["name"] = name
if self._share_variables:
v = self._variables_by_name.get(variable_name, None)
if v is None:
v = next_creator(**kwargs)
self._variables_by_name[variable_name] = v
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
if v.trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]
ops.add_to_collections(collections, v)
return v
def __call__(self, *args, **kwargs):
return self.call_with_variable_creator_scope(self._fn)(*args, **kwargs)
def call_with_variable_creator_scope(self, fn):
def wrapped(*args, **kwargs):
with variable_scope.variable_creator_scope(self.variable_creator_scope):
return fn(*args, **kwargs)
return wrapped
def _get_element_from_tensor_info(tensor_info, graph):
encoding = tensor_info.WhichOneof("encoding")
if encoding == "name":
return graph.as_graph_element(tensor_info.name)
elif encoding == "coo_sparse":
return sparse_tensor.SparseTensor(
graph.get_tensor_by_name(tensor_info.coo_sparse.indices_tensor_name),
graph.get_tensor_by_name(tensor_info.coo_sparse.values_tensor_name),
graph.get_tensor_by_name(
tensor_info.coo_sparse.dense_shape_tensor_name))
elif encoding == "composite_tensor":
struct_coder = nested_structure_coder.StructureCoder()
spec_proto = struct_pb2.StructuredValue(
type_spec_value=tensor_info.composite_tensor.type_spec)
spec = struct_coder.decode_proto(spec_proto)
components = [graph.get_tensor_by_name(component.name) for component in
tensor_info.composite_tensor.components]
return spec._from_components(components)
else:
raise ValueError("Invalid TensorInfo.encoding: %s" % encoding)
def _lift_single_variable(old_variable, graph, variable_holder):
new_variable = resource_variable_ops.UninitializedVariable(
shape=old_variable.shape,
dtype=old_variable.dtype,
name=old_variable.op.name,
trainable=old_variable.trainable,
extra_handle_data=old_variable.handle)
new_variable._initializer_op = old_variable._initializer_op
graph.add_capture(new_variable.handle, old_variable.handle)
# graph.capture will use that cached value and do some post-processing
# on the capture like recording it on the tape.
graph.capture(new_variable.handle)
# pylint: disable=protected-access
variable_name = new_variable.name.split(":")[0]
variable_holder._variables_by_name[variable_name] = new_variable
graph._weak_variables.append(weakref.ref(new_variable))
# pylint: enable=protected-access
graph.watch_variable(new_variable)
return new_variable
def _lift_unlifted_variables(graph, variable_holder):
with graph.as_default():
global_collection_variables = ops.get_collection(
ops.GraphKeys.GLOBAL_VARIABLES)
local_collection_variables = ops.get_collection(
ops.GraphKeys.LOCAL_VARIABLES)
existing_captures = {id(c) for c in graph.internal_captures}
lifted_variables = {}
def _should_lift_variable(v):
return ((v._in_graph_mode # pylint: disable=protected-access
and v.graph.building_function)
and isinstance(v, resource_variable_ops.BaseResourceVariable)
and id(v.handle) not in existing_captures)
for old_variable in global_collection_variables:
if _should_lift_variable(old_variable):
new_variable = _lift_single_variable(
old_variable, graph, variable_holder)
lifted_variables[id(old_variable)] = new_variable
existing_captures.add(id(old_variable.handle))
for old_variable in local_collection_variables:
if _should_lift_variable(old_variable):
new_variable = _lift_single_variable(
old_variable, graph, variable_holder)
lifted_variables[id(old_variable)] = new_variable
existing_captures.add(id(old_variable.handle))
if new_variable._in_graph_mode: # pylint: disable=protected-access
outer_graph = new_variable.graph
# Variables are added to the global collection by default. In this
# case we only want the variable in the local collection, so we'll pop
global_collection = outer_graph.get_collection_ref(
ops.GraphKeys.GLOBAL_VARIABLES)
global_collection.remove(new_variable)
outer_graph.add_to_collection(
ops.GraphKeys.LOCAL_VARIABLES, new_variable)
# function is idempotent when it runs again in prune() calls.
for collection_name in [
ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.LOCAL_VARIABLES
]:
mutable_collection = ops.get_collection_ref(collection_name)
for index, current in enumerate(mutable_collection):
mutable_collection[index] = lifted_variables.get(id(current), current)
if not resource_variable_ops.is_resource_variable(
mutable_collection[index]):
logging.log_first_n(
logging.WARN,
"Unable to create a python object for variable {} because it is "
"a reference variable. It may not be visible to training APIs. "
"If this is a problem, consider rebuilding the SavedModel after "
"running tf.compat.v1.enable_resource_variables().".format(
mutable_collection[index]),
5)
# TODO(allenl): make this trackable
class WrappedFunction(function.ConcreteFunction):
def __init__(self, fn_graph, variable_holder, attrs=None, signature=None):
self._variable_holder = variable_holder
_lift_unlifted_variables(fn_graph, variable_holder)
# We call __init__ after lifting variables so that the function's signature
for f in fn_graph.as_graph_def().library.function:
context.context().add_function_def(f)
super(WrappedFunction, self).__init__(
fn_graph, attrs=attrs, signature=signature)
def prune(self, feeds, fetches, name=None, input_signature=None):
name = name or "pruned"
flat_feeds = nest.flatten(feeds, expand_composites=True)
flat_feeds = [self.graph.as_graph_element(t) for t in flat_feeds]
for f in flat_feeds:
if not isinstance(f, ops.Tensor):
raise ValueError("Feeds must be tensors.")
internal_captures = {id(c) for c in self.graph.internal_captures}
flat_feeds = [f for f in flat_feeds if id(f) not in internal_captures]
operation_fetches = []
tensor_fetches = []
tensor_infos = []
def _fetch_preprocesing_callback(fetch):
if isinstance(fetch, ops.Operation):
operation_fetches.append(fetch)
return fetch
elif isinstance(fetch, meta_graph_pb2.TensorInfo):
tensor_infos.append(fetch)
decoded = _get_element_from_tensor_info(fetch, self._func_graph)
if (tensor_util.is_tensor(decoded) or
isinstance(decoded, composite_tensor.CompositeTensor)):
tensor_fetches.append(decoded)
else:
operation_fetches.append(decoded)
return decoded
elif isinstance(fetch, (ops.Tensor, composite_tensor.CompositeTensor)):
tensor_fetches.append(fetch)
return fetch
else:
graph_element = self.graph.as_graph_element(fetch)
return _fetch_preprocesing_callback(graph_element)
fetches = nest.map_structure(_fetch_preprocesing_callback, fetches)
tensor_fetches = nest.flatten(tensor_fetches, expand_composites=True)
for f in (flat_feeds + tensor_fetches + operation_fetches):
if f.graph is not self._func_graph:
raise ValueError("Can only prune function whose feeds and fetches "
"are from this graph (%s). Input %s is from graph %s" %
(self._func_graph, f, f.graph))
with self._func_graph.as_default():
pruned_graph = func_graph.FuncGraph(name)
lift_map = lift_to_graph.lift_to_graph(
operation_fetches + tensor_fetches,
pruned_graph,
sources=flat_feeds + self.graph.internal_captures)
# tensors, or the function's sparse outputs won't work properly.
pruned_graph.outputs.extend(lift_map[x] for x in tensor_fetches)
pruned_graph.control_outputs.extend(
[lift_map[operation] for operation in operation_fetches])
pruned_graph.inputs.extend(lift_map[x] for x in flat_feeds)
for external_capture, internal_capture in self.graph.captures:
pruned_graph.add_capture(external_capture, lift_map[internal_capture])
for ti in tensor_infos:
if ti.WhichOneof("encoding") == "name": # Dense tensors only
t = pruned_graph.as_graph_element(ti.name)
if tensor_util.is_tensor(t):
t.set_shape(tensor_shape.TensorShape(ti.tensor_shape))
# pylint: disable=protected-access
for f in self.graph._functions.values():
pruned_graph._add_function(f)
# pylint: enable=protected-access
pruned_graph.variables = self.graph.variables
def _structured_output_mapping(fetched):
lifted = lift_map[fetched]
if isinstance(lifted, ops.Operation):
return None
return lifted
# expand_composites=True here causes composite tensors to be expanded
# into their component dense Tensors, mapped to the new graph, and then
# reconstituted into their original composite form.
pruned_graph.structured_outputs = nest.map_structure(
_structured_output_mapping, fetches, expand_composites=True)
pruned_graph.structured_input_signature = input_signature
pruned_fn = WrappedFunction(
pruned_graph, variable_holder=self._variable_holder)
pruned_fn._num_positional_args = len(flat_feeds) # pylint: disable=protected-access
# TODO(kathywu): Enable keyword arguments if an input signature is specified
pruned_fn._arg_keywords = [tensor.op.name for tensor in flat_feeds] # pylint: disable=protected-access
return pruned_fn
def _filter_returned_ops(fn):
returned_ops = {}
def wrap_and_filter_returned_ops(*args, **kwargs):
outputs = fn(*args, **kwargs)
flat_outputs = nest.flatten(outputs)
for n in range(len(flat_outputs)):
output = flat_outputs[n]
if isinstance(output, ops.Operation):
returned_ops[n] = output
flat_outputs[n] = None
return nest.pack_sequence_as(outputs, flat_outputs)
return wrap_and_filter_returned_ops, returned_ops
class WrappedGraph(object):
def __init__(self, variable_holder=None, **kwargs):
self._variable_holder = (
variable_holder or VariableHolder(share_variables=True))
name = kwargs.pop("name", "wrapped_function_graph")
# Always start with empty collections, unless otherwise specified. Setting
# `collections=None` will copy the collections from the outer graph.
collections = kwargs.pop("collections", {})
self.graph = func_graph.FuncGraph(name, collections=collections, **kwargs)
self._wrapped_function = WrappedFunction(self.graph, self._variable_holder)
self._functions = {}
@property
def functions(self):
return self._functions
@property
def variables(self):
return self._variable_holder.variables
def wrap_function(self, fn, signature, name=None):
return self._wrap_function(fn, signature=signature, name=name)
def _wrap_function(self,
fn,
args=None,
kwargs=None,
signature=None,
name=None):
fn_with_filter_and_scope, returned_ops = _filter_returned_ops(
self._variable_holder.call_with_variable_creator_scope(fn))
func_graph.func_graph_from_py_func(
None, # Name is unused.
fn_with_filter_and_scope,
args=args,
kwargs=kwargs,
signature=signature,
add_control_dependencies=False,
func_graph=self.graph)
# This code relies on questional behavior from `func_graph_from_py_func`.
# If an existing FuncGraph is passed into the `func_graph` arg, the inputs
# and structured outputs are overwritten. Pretty sure this is a bug,
# because structured outputs doesn't match up with the outputs...
fn_inputs = self.graph.inputs[:-len(self.graph.captures)]
flat_fn_outputs = nest.flatten(self.graph.structured_outputs)
for index, op in returned_ops.items():
flat_fn_outputs[index] = op
fn_outputs = nest.pack_sequence_as(self.graph.structured_outputs,
flat_fn_outputs)
name = name or fn.__name__
wrapped_function = self._wrapped_function.prune(
fn_inputs, fn_outputs, name, self.graph.structured_input_signature)
self._functions[name] = wrapped_function
return wrapped_function
@tf_export(v1=["wrap_function"])
def wrap_function(fn, signature, name=None):
holder = VariableHolder(fn)
func_graph_name = "wrapped_function"
if name is not None:
func_graph_name = "wrapped_function_" + name
return WrappedFunction(
func_graph.func_graph_from_py_func(
func_graph_name,
holder,
args=None,
kwargs=None,
signature=signature,
add_control_dependencies=False,
collections={}),
variable_holder=holder,
signature=signature)
def function_from_graph_def(graph_def, inputs, outputs):
def _imports_graph_def():
importer.import_graph_def(graph_def, name="")
wrapped_import = wrap_function(_imports_graph_def, [])
import_graph = wrapped_import.graph
return wrapped_import.prune(
nest.map_structure(import_graph.as_graph_element, inputs),
nest.map_structure(import_graph.as_graph_element, outputs))
| true | true |
f7f482b1498c5cfbfa908a852befa2a66d858761 | 3,753 | py | Python | fastmot/models/reid.py | 6DammK9/FastMOT | ff5febf4f4bac576db6e5846479bdc0891fa740b | [
"MIT"
] | null | null | null | fastmot/models/reid.py | 6DammK9/FastMOT | ff5febf4f4bac576db6e5846479bdc0891fa740b | [
"MIT"
] | null | null | null | fastmot/models/reid.py | 6DammK9/FastMOT | ff5febf4f4bac576db6e5846479bdc0891fa740b | [
"MIT"
] | null | null | null | from pathlib import Path
import logging
import tensorrt as trt
EXPLICIT_BATCH = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
logger = logging.getLogger(__name__)
class ReID:
"""Base class for ReID models.
Attributes
----------
PLUGIN_PATH : Path, optional
Path to TensorRT plugin.
ENGINE_PATH : Path
Path to TensorRT engine.
If not found, TensorRT engine will be converted from the ONNX model
at runtime and cached for later use.
MODEL_PATH : Path
Path to ONNX model.
INPUT_SHAPE : tuple
Input size in the format `(channel, height, width)`.
OUTPUT_LAYOUT : int
Feature dimension output by the model.
METRIC : {'euclidean', 'cosine'}
Distance metric used to match features.
"""
__registry = {}
PLUGIN_PATH = None
ENGINE_PATH = None
MODEL_PATH = None
INPUT_SHAPE = None
OUTPUT_LAYOUT = None
METRIC = None
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
cls.__registry[cls.__name__] = cls
@classmethod
def get_model(cls, name):
return cls.__registry[name]
@classmethod
def build_engine(cls, trt_logger, batch_size):
with trt.Builder(trt_logger) as builder, builder.create_network(EXPLICIT_BATCH) as network, \
trt.OnnxParser(network, trt_logger) as parser:
builder.max_batch_size = batch_size
logger.info('Building engine with batch size: %d', batch_size)
logger.info('This may take a while...')
# parse model file
with open(cls.MODEL_PATH, 'rb') as model_file:
if not parser.parse(model_file.read()):
logger.critical('Failed to parse the ONNX file')
for err in range(parser.num_errors):
logger.error(parser.get_error(err))
return None
# reshape input to the right batch size
net_input = network.get_input(0)
assert cls.INPUT_SHAPE == net_input.shape[1:]
net_input.shape = (batch_size, *cls.INPUT_SHAPE)
config = builder.create_builder_config()
config.max_workspace_size = 1 << 30
if builder.platform_has_fast_fp16:
config.set_flag(trt.BuilderFlag.FP16)
profile = builder.create_optimization_profile()
profile.set_shape(
net_input.name, # input tensor name
(batch_size, *cls.INPUT_SHAPE), # min shape
(batch_size, *cls.INPUT_SHAPE), # opt shape
(batch_size, *cls.INPUT_SHAPE) # max shape
)
config.add_optimization_profile(profile)
# engine = builder.build_cuda_engine(network)
engine = builder.build_engine(network, config)
if engine is None:
logger.critical('Failed to build engine')
return None
logger.info("Completed creating engine")
with open(cls.ENGINE_PATH, 'wb') as engine_file:
engine_file.write(engine.serialize())
return engine
class OSNet025(ReID):
ENGINE_PATH = Path(__file__).parent / 'osnet_x0_25_msmt17.trt'
MODEL_PATH = Path(__file__).parent / 'osnet_x0_25_msmt17.onnx'
INPUT_SHAPE = (3, 256, 128)
OUTPUT_LAYOUT = 512
METRIC = 'euclidean'
class OSNet10(ReID):
"""Multi-source model trained on MSMT17, DukeMTMC, and CUHK03, not provided."""
ENGINE_PATH = Path(__file__).parent / 'osnet_x1_0_msdc.trt'
MODEL_PATH = Path(__file__).parent / 'osnet_x1_0_msdc.onnx'
INPUT_SHAPE = (3, 256, 128)
OUTPUT_LAYOUT = 512
METRIC = 'cosine'
| 34.118182 | 101 | 0.618439 | from pathlib import Path
import logging
import tensorrt as trt
EXPLICIT_BATCH = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
logger = logging.getLogger(__name__)
class ReID:
__registry = {}
PLUGIN_PATH = None
ENGINE_PATH = None
MODEL_PATH = None
INPUT_SHAPE = None
OUTPUT_LAYOUT = None
METRIC = None
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
cls.__registry[cls.__name__] = cls
@classmethod
def get_model(cls, name):
return cls.__registry[name]
@classmethod
def build_engine(cls, trt_logger, batch_size):
with trt.Builder(trt_logger) as builder, builder.create_network(EXPLICIT_BATCH) as network, \
trt.OnnxParser(network, trt_logger) as parser:
builder.max_batch_size = batch_size
logger.info('Building engine with batch size: %d', batch_size)
logger.info('This may take a while...')
with open(cls.MODEL_PATH, 'rb') as model_file:
if not parser.parse(model_file.read()):
logger.critical('Failed to parse the ONNX file')
for err in range(parser.num_errors):
logger.error(parser.get_error(err))
return None
net_input = network.get_input(0)
assert cls.INPUT_SHAPE == net_input.shape[1:]
net_input.shape = (batch_size, *cls.INPUT_SHAPE)
config = builder.create_builder_config()
config.max_workspace_size = 1 << 30
if builder.platform_has_fast_fp16:
config.set_flag(trt.BuilderFlag.FP16)
profile = builder.create_optimization_profile()
profile.set_shape(
net_input.name,
(batch_size, *cls.INPUT_SHAPE),
(batch_size, *cls.INPUT_SHAPE),
(batch_size, *cls.INPUT_SHAPE)
)
config.add_optimization_profile(profile)
engine = builder.build_engine(network, config)
if engine is None:
logger.critical('Failed to build engine')
return None
logger.info("Completed creating engine")
with open(cls.ENGINE_PATH, 'wb') as engine_file:
engine_file.write(engine.serialize())
return engine
class OSNet025(ReID):
ENGINE_PATH = Path(__file__).parent / 'osnet_x0_25_msmt17.trt'
MODEL_PATH = Path(__file__).parent / 'osnet_x0_25_msmt17.onnx'
INPUT_SHAPE = (3, 256, 128)
OUTPUT_LAYOUT = 512
METRIC = 'euclidean'
class OSNet10(ReID):
ENGINE_PATH = Path(__file__).parent / 'osnet_x1_0_msdc.trt'
MODEL_PATH = Path(__file__).parent / 'osnet_x1_0_msdc.onnx'
INPUT_SHAPE = (3, 256, 128)
OUTPUT_LAYOUT = 512
METRIC = 'cosine'
| true | true |
f7f482f3a9471f430426dbbf3fda05cafdf34fda | 4,405 | py | Python | webowl.py | pelaohxc/webowl | 6f4154ec126dbad39eb7406fadfe8522e379f168 | [
"MIT"
] | 5 | 2019-02-24T14:16:21.000Z | 2022-02-20T21:35:05.000Z | webowl.py | pelaohxc/webowl | 6f4154ec126dbad39eb7406fadfe8522e379f168 | [
"MIT"
] | null | null | null | webowl.py | pelaohxc/webowl | 6f4154ec126dbad39eb7406fadfe8522e379f168 | [
"MIT"
] | 1 | 2019-02-21T09:44:48.000Z | 2019-02-21T09:44:48.000Z | from selenium import webdriver
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--file", help="File containing the url list")
parser.add_argument("--output", help="The path of the folder for the output files")
parser.add_argument("--webdriver", help="The path of the webdriver binary")
args = parser.parse_args()
file = args.file if args.file else None
output = args.output if args.output else None
driver = args.webdriver if args.webdriver else None
print("\n\n")
print(" .':l,. .....'. .'',,'...........,;;,'.. ")
print(" .;oc..,;;'...'....cl:'........'...',,'.. ")
print(" ..;loo:,;lo;.....',;c;,'..':,....';'.''. ")
print(" .,coxkx:.,:..,,;:l:...';:,;c;',;loc,.... ... ")
print(" . .cxkkd;,::;:;,:do::cc::c;,;cc,,;;,,. .;;. ")
print(" ... .';ll;,::::;:::;,,,:oc,,;cc'':lldx:. .,::;. ")
print(" .. .:;,colcc:,:oc;;::;;;;;ccoo;;codxd, .':clc,. ")
print(" . .. ,dxc;cdl,:::loc:::;;,;coddo:cocldc. ... .':c;. ")
print(" ..... .:odo::::;';c:;clloc:ccoxoloclodl,. .,''... 'cc,. ")
print(" . ...... ..:oxo;:l;.,c:.':ooc,:lllclcclol:'. .,,'''....,c:. ")
print(" .....'''. .;cc:co::c,'. .:olcc:ccclc;lo:,...,;:ll;;;,'''.. .':,. ")
print(" . ';cc::;,:c::'.,:;:::',ccclol;. .'.'','.'....... .',.. ")
print(" . .. 'co:;;',ccloc'..,:;:cccll;. .... ........''...")
print(" .;,. .,oc,;,,:llc;''',;cddc'... .;c;... .........'',..")
print(" ...,c:,....... .cooc;::;,;c::lodx: ':c;,'';lol,.,'...............")
print(" ......';::;;,'. ,lool;'.',,:odo;,...,:cllolc:;:l:'..''...........")
print(" ...''.......... .,cdxdc,:lokko;':ocll:;;;:ccloo:'...''...........")
print(" ......'',;cooc,.....';c:;:ldd:,',lxOO0Odoollol:;',,'''............")
print(" . ....'',;;;;;:ll;,..'''''ldc;;;;',ldk0K0xoodddddll:;,'.....';,'.....")
print(" .... ...';;;,''...,c:;,',::,,oOo;::ccclxkOko::ldddoololc;'....,:c;,''...")
print(" . .... ..'''',;;;;,',;clc:::,.:d:',clodddk0Oxxxdkkool::lc:;'.';cc::;,'...")
print(" .. .......',;:cc;;:lollc'.,c,..cddkOkOOO0ko:::;:c::c;,,,',,',,',;,..,")
print(" ........',,...,,.';::cccc;.......:odkkxxxdc;'..;;,;:;:::::;,'...'..''',")
print(" ...'.',,. .....'cdxkxo:. ....;dO0K0Od:'''''..',;:cc::::,'.........")
print(" . .....''.. .. .,;cdxkdl' ..;x00Okxdl;..... .'''',;;'',;'...... ")
print(" ...... . .... .':loo:. .'ckOxdl:,,;,''..'..''....''.. ....'. ")
print(" ......... .,:l; ,lool;''.............';,..... ..';. ")
print(" .... ... ....,'':c:'.. .''....'.''.. .',''..',;,..','','")
print(" ... . ..'............ ...',. .';;',cc:c:;::,..")
print(" . ... ... .... ':'.. .....''.:llll:'")
print(" __ __ ___. ________ .__ ")
print(" / \ / \ ____\_ |__ \_____ \__ _ _| | ")
print(" \ \/\/ // __ \| __ \ / | \ \/ \/ / | ")
print(" \ /\ ___/| \_\ \/ | \ /| |__")
print(" \__/\ / \___ >___ /\_______ /\/\_/ |____/")
print(" \/ \/ \/ \/ ")
print("\n by Bastian Muhlhauser @xpl0ited1\n\n")
if (file != None and output != None) and driver != None:
options = webdriver.ChromeOptions()
options.add_argument('headless')
browser = webdriver.Chrome(driver, chrome_options=options)
if not os.path.exists(output):
os.mkdir(output)
print("[+] Reading file...")
with open(file) as f:
lines = f.readlines()
for line in lines:
print("[+] Getting screenshots for "+line)
browser.get('http://'+line)
print("[+] Saving http_"+line+'.png')
browser.save_screenshot(output+'/http_'+line+'.png')
browser.get('https://'+line)
print("[+] Saving https_"+line+'.jpg')
browser.save_screenshot(output+'/https_'+line+'.png')
print("[+] Done.")
browser.close()
| 58.733333 | 89 | 0.3437 | from selenium import webdriver
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--file", help="File containing the url list")
parser.add_argument("--output", help="The path of the folder for the output files")
parser.add_argument("--webdriver", help="The path of the webdriver binary")
args = parser.parse_args()
file = args.file if args.file else None
output = args.output if args.output else None
driver = args.webdriver if args.webdriver else None
print("\n\n")
print(" .':l,. .....'. .'',,'...........,;;,'.. ")
print(" .;oc..,;;'...'....cl:'........'...',,'.. ")
print(" ..;loo:,;lo;.....',;c;,'..':,....';'.''. ")
print(" .,coxkx:.,:..,,;:l:...';:,;c;',;loc,.... ... ")
print(" . .cxkkd;,::;:;,:do::cc::c;,;cc,,;;,,. .;;. ")
print(" ... .';ll;,::::;:::;,,,:oc,,;cc'':lldx:. .,::;. ")
print(" .. .:;,colcc:,:oc;;::;;;;;ccoo;;codxd, .':clc,. ")
print(" . .. ,dxc;cdl,:::loc:::;;,;coddo:cocldc. ... .':c;. ")
print(" ..... .:odo::::;';c:;clloc:ccoxoloclodl,. .,''... 'cc,. ")
print(" . ...... ..:oxo;:l;.,c:.':ooc,:lllclcclol:'. .,,'''....,c:. ")
print(" .....'''. .;cc:co::c,'. .:olcc:ccclc;lo:,...,;:ll;;;,'''.. .':,. ")
print(" . ';cc::;,:c::'.,:;:::',ccclol;. .'.'','.'....... .',.. ")
print(" . .. 'co:;;',ccloc'..,:;:cccll;. .... ........''...")
print(" .;,. .,oc,;,,:llc;''',;cddc'... .;c;... .........'',..")
print(" ...,c:,....... .cooc;::;,;c::lodx: ':c;,'';lol,.,'...............")
print(" ......';::;;,'. ,lool;'.',,:odo;,...,:cllolc:;:l:'..''...........")
print(" ...''.......... .,cdxdc,:lokko;':ocll:;;;:ccloo:'...''...........")
print(" ......'',;cooc,.....';c:;:ldd:,',lxOO0Odoollol:;',,'''............")
print(" . ....'',;;;;;:ll;,..'''''ldc;;;;',ldk0K0xoodddddll:;,'.....';,'.....")
print(" .... ...';;;,''...,c:;,',::,,oOo;::ccclxkOko::ldddoololc;'....,:c;,''...")
print(" . .... ..'''',;;;;,',;clc:::,.:d:',clodddk0Oxxxdkkool::lc:;'.';cc::;,'...")
print(" .. .......',;:cc;;:lollc'.,c,..cddkOkOOO0ko:::;:c::c;,,,',,',,',;,..,")
print(" ........',,...,,.';::cccc;.......:odkkxxxdc;'..;;,;:;:::::;,'...'..''',")
print(" ...'.',,. .....'cdxkxo:. ....;dO0K0Od:'''''..',;:cc::::,'.........")
print(" . .....''.. .. .,;cdxkdl' ..;x00Okxdl;..... .'''',;;'',;'...... ")
print(" ...... . .... .':loo:. .'ckOxdl:,,;,''..'..''....''.. ....'. ")
print(" ......... .,:l; ,lool;''.............';,..... ..';. ")
print(" .... ... ....,'':c:'.. .''....'.''.. .',''..',;,..','','")
print(" ... . ..'............ ...',. .';;',cc:c:;::,..")
print(" . ... ... .... ':'.. .....''.:llll:'")
print(" __ __ ___. ________ .__ ")
print(" / \ / \ ____\_ |__ \_____ \__ _ _| | ")
print(" \ \/\/ // __ \| __ \ / | \ \/ \/ / | ")
print(" \ /\ ___/| \_\ \/ | \ /| |__")
print(" \__/\ / \___ >___ /\_______ /\/\_/ |____/")
print(" \/ \/ \/ \/ ")
print("\n by Bastian Muhlhauser @xpl0ited1\n\n")
if (file != None and output != None) and driver != None:
options = webdriver.ChromeOptions()
options.add_argument('headless')
browser = webdriver.Chrome(driver, chrome_options=options)
if not os.path.exists(output):
os.mkdir(output)
print("[+] Reading file...")
with open(file) as f:
lines = f.readlines()
for line in lines:
print("[+] Getting screenshots for "+line)
browser.get('http://'+line)
print("[+] Saving http_"+line+'.png')
browser.save_screenshot(output+'/http_'+line+'.png')
browser.get('https://'+line)
print("[+] Saving https_"+line+'.jpg')
browser.save_screenshot(output+'/https_'+line+'.png')
print("[+] Done.")
browser.close()
| true | true |
f7f4834c1dac54ee79265b60a45f9c7fbc413a40 | 403 | py | Python | DjangoRESTAPI/wsgi.py | Thecapable/DjangoRESTAPI-Bank | 454fb6858f8f8820f3bc0b503d5ada46ca84b633 | [
"MIT"
] | null | null | null | DjangoRESTAPI/wsgi.py | Thecapable/DjangoRESTAPI-Bank | 454fb6858f8f8820f3bc0b503d5ada46ca84b633 | [
"MIT"
] | null | null | null | DjangoRESTAPI/wsgi.py | Thecapable/DjangoRESTAPI-Bank | 454fb6858f8f8820f3bc0b503d5ada46ca84b633 | [
"MIT"
] | null | null | null | """
WSGI config for DjangoRESTAPI project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DjangoRESTAPI.settings')
application = get_wsgi_application()
| 23.705882 | 78 | 0.791563 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DjangoRESTAPI.settings')
application = get_wsgi_application()
| true | true |
f7f483b60767e1276340637def1a89aaba6e87a4 | 485 | py | Python | server/python/exceptions.py | GjjvdBurg/wrattler | 1e4937b42fbe15a87a8204a61dbdd0b4526d9b6c | [
"MIT"
] | 56 | 2018-03-21T07:04:44.000Z | 2021-12-26T15:01:42.000Z | server/python/exceptions.py | GjjvdBurg/wrattler | 1e4937b42fbe15a87a8204a61dbdd0b4526d9b6c | [
"MIT"
] | 181 | 2018-06-07T10:35:23.000Z | 2022-02-26T10:22:54.000Z | server/python/exceptions.py | GjjvdBurg/wrattler | 1e4937b42fbe15a87a8204a61dbdd0b4526d9b6c | [
"MIT"
] | 9 | 2018-05-04T10:04:49.000Z | 2019-10-07T15:53:50.000Z | """
Define custom exceptions for the API
"""
class ApiException(Exception):
    """API error that can be rendered as a JSON-style response body."""

    # Default HTTP status used when the caller does not supply one.
    status_code = 500

    def __init__(self, message, status_code=None, payload=None):
        """Store the message, an optional HTTP status and optional extra payload."""
        super().__init__()
        self.message = message
        # Override the class-level default only when an explicit code is given.
        if status_code is not None:
            self.status_code = status_code
        self.payload = payload

    def to_dict(self):
        """Return the payload merged with the error status and message."""
        body = dict(self.payload or ())
        body.update(status="error", error=self.message)
        return body
| 24.25 | 64 | 0.610309 |
class ApiException(Exception):
status_code = 500
def __init__(self, message, status_code=None, payload=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self):
rv = dict(self.payload or ())
rv["status"] = "error"
rv["error"] = self.message
return rv
| true | true |
f7f48475f968e504ce866d12667918c23541dd05 | 4,316 | py | Python | google/ads/google_ads/v3/proto/resources/parental_status_view_pb2.py | jphanwebstaurant/google-ads-python | 600812b2afcc4d57f00b47dfe436620ce50bfe9b | [
"Apache-2.0"
] | 1 | 2019-11-30T23:42:39.000Z | 2019-11-30T23:42:39.000Z | google/ads/google_ads/v3/proto/resources/parental_status_view_pb2.py | jphanwebstaurant/google-ads-python | 600812b2afcc4d57f00b47dfe436620ce50bfe9b | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v3/proto/resources/parental_status_view_pb2.py | jphanwebstaurant/google-ads-python | 600812b2afcc4d57f00b47dfe436620ce50bfe9b | [
"Apache-2.0"
] | 1 | 2020-09-30T17:04:06.000Z | 2020-09-30T17:04:06.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v3/proto/resources/parental_status_view.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v3/proto/resources/parental_status_view.proto',
package='google.ads.googleads.v3.resources',
syntax='proto3',
serialized_options=_b('\n%com.google.ads.googleads.v3.resourcesB\027ParentalStatusViewProtoP\001ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v3/resources;resources\242\002\003GAA\252\002!Google.Ads.GoogleAds.V3.Resources\312\002!Google\\Ads\\GoogleAds\\V3\\Resources\352\002%Google::Ads::GoogleAds::V3::Resources'),
serialized_pb=_b('\nBgoogle/ads/googleads_v3/proto/resources/parental_status_view.proto\x12!google.ads.googleads.v3.resources\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x1cgoogle/api/annotations.proto\"\xd3\x01\n\x12ParentalStatusView\x12J\n\rresource_name\x18\x01 \x01(\tB3\xe0\x41\x03\xfa\x41-\n+googleads.googleapis.com/ParentalStatusView:q\xea\x41n\n+googleads.googleapis.com/ParentalStatusView\x12?customers/{customer}/parentalStatusViews/{parental_status_view}B\x84\x02\n%com.google.ads.googleads.v3.resourcesB\x17ParentalStatusViewProtoP\x01ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v3/resources;resources\xa2\x02\x03GAA\xaa\x02!Google.Ads.GoogleAds.V3.Resources\xca\x02!Google\\Ads\\GoogleAds\\V3\\Resources\xea\x02%Google::Ads::GoogleAds::V3::Resourcesb\x06proto3')
,
dependencies=[google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_PARENTALSTATUSVIEW = _descriptor.Descriptor(
name='ParentalStatusView',
full_name='google.ads.googleads.v3.resources.ParentalStatusView',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v3.resources.ParentalStatusView.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\003\372A-\n+googleads.googleapis.com/ParentalStatusView'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('\352An\n+googleads.googleapis.com/ParentalStatusView\022?customers/{customer}/parentalStatusViews/{parental_status_view}'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=196,
serialized_end=407,
)
DESCRIPTOR.message_types_by_name['ParentalStatusView'] = _PARENTALSTATUSVIEW
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ParentalStatusView = _reflection.GeneratedProtocolMessageType('ParentalStatusView', (_message.Message,), dict(
DESCRIPTOR = _PARENTALSTATUSVIEW,
__module__ = 'google.ads.googleads_v3.proto.resources.parental_status_view_pb2'
,
__doc__ = """A parental status view.
Attributes:
resource_name:
Output only. The resource name of the parental status view.
Parental Status view resource names have the form: ``customer
s/{customer_id}/parentalStatusViews/{ad_group_id}~{criterion_i
d}``
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v3.resources.ParentalStatusView)
))
_sym_db.RegisterMessage(ParentalStatusView)
DESCRIPTOR._options = None
_PARENTALSTATUSVIEW.fields_by_name['resource_name']._options = None
_PARENTALSTATUSVIEW._options = None
# @@protoc_insertion_point(module_scope)
| 48.494382 | 821 | 0.797266 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v3/proto/resources/parental_status_view.proto',
package='google.ads.googleads.v3.resources',
syntax='proto3',
serialized_options=_b('\n%com.google.ads.googleads.v3.resourcesB\027ParentalStatusViewProtoP\001ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v3/resources;resources\242\002\003GAA\252\002!Google.Ads.GoogleAds.V3.Resources\312\002!Google\\Ads\\GoogleAds\\V3\\Resources\352\002%Google::Ads::GoogleAds::V3::Resources'),
serialized_pb=_b('\nBgoogle/ads/googleads_v3/proto/resources/parental_status_view.proto\x12!google.ads.googleads.v3.resources\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x1cgoogle/api/annotations.proto\"\xd3\x01\n\x12ParentalStatusView\x12J\n\rresource_name\x18\x01 \x01(\tB3\xe0\x41\x03\xfa\x41-\n+googleads.googleapis.com/ParentalStatusView:q\xea\x41n\n+googleads.googleapis.com/ParentalStatusView\x12?customers/{customer}/parentalStatusViews/{parental_status_view}B\x84\x02\n%com.google.ads.googleads.v3.resourcesB\x17ParentalStatusViewProtoP\x01ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v3/resources;resources\xa2\x02\x03GAA\xaa\x02!Google.Ads.GoogleAds.V3.Resources\xca\x02!Google\\Ads\\GoogleAds\\V3\\Resources\xea\x02%Google::Ads::GoogleAds::V3::Resourcesb\x06proto3')
,
dependencies=[google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_PARENTALSTATUSVIEW = _descriptor.Descriptor(
name='ParentalStatusView',
full_name='google.ads.googleads.v3.resources.ParentalStatusView',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v3.resources.ParentalStatusView.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\003\372A-\n+googleads.googleapis.com/ParentalStatusView'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('\352An\n+googleads.googleapis.com/ParentalStatusView\022?customers/{customer}/parentalStatusViews/{parental_status_view}'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=196,
serialized_end=407,
)
DESCRIPTOR.message_types_by_name['ParentalStatusView'] = _PARENTALSTATUSVIEW
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ParentalStatusView = _reflection.GeneratedProtocolMessageType('ParentalStatusView', (_message.Message,), dict(
DESCRIPTOR = _PARENTALSTATUSVIEW,
__module__ = 'google.ads.googleads_v3.proto.resources.parental_status_view_pb2'
,
__doc__ = """A parental status view.
Attributes:
resource_name:
Output only. The resource name of the parental status view.
Parental Status view resource names have the form: ``customer
s/{customer_id}/parentalStatusViews/{ad_group_id}~{criterion_i
d}``
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v3.resources.ParentalStatusView)
))
_sym_db.RegisterMessage(ParentalStatusView)
DESCRIPTOR._options = None
_PARENTALSTATUSVIEW.fields_by_name['resource_name']._options = None
_PARENTALSTATUSVIEW._options = None
# @@protoc_insertion_point(module_scope)
| true | true |
f7f484a600d59feb460a0d54800060b9f17601bc | 1,217 | py | Python | module_3/df_test.py | JedersonLuz/Codenation_AceleraDev_DataScience | a23137ba7f1349bdc544647ef680ea6f822f797b | [
"MIT"
] | 6 | 2020-03-30T23:40:18.000Z | 2020-08-20T16:48:38.000Z | module_3/df_test.py | JedersonLuz/Codenation_AceleraDev_DataScience | a23137ba7f1349bdc544647ef680ea6f822f797b | [
"MIT"
] | 26 | 2021-02-03T01:19:53.000Z | 2021-04-24T19:51:53.000Z | module_3/df_test.py | JedersonLuz/Codenation_AceleraDev_DataScience | a23137ba7f1349bdc544647ef680ea6f822f797b | [
"MIT"
] | 7 | 2020-04-24T15:07:55.000Z | 2020-08-24T04:47:20.000Z | import pandas as pd
import altair as alt
import streamlit as st
# FIX: ``urllib.error`` was referenced in the except clause below without being
# imported, which turned any connection failure into a NameError.
import urllib.error


@st.cache
def get_UN_data():
    """Fetch the UN agriculture dataset and return it indexed by region (cached)."""
    AWS_BUCKET_URL = "https://streamlit-demo-data.s3-us-west-2.amazonaws.com"
    df = pd.read_csv(AWS_BUCKET_URL + "/agri.csv.gz")
    return df.set_index("Region")


try:
    df = get_UN_data()
except urllib.error.URLError as e:
    st.error(
        """
        **This demo requires internet access.**
        Connection error: %s
    """
        % e.reason
    )
    # NOTE(review): this script was lifted from a function body — the original
    # ``return`` is not possible at module level, so execution falls through.

countries = st.multiselect(
    "Choose countries", list(df.index), ["China", "United States of America"]
)
if not countries:
    st.error("Please select at least one country.")
    # See NOTE above: no early exit is possible at module level.

data = df.loc[countries]
data /= 1000000.0  # convert raw dollars to billions
st.write("### Gross Agricultural Production ($B)", data.sort_index())

# Reshape to long format (year, region, value) for the Altair area chart.
data = data.T.reset_index()
data = pd.melt(data, id_vars=["index"]).rename(
    columns={"index": "year", "value": "Gross Agricultural Product ($B)"}
)
chart = (
    alt.Chart(data)
    .mark_area(opacity=0.3)
    .encode(
        x="year:T",
        y=alt.Y("Gross Agricultural Product ($B):Q", stack=None),
        color="Region:N",
    )
)
st.altair_chart(chart, use_container_width=True) | 25.354167 | 78 | 0.612983 | import pandas as pd
import altair as alt
import streamlit as st
@st.cache
def get_UN_data():
AWS_BUCKET_URL = "https://streamlit-demo-data.s3-us-west-2.amazonaws.com"
df = pd.read_csv(AWS_BUCKET_URL + "/agri.csv.gz")
return df.set_index("Region")
try:
df = get_UN_data()
except urllib.error.URLError as e:
st.error(
"""
**This demo requires internet access.**
Connection error: %s
"""
% e.reason
)
countries = st.multiselect(
"Choose countries", list(df.index), ["China", "United States of America"]
)
if not countries:
st.error("Please select at least one country.")
data = df.loc[countries]
data /= 1000000.0
st.write("### Gross Agricultural Production ($B)", data.sort_index())
data = data.T.reset_index()
data = pd.melt(data, id_vars=["index"]).rename(
columns={"index": "year", "value": "Gross Agricultural Product ($B)"}
)
chart = (
alt.Chart(data)
.mark_area(opacity=0.3)
.encode(
x="year:T",
y=alt.Y("Gross Agricultural Product ($B):Q", stack=None),
color="Region:N",
)
)
st.altair_chart(chart, use_container_width=True) | true | true |
f7f484d1bbeadd341cdcf12d1a3dd530c4a78b88 | 9,585 | py | Python | sample/simple_pyhlm_sample.py | kishiyamat/npbdaa | c13a97b32635e00b192b7075fdc09875710c5029 | [
"MIT"
] | null | null | null | sample/simple_pyhlm_sample.py | kishiyamat/npbdaa | c13a97b32635e00b192b7075fdc09875710c5029 | [
"MIT"
] | 2 | 2020-07-05T02:11:53.000Z | 2020-07-08T00:00:37.000Z | sample/simple_pyhlm_sample.py | kishiyamat/npbdaa | c13a97b32635e00b192b7075fdc09875710c5029 | [
"MIT"
] | null | null | null | import time
import warnings
from pathlib import Path
import numpy as np
import pyhsmm
from tqdm import trange
from util.config_parser import ConfigParser_with_eval
from pyhlm.model import WeakLimitHDPHLM
from pyhlm.word_model import LetterHSMM
warnings.filterwarnings('ignore')
# import pyximport;
# pyximport.install() # https://stackoverflow.com/questions/36880336/setup-of-pycharm-for-cython
def load_config(filename):
    """Read *filename* with the eval-capable config parser and return the parser."""
    parser = ConfigParser_with_eval()
    parser.read(filename)
    return parser
def load_datas():
    """Load every observation listed in ``files.txt`` from the ``DATA`` directory.

    Returns:
        list of np.ndarray: one feature matrix per name in ``files.txt``,
        loaded from ``DATA/<name>.txt``.
    """
    names = np.loadtxt("files.txt", dtype=str)
    # The original kept an unused ``files = names`` alias; removed.
    return [np.loadtxt("DATA/" + name + ".txt") for name in names]
def unpack_durations(dur):
    """Expand a duration vector into a 0/1 boundary-indicator sequence.

    The returned array has length ``dur.sum()`` and carries a 1.0 at the
    last frame of every segment, 0.0 everywhere else.
    """
    boundaries = np.zeros(dur.sum())
    ends = np.cumsum(dur) - 1  # index of the final frame of each segment
    boundaries[ends] = 1.0
    return boundaries
def save_stateseq(model):
    """Append the model's sampled state sequences to per-observation files.

    For observation *i* (named by the i-th row of ``files.txt``) this appends
    the superstate sequence to ``results/<name>_s.txt``, the letter state
    sequence to ``results/<name>_l.txt`` and the unpacked duration boundary
    indicators to ``results/<name>_d.txt`` — one call per Gibbs iteration.

    NOTE(review): assumes ``model.states_list`` ordering matches ``files.txt``
    — holds as long as data were added in that order.
    """
    # Save sampled states sequences.
    names = np.loadtxt("files.txt", dtype=str)
    for i, s in enumerate(model.states_list):
        with open("results/" + names[i] + "_s.txt", "a") as f:
            np.savetxt(f, s.stateseq, fmt="%d")
        with open("results/" + names[i] + "_l.txt", "a") as f:
            np.savetxt(f, s.letter_stateseq, fmt="%d")
        with open("results/" + names[i] + "_d.txt", "a") as f:
            np.savetxt(f, unpack_durations(s.durations_censored), fmt="%d")
def save_params_as_text(itr_idx, model):
    """Dump ``model.params`` as plain ``str()`` text for iteration *itr_idx*."""
    with open("parameters/ITR_{0:04d}.txt".format(itr_idx), "w") as f:
        f.write(str(model.params))
def save_params_as_file(iter_idx, model):
    """Mirror ``model.params`` onto disk as a directory tree under parameters/ITR_xxxx."""
    params = model.params
    root_dir = Path("parameters/ITR_{0:04d}".format(iter_idx))
    root_dir.mkdir(exist_ok=True)
    save_json(root_dir, params)
def save_json(root_dir, json_obj):
    """Recursively mirror a nested dict onto the filesystem under *root_dir*.

    Each dict-valued key becomes a sub-directory; each leaf becomes
    ``<key>.txt``: integer ndarrays via ``%d``-formatted ``np.savetxt``
    (exact representation), other ndarrays via plain ``np.savetxt``, and
    anything else via ``str()``.
    """
    for keyname, subjson in json_obj.items():
        # isinstance (instead of the original exact type() checks) also
        # accepts dict/ndarray subclasses; local renamed so the builtin
        # ``dir`` is no longer shadowed.
        if isinstance(subjson, dict):
            subdir = root_dir / keyname
            subdir.mkdir(exist_ok=True)
            save_json(subdir, subjson)
            continue
        savefile = root_dir / f"{keyname}.txt"
        if isinstance(subjson, np.ndarray):
            # np.issubdtype covers all signed and unsigned integer dtypes,
            # replacing the original hand-written 8-entry dtype list.
            if np.issubdtype(subjson.dtype, np.integer):
                np.savetxt(savefile, subjson, fmt="%d")
            else:
                np.savetxt(savefile, subjson)
        else:
            savefile.write_text(str(subjson))
def save_params_as_npz(iter_idx, model):
    """Flatten the nested parameter dict and store it as a single .npz archive."""
    params = model.params
    flatten_params = flatten_json(params)
    # flatten_params = copy_flatten_json(flatten_params)
    np.savez(f"parameters/ITR_{iter_idx:04d}.npz", **flatten_params)
def flatten_json(json_obj, keyname_prefix=None, dict_obj=None):
    """Flatten a nested dict into ``{"a/b/c": leaf}`` form.

    Nested dicts contribute '/'-joined key paths; every non-dict value is
    stored as a leaf under its full path. ``dict_obj`` lets recursive calls
    (or the caller) accumulate into an existing dict, which is also returned.
    """
    out = {} if dict_obj is None else dict_obj
    prefix = "" if keyname_prefix is None else keyname_prefix
    for key, value in json_obj.items():
        if type(value) is dict:
            flatten_json(value, keyname_prefix=f"{prefix}{key}/", dict_obj=out)
        else:
            out[f"{prefix}{key}"] = value
    return out
def unflatten_json(flatten_json_obj):
    """Rebuild a nested dict from ``{"a/b/c": leaf}`` paths (inverse of flatten_json)."""
    nested = {}
    for path, value in flatten_json_obj.items():
        *parents, leaf = path.split("/")
        node = nested
        for part in parents:
            # Create intermediate dicts on demand while walking the path.
            node = node.setdefault(part, {})
        node[leaf] = value
    return nested
def copy_flatten_json(json_obj):
    """Copy each value of a flattened parameter dict.

    Scalars are shared, lists/tuples are shallow-sliced, ndarrays are copied;
    any other value type raises NotImplementedError.
    """
    copied = {}
    for key, value in json_obj.items():
        kind = type(value)
        if kind in (int, float, complex, bool):
            copied[key] = value
        elif kind in (list, tuple):
            copied[key] = value[:]
        elif kind is np.ndarray:
            copied[key] = value.copy()
        else:
            raise NotImplementedError(f"type :{kind} can not copy. Plz implement here!")
    return copied
def save_loglikelihood(model):
    """Append the model's current log likelihood to the summary file."""
    with open("summary_files/log_likelihood.txt", "a") as f:
        f.write(str(model.log_likelihood()) + "\n")
def save_resample_times(resample_time):
    """Append one resampling wall-clock duration (seconds) to the summary file."""
    with open("summary_files/resample_times.txt", "a") as f:
        f.write(str(resample_time) + "\n")
def main():
    """Run the full NPB-DAA training pipeline.

    Steps: create output directories, load hyperparameter configs, build the
    letter-level HSMM and the word-level HDP-HLM, pre-train the letter model,
    attach the observations, then Gibbs-sample for ``train_iter`` iterations,
    persisting state sequences, parameters, likelihoods and timings each sweep.
    """
    # Ensure that you have the directories
    Path("results").mkdir(exist_ok=True)
    Path("parameters").mkdir(exist_ok=True)
    Path("summary_files").mkdir(exist_ok=True)
    # Declare the config path
    # NOTE: use `unroll_default_config.py` to get the following configs.
    hypparams_model = "hypparams/model.config"
    hypparams_letter_duration = "hypparams/letter_duration.config"
    hypparams_letter_hsmm = "hypparams/letter_hsmm.config"
    hypparams_letter_observation = "hypparams/letter_observation.config"
    hypparams_pyhlm = "hypparams/pyhlm.config"
    hypparams_word_length = "hypparams/word_length.config"
    hypparams_superstate = "hypparams/superstate.config"
    # Parse configs such as hyper parameters
    config_parser = load_config(hypparams_model)
    section = config_parser["model"]  # it has some sections
    thread_num: int = section["thread_num"]
    pretrain_iter: int = section["pretrain_iter"]
    train_iter: int = section["train_iter"]
    word_num: int = section["word_num"]
    letter_num: int = section["letter_num"]
    observation_dim = section["observation_dim"]
    # load_config(...)[key] returns just that config section (a Section object,
    # usable like a dict).
    hlm_hypparams = load_config(hypparams_pyhlm)["pyhlm"]
    config_parser = load_config(hypparams_letter_observation)
    obs_hypparams = [config_parser[f"{i + 1}_th"] for i in range(letter_num)]
    config_parser = load_config(hypparams_letter_duration)
    dur_hypparams = [config_parser[f"{i + 1}_th"] for i in range(letter_num)]
    len_hypparams = load_config(hypparams_word_length)["word_length"]
    letter_hsmm_hypparams = load_config(hypparams_letter_hsmm)["letter_hsmm"]
    superstate_config = load_config(hypparams_superstate)
    # Make instance of distributions and models
    letter_obs_distns = [pyhsmm.distributions.Gaussian(**hypparam) for hypparam in obs_hypparams]
    letter_dur_distns = [pyhsmm.distributions.PoissonDuration(**hypparam) for hypparam in dur_hypparams]  # NOTE(review): odd argument? -- verify
    dur_distns = [pyhsmm.distributions.PoissonDuration(lmbda=20) for _ in range(word_num)]  # NOTE(review): odd argument? -- verify
    length_distn = pyhsmm.distributions.PoissonDuration(**len_hypparams)  # NOTE(review): odd argument? -- verify
    letter_hsmm = LetterHSMM(**letter_hsmm_hypparams, obs_distns=letter_obs_distns, dur_distns=letter_dur_distns)
    model = WeakLimitHDPHLM(**hlm_hypparams, letter_hsmm=letter_hsmm, dur_distns=dur_distns, length_distn=length_distn)
    # TODO: in short, a description of what needs to be done
    # 1. Setup
    #   a. clone the project
    #   b. install the required libraries
    #   c. optional: compile the Cython modules when using PyCharm etc.
    # 2. Place the data (sample/DATA/.)
    #   a. each datum is an (m, n_feature) matrix obtained from one observation (e.g. aioi_aioi)
    #      note: the matrix is exported as txt.
    #      FYI: the file name spells out the segments and words (e.g. aioi_aioi.txt)
    #   c. place the list of txt files to train on as `files.txt` (sample/.)
    # 3. Set the hyperparameters
    #   a. update the following in `default.config` as needed:
    #      model, pyhlm, letter_observation, letter_duration, letter_hsmm, superstate, word_length
    #   b. expand it with `unroll_default_config.py` (it picks the file names for you)
    # 4. Run `pyhlm_sample.py` (or simple_pyhlm_sample.py)
    #   a. it appears to carry the training through on its own
    # 5. Run `summary_and_plot.py`
    #   a. load model config -> plot results -> compute ARI etc.
    #   a. the DAA letter and segment annotation figures are written to `<label>_l.png` and `<label>_s.png`
    #   FYI: the `Path` import seems to be missing (c.f. https://github.com/RyoOzaki/npbdaa/pull/2/files)
    #   FYI: generating `Log_likelihood.png` raises a ValueError
    # TODO: questions
    # 1. analysis/reporting procedure
    #   a. like A, A
    #   b. like B, B
    #   c. other
    # 1. when the analysis includes a baseline comparison (b/c), what is a reasonable baseline?
    # 1. does documentation exist for step 3 above (setting the hyperparameters)?
    # %%
    files = np.loadtxt("files.txt", dtype=str)
    datas = load_datas()
    # %% Pre training: fit the letter HSMM alone before composing the full model.
    for data in datas:
        letter_hsmm.add_data(data, **superstate_config["DEFAULT"])
    for t in trange(pretrain_iter):  # t: 0
        letter_hsmm.resample_model(num_procs=thread_num)
    letter_hsmm.states_list = []
    # %%
    print("Add datas...")
    for name, data in zip(files, datas):
        model.add_data(data, **superstate_config[name], generate=False)
    model.resample_states(num_procs=thread_num)
    # # or
    # for name, data in zip(files, datas):
    #     model.add_data(data, **superstate_config[name], initialize_from_prior=False)
    print("Done!")
    # %% Save init params (iteration 0, before any full-model sweep)
    # save_params_as_text(0, model)
    # save_params_as_file(0, model)
    save_params_as_npz(0, model)
    save_loglikelihood(model)
    # %% Main Gibbs sampling loop; everything is persisted after each sweep.
    for t in trange(train_iter):
        st = time.time()
        model.resample_model(num_procs=thread_num)
        resample_model_time = time.time() - st
        save_stateseq(model)
        save_loglikelihood(model)
        # save_params_as_text(t+1, model)
        # save_params_as_file(t+1, model)
        save_params_as_npz(t + 1, model)
        save_resample_times(resample_model_time)
        print(model.word_list)
        print(model.word_counts())
        print(f"log_likelihood:{model.log_likelihood()}")
        print(f"resample_model:{resample_model_time}")
if __name__ == "__main__":
    main()
| 35.898876 | 119 | 0.664371 | import time
import warnings
from pathlib import Path
import numpy as np
import pyhsmm
from tqdm import trange
from util.config_parser import ConfigParser_with_eval
from pyhlm.model import WeakLimitHDPHLM
from pyhlm.word_model import LetterHSMM
warnings.filterwarnings('ignore')
d(filename)
return cp
def load_datas():
data = []
names = np.loadtxt("files.txt", dtype=str)
files = names
for name in names:
data.append(np.loadtxt("DATA/" + name + ".txt"))
return data
def unpack_durations(dur):
unpacked = np.zeros(dur.sum())
d = np.cumsum(dur)
unpacked[d - 1] = 1.0
return unpacked
def save_stateseq(model):
names = np.loadtxt("files.txt", dtype=str)
for i, s in enumerate(model.states_list):
with open("results/" + names[i] + "_s.txt", "a") as f:
np.savetxt(f, s.stateseq, fmt="%d")
with open("results/" + names[i] + "_l.txt", "a") as f:
np.savetxt(f, s.letter_stateseq, fmt="%d")
with open("results/" + names[i] + "_d.txt", "a") as f:
np.savetxt(f, unpack_durations(s.durations_censored), fmt="%d")
def save_params_as_text(itr_idx, model):
with open("parameters/ITR_{0:04d}.txt".format(itr_idx), "w") as f:
f.write(str(model.params))
def save_params_as_file(iter_idx, model):
params = model.params
root_dir = Path("parameters/ITR_{0:04d}".format(iter_idx))
root_dir.mkdir(exist_ok=True)
save_json(root_dir, params)
def save_json(root_dir, json_obj):
for keyname, subjson in json_obj.items():
type_of_subjson = type(subjson)
if type_of_subjson == dict:
dir = root_dir / keyname
dir.mkdir(exist_ok=True)
save_json(dir, json_obj[keyname])
else:
savefile = root_dir / f"{keyname}.txt"
if type_of_subjson == np.ndarray:
if subjson.dtype in [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64]:
np.savetxt(savefile, subjson, fmt="%d")
else:
np.savetxt(savefile, subjson)
else:
savefile.write_text(str(subjson))
def save_params_as_npz(iter_idx, model):
params = model.params
flatten_params = flatten_json(params)
np.savez(f"parameters/ITR_{iter_idx:04d}.npz", **flatten_params)
def flatten_json(json_obj, keyname_prefix=None, dict_obj=None):
if dict_obj is None:
dict_obj = {}
if keyname_prefix is None:
keyname_prefix = ""
for keyname, subjson in json_obj.items():
if type(subjson) == dict:
prefix = f"{keyname_prefix}{keyname}/"
flatten_json(subjson, keyname_prefix=prefix, dict_obj=dict_obj)
else:
dict_obj[f"{keyname_prefix}{keyname}"] = subjson
return dict_obj
def unflatten_json(flatten_json_obj):
dict_obj = {}
for keyname, value in flatten_json_obj.items():
current_dict = dict_obj
splitted_keyname = keyname.split("/")
for key in splitted_keyname[:-1]:
if key not in current_dict:
current_dict[key] = {}
current_dict = current_dict[key]
current_dict[splitted_keyname[-1]] = value
return dict_obj
def copy_flatten_json(json_obj):
new_json = {}
for keyname, subjson in json_obj.items():
type_of_subjson = type(subjson)
if type_of_subjson in [int, float, complex, bool]:
new_json[keyname] = subjson
elif type_of_subjson in [list, tuple]:
new_json[keyname] = subjson[:]
elif type_of_subjson == np.ndarray:
new_json[keyname] = subjson.copy()
else:
raise NotImplementedError(f"type :{type_of_subjson} can not copy. Plz implement here!")
return new_json
def save_loglikelihood(model):
with open("summary_files/log_likelihood.txt", "a") as f:
f.write(str(model.log_likelihood()) + "\n")
def save_resample_times(resample_time):
with open("summary_files/resample_times.txt", "a") as f:
f.write(str(resample_time) + "\n")
def main():
Path("results").mkdir(exist_ok=True)
Path("parameters").mkdir(exist_ok=True)
Path("summary_files").mkdir(exist_ok=True)
hypparams_model = "hypparams/model.config"
hypparams_letter_duration = "hypparams/letter_duration.config"
hypparams_letter_hsmm = "hypparams/letter_hsmm.config"
hypparams_letter_observation = "hypparams/letter_observation.config"
hypparams_pyhlm = "hypparams/pyhlm.config"
hypparams_word_length = "hypparams/word_length.config"
hypparams_superstate = "hypparams/superstate.config"
config_parser = load_config(hypparams_model)
section = config_parser["model"]
thread_num: int = section["thread_num"]
pretrain_iter: int = section["pretrain_iter"]
train_iter: int = section["train_iter"]
word_num: int = section["word_num"]
letter_num: int = section["letter_num"]
observation_dim = section["observation_dim"]
hlm_hypparams = load_config(hypparams_pyhlm)["pyhlm"]
config_parser = load_config(hypparams_letter_observation)
obs_hypparams = [config_parser[f"{i + 1}_th"] for i in range(letter_num)]
config_parser = load_config(hypparams_letter_duration)
dur_hypparams = [config_parser[f"{i + 1}_th"] for i in range(letter_num)]
len_hypparams = load_config(hypparams_word_length)["word_length"]
letter_hsmm_hypparams = load_config(hypparams_letter_hsmm)["letter_hsmm"]
superstate_config = load_config(hypparams_superstate)
letter_obs_distns = [pyhsmm.distributions.Gaussian(**hypparam) for hypparam in obs_hypparams]
letter_dur_distns = [pyhsmm.distributions.PoissonDuration(**hypparam) for hypparam in dur_hypparams]
dur_distns = [pyhsmm.distributions.PoissonDuration(lmbda=20) for _ in range(word_num)]
length_distn = pyhsmm.distributions.PoissonDuration(**len_hypparams)
letter_hsmm = LetterHSMM(**letter_hsmm_hypparams, obs_distns=letter_obs_distns, dur_distns=letter_dur_distns)
model = WeakLimitHDPHLM(**hlm_hypparams, letter_hsmm=letter_hsmm, dur_distns=dur_distns, length_distn=length_distn)
files = np.loadtxt("files.txt", dtype=str)
datas = load_datas()
for data in datas:
letter_hsmm.add_data(data, **superstate_config["DEFAULT"])
for t in trange(pretrain_iter):
letter_hsmm.resample_model(num_procs=thread_num)
letter_hsmm.states_list = []
print("Add datas...")
for name, data in zip(files, datas):
model.add_data(data, **superstate_config[name], generate=False)
model.resample_states(num_procs=thread_num)
print("Done!")
save_params_as_npz(0, model)
save_loglikelihood(model)
for t in trange(train_iter):
st = time.time()
model.resample_model(num_procs=thread_num)
resample_model_time = time.time() - st
save_stateseq(model)
save_loglikelihood(model)
save_params_as_npz(t + 1, model)
save_resample_times(resample_model_time)
print(model.word_list)
print(model.word_counts())
print(f"log_likelihood:{model.log_likelihood()}")
print(f"resample_model:{resample_model_time}")
if __name__ == "__main__":
main()
| true | true |
f7f485e4fc9f4c6d551adf746bf8b9cc82c5f85b | 1,008 | py | Python | stanza/tests/test_prepare_resources.py | asears/stanza | f91ca215e175d4f7b202259fe789374db7829395 | [
"Apache-2.0"
] | 3,633 | 2016-01-21T17:29:13.000Z | 2022-03-31T13:36:47.000Z | stanza/tests/test_prepare_resources.py | asears/stanza | f91ca215e175d4f7b202259fe789374db7829395 | [
"Apache-2.0"
] | 593 | 2016-01-19T07:16:05.000Z | 2022-03-31T20:23:58.000Z | stanza/tests/test_prepare_resources.py | asears/stanza | f91ca215e175d4f7b202259fe789374db7829395 | [
"Apache-2.0"
] | 525 | 2016-01-20T03:22:19.000Z | 2022-03-24T05:51:56.000Z | import pytest
import stanza
import stanza.resources.prepare_resources as prepare_resources
from stanza.tests import *
pytestmark = [pytest.mark.travis, pytest.mark.pipeline]
def test_split_model_name():
    """split_model_name should partition '<lang>_<package>_<processor>.pt' names."""
    # Basic test
    lang, package, processor = prepare_resources.split_model_name('ro_nonstandard_tagger.pt')
    assert lang == 'ro'
    assert package == 'nonstandard'
    assert processor == 'pos'
    # Check that nertagger is found even though it also ends with tagger
    # Check that ncbi_disease is correctly partitioned despite the extra _
    lang, package, processor = prepare_resources.split_model_name('en_ncbi_disease_nertagger.pt')
    assert lang == 'en'
    assert package == 'ncbi_disease'
    assert processor == 'ner'
    # assert that processors with _ in them are also okay
    lang, package, processor = prepare_resources.split_model_name('en_pubmed_forward_charlm.pt')
    assert lang == 'en'
    assert package == 'pubmed'
    assert processor == 'forward_charlm'
| 32.516129 | 97 | 0.733135 | import pytest
import stanza
import stanza.resources.prepare_resources as prepare_resources
from stanza.tests import *
pytestmark = [pytest.mark.travis, pytest.mark.pipeline]
def test_split_model_name():
lang, package, processor = prepare_resources.split_model_name('ro_nonstandard_tagger.pt')
assert lang == 'ro'
assert package == 'nonstandard'
assert processor == 'pos'
lang, package, processor = prepare_resources.split_model_name('en_ncbi_disease_nertagger.pt')
assert lang == 'en'
assert package == 'ncbi_disease'
assert processor == 'ner'
lang, package, processor = prepare_resources.split_model_name('en_pubmed_forward_charlm.pt')
assert lang == 'en'
assert package == 'pubmed'
assert processor == 'forward_charlm'
| true | true |
f7f486a57186930e852420b301713c394d58d21b | 710 | py | Python | tests/psd_tools/api/test_mask.py | mrstephenneal/psd-tools2 | fde1c9768b8d2a232e5afd5f1b58983ec675b960 | [
"MIT"
] | 19 | 2019-11-21T09:26:52.000Z | 2022-03-16T13:51:29.000Z | tests/psd_tools/api/test_mask.py | sfneal/psd-tools3 | 61e780a2b8dd34b4d9be2d2ffea6274ab17d6051 | [
"MIT"
] | 1 | 2018-10-01T14:14:50.000Z | 2018-10-01T14:14:50.000Z | tests/psd_tools/api/test_mask.py | mrstephenneal/psd-tools3 | fde1c9768b8d2a232e5afd5f1b58983ec675b960 | [
"MIT"
] | 1 | 2021-12-24T06:42:05.000Z | 2021-12-24T06:42:05.000Z | from __future__ import absolute_import, unicode_literals
import logging
import pytest
from psd_tools.api.psd_image import PSDImage
from ..utils import full_name
logger = logging.getLogger(__name__)
@pytest.fixture
def layer_mask_data():
    """Load the PSD fixture containing layers that carry raster masks."""
    return PSDImage.open(full_name('layer_mask_data.psd'))
def test_layer_mask(layer_mask_data):
    """Smoke-test mask accessors on every masked layer of the fixture.

    The bare attribute reads below only assert "does not raise"; the single
    explicit assertion is that topil() yields a PIL Image.
    """
    from PIL.Image import Image
    for layer in layer_mask_data:
        if not layer.has_mask():
            continue
        mask = layer.mask
        # Attribute accesses are smoke checks — they must not raise.
        mask.background_color
        mask.bbox
        mask.size
        mask.disabled
        mask.flags
        mask.parameters
        mask.real_flags
        repr(mask)
        assert isinstance(mask.topil(), Image)
| 20.882353 | 58 | 0.687324 | from __future__ import absolute_import, unicode_literals
import logging
import pytest
from psd_tools.api.psd_image import PSDImage
from ..utils import full_name
logger = logging.getLogger(__name__)
@pytest.fixture
def layer_mask_data():
return PSDImage.open(full_name('layer_mask_data.psd'))
def test_layer_mask(layer_mask_data):
from PIL.Image import Image
for layer in layer_mask_data:
if not layer.has_mask():
continue
mask = layer.mask
mask.background_color
mask.bbox
mask.size
mask.disabled
mask.flags
mask.parameters
mask.real_flags
repr(mask)
assert isinstance(mask.topil(), Image)
| true | true |
f7f48798927c69521134ccd477a2b5496589c1c9 | 4,421 | py | Python | admin_tools/dashboard/south_migrations/0002_auto__add_field_dashboardpreferences_dashboard_id.py | asherf/django-admin-tools | 26a993545de7d68286be56ac640fe12acf1a1abe | [
"MIT"
] | 711 | 2015-06-21T10:08:06.000Z | 2022-03-25T08:46:37.000Z | admin_tools/dashboard/south_migrations/0002_auto__add_field_dashboardpreferences_dashboard_id.py | asherf/django-admin-tools | 26a993545de7d68286be56ac640fe12acf1a1abe | [
"MIT"
] | 102 | 2015-06-22T12:38:21.000Z | 2022-03-29T14:00:54.000Z | admin_tools/dashboard/south_migrations/0002_auto__add_field_dashboardpreferences_dashboard_id.py | asherf/django-admin-tools | 26a993545de7d68286be56ac640fe12acf1a1abe | [
"MIT"
] | 149 | 2015-06-21T10:16:49.000Z | 2022-03-28T13:11:47.000Z | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from django.conf import settings
user_model = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
class Migration(SchemaMigration):
    """South schema migration: add DashboardPreferences.dashboard_id.

    Adds a CharField(max_length=100) column (default 'dashboard') to the
    admin_tools_dashboard_preferences table, so one user can store
    preferences for multiple dashboards.
    """

    def forwards(self, orm):
        # Adding field 'DashboardPreferences.dashboard_id'
        db.add_column('admin_tools_dashboard_preferences', 'dashboard_id', self.gf('django.db.models.fields.CharField')(default='dashboard', max_length=100), keep_default=False)

    def backwards(self, orm):
        # Deleting field 'DashboardPreferences.dashboard_id'
        db.delete_column('admin_tools_dashboard_preferences', 'dashboard_id')

    # Frozen ORM state captured by South at the time this migration was
    # generated; used to build the `orm` object passed to forwards/backwards.
    # Do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        # Keyed on AUTH_USER_MODEL so the migration also works with a
        # custom user model (see the user_model assignment above).
        user_model: {
            'Meta': {'object_name': user_model.split('.')[1]},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'dashboard.dashboardpreferences': {
            'Meta': {'ordering': "('user',)", 'object_name': 'DashboardPreferences', 'db_table': "'admin_tools_dashboard_preferences'"},
            'dashboard_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'data': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_model})
        }
    }

    complete_apps = ['dashboard']
| 61.402778 | 182 | 0.589686 |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from django.conf import settings
user_model = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
class Migration(SchemaMigration):
def forwards(self, orm):
db.add_column('admin_tools_dashboard_preferences', 'dashboard_id', self.gf('django.db.models.fields.CharField')(default='dashboard', max_length=100), keep_default=False)
def backwards(self, orm):
db.delete_column('admin_tools_dashboard_preferences', 'dashboard_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model: {
'Meta': {'object_name': user_model.split('.')[1]},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'dashboard.dashboardpreferences': {
'Meta': {'ordering': "('user',)", 'object_name': 'DashboardPreferences', 'db_table': "'admin_tools_dashboard_preferences'"},
'dashboard_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'data': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_model})
}
}
complete_apps = ['dashboard']
| true | true |
f7f487d8c6dc1e5cfa4a1510ee7c3e19029fda9e | 3,167 | py | Python | tests/test_licenses.py | richard-dinh/ricecooker | 1de27c9c01aa2c65b2109ada2b790b23cbc3b0dd | [
"MIT"
] | 14 | 2017-01-10T09:33:03.000Z | 2021-11-28T12:11:27.000Z | tests/test_licenses.py | richard-dinh/ricecooker | 1de27c9c01aa2c65b2109ada2b790b23cbc3b0dd | [
"MIT"
] | 174 | 2016-09-29T17:32:54.000Z | 2022-03-29T15:02:48.000Z | tests/test_licenses.py | richard-dinh/ricecooker | 1de27c9c01aa2c65b2109ada2b790b23cbc3b0dd | [
"MIT"
] | 41 | 2016-08-29T23:26:17.000Z | 2021-11-29T17:12:03.000Z | """ Tests for license getting and serialization """
import json
import pytest
from le_utils.constants.licenses import (
CC_BY, CC_BY_SA, CC_BY_ND, CC_BY_NC, CC_BY_NC_SA, CC_BY_NC_ND,
ALL_RIGHTS_RESERVED,
PUBLIC_DOMAIN,
SPECIAL_PERMISSIONS
)
from ricecooker.classes.licenses import get_license
""" *********** LICENSE FIXTURES *********** """
@pytest.fixture
def license_objects():
    """Build License objects for every regular license id, in four argument combos."""
    regular_ids = [CC_BY, CC_BY_SA, CC_BY_ND, CC_BY_NC, CC_BY_NC_SA, CC_BY_NC_ND,
                   ALL_RIGHTS_RESERVED, PUBLIC_DOMAIN]
    # Same combos the original exercised: both kwargs, description only,
    # copyright holder only, and bare.
    arg_combos = [
        dict(copyright_holder='Some name', description='Le description'),
        dict(description='Le description solo2'),
        dict(copyright_holder='Some name3'),
        dict(),
    ]
    licenses = []
    for regular_id in regular_ids:
        for kwargs in arg_combos:
            licence_obj = get_license(regular_id, **kwargs)
            assert licence_obj, 'licence_obj should exist'
            licenses.append(licence_obj)
    return licenses
@pytest.fixture
def special_license():
    """A SPECIAL_PERMISSIONS license, which requires an explicit description."""
    kwargs = {'copyright_holder': 'Authorov',
              'description': 'Only for use offline'}
    return get_license(SPECIAL_PERMISSIONS, **kwargs)
""" *********** LICENSE TESTS *********** """
def test_the_license_fixtures(license_objects, special_license):
    """Sanity-check the fixtures before the real tests rely on them."""
    # The regular-license fixture should have produced a non-trivial batch.
    assert len(license_objects) > 4
    # The special license must be the right kind and carry its description.
    assert special_license.license_id == SPECIAL_PERMISSIONS
    assert special_license.description
def test_bad_special_license():
    """SPECIAL_PERMISSIONS without a description must be rejected.

    The previous version asserted False inside the ``try`` and then caught
    ``AssertionError`` — which also swallowed its own failure assertion, so
    the test passed whether or not ``get_license`` raised.  ``pytest.raises``
    checks the expected exception without that blind spot.
    """
    with pytest.raises(AssertionError):
        get_license(SPECIAL_PERMISSIONS, description=None)
def _compare_licence_objects(obj1, obj2):
same = True
if not obj1.license_id == obj2.license_id:
same = False
if not obj1.description == obj2.description:
same = False
if not obj1.copyright_holder == obj2.copyright_holder:
same = False
return same
def test_license_serilizibility(license_objects, special_license):
    """Round-trip every license through JSON and compare attribute-by-attribute."""
    all_licenses = license_objects
    all_licenses.append(special_license)
    for original in all_licenses:
        # serialize to a dict, through JSON text, and back to kwargs
        round_tripped = get_license(**json.loads(json.dumps(original.as_dict())))
        assert _compare_licence_objects(original, round_tripped), \
            'License attributes not the same after serizlize'
| 32.316327 | 88 | 0.689927 |
import json
import pytest
from le_utils.constants.licenses import (
CC_BY, CC_BY_SA, CC_BY_ND, CC_BY_NC, CC_BY_NC_SA, CC_BY_NC_ND,
ALL_RIGHTS_RESERVED,
PUBLIC_DOMAIN,
SPECIAL_PERMISSIONS
)
from ricecooker.classes.licenses import get_license
@pytest.fixture
def license_objects():
regular_ids = [CC_BY, CC_BY_SA, CC_BY_ND, CC_BY_NC, CC_BY_NC_SA, CC_BY_NC_ND,
ALL_RIGHTS_RESERVED, PUBLIC_DOMAIN]
license_objects = []
for regular_id in regular_ids:
licence_obj = get_license(regular_id,
copyright_holder='Some name',
description='Le description')
assert licence_obj, 'licence_obj should exist'
license_objects.append(licence_obj)
licence_obj = get_license(regular_id, description='Le description solo2')
assert licence_obj, 'licence_obj should exist'
license_objects.append(licence_obj)
licence_obj = get_license(regular_id, copyright_holder='Some name3')
assert licence_obj, 'licence_obj should exist'
license_objects.append(licence_obj)
licence_obj = get_license(regular_id)
assert licence_obj, 'licence_obj should exist'
license_objects.append(licence_obj)
return license_objects
@pytest.fixture
def special_license():
return get_license(SPECIAL_PERMISSIONS,
copyright_holder='Authorov',
description='Only for use offline')
def test_the_license_fixtures(license_objects, special_license):
assert len(license_objects) > 4
assert special_license.license_id == SPECIAL_PERMISSIONS
assert special_license.description
def test_bad_special_license():
try:
get_license(SPECIAL_PERMISSIONS, description=None)
assert False, 'Should not come here because of missing description'
except AssertionError:
assert True, 'SPECIAL_PERMISSIONS without description should raise an exception'
def _compare_licence_objects(obj1, obj2):
same = True
if not obj1.license_id == obj2.license_id:
same = False
if not obj1.description == obj2.description:
same = False
if not obj1.copyright_holder == obj2.copyright_holder:
same = False
return same
def test_license_serilizibility(license_objects, special_license):
orig_licenses = license_objects
orig_licenses.append(special_license)
for licence_orig in orig_licenses:
license_dict = licence_orig.as_dict()
license_json = json.dumps(license_dict)
license_copy_dict = json.loads(license_json)
license_copy = get_license(**license_copy_dict)
same_attributes = _compare_licence_objects(licence_orig, license_copy)
assert same_attributes, 'License attributes not the same after serizlize'
| true | true |
f7f4886da19fb94e741c0e55063fff72c525e77d | 18,384 | py | Python | src/docmail/client.py | ancoris/docmail-api-wrapper | 1dbc4d1136f3052eaf17795e84fdd668378280ec | [
"MIT"
] | null | null | null | src/docmail/client.py | ancoris/docmail-api-wrapper | 1dbc4d1136f3052eaf17795e84fdd668378280ec | [
"MIT"
] | null | null | null | src/docmail/client.py | ancoris/docmail-api-wrapper | 1dbc4d1136f3052eaf17795e84fdd668378280ec | [
"MIT"
] | 1 | 2022-01-21T13:30:41.000Z | 2022-01-21T13:30:41.000Z | #!/usr/bin/env python
#
# Docmail API Wrapper (Python)
#
#Copyright (c) 2011 Appogee (www.appogee.co.uk)
#
#Permission is hereby granted, free of charge, to any person obtaining
#a copy of this software and associated documentation files (the
#"Software"), to deal in the Software without restriction, including
#without limitation the rights to use, copy, modify, merge, publish,
#distribute, sublicense, and/or sell copies of the Software, and to
#permit persons to whom the Software is furnished to do so, subject to
#the following conditions:
#
#The above copyright notice and this permission notice shall be
#included in all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
#MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
#NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
#LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
#WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Docmail API Wrapper (Python) is a Python wrapper for the docmail API V2.
For futher information and full documentation of Docmail API see http://www.cfhdocmail.com/downloads/WebServiceHelp2.pdf
Docmail is a web based service for sending snail-mail electronically.
For further information on Docmail see http://cfhdocmail.com/
This wrapper extends the suds library for making soap calls.
For further information on suds see https://fedorahosted.org/suds/
Project home: http://code.google.com/p/python-docmail/
"""
__author__ = 'gwyn.howell@appogee.co.uk (Gwyn Howell)'
__version__ = '1.0'
__license__ = 'Apache License 2.0 (http://www.apache.org/licenses/LICENSE-2.0)'
from xml.dom.minidom import parseString
import base64
import datetime
import os.path
import re
import suds.client
from docmail import enums, util
DOCMAIL_WSDL_LIVE = 'https://www.cfhdocmail.com/LiveAPI2/DMWS.asmx?WSDL'
DOCMAIL_WSDL_TEST = 'https://www.cfhdocmail.com/TestAPI2/DMWS.asmx?WSDL'
DOCMAIL_WSDL_BETA = 'https://www.cfhdocmail.com/BetaAPI2/DMWS.asmx?WSDL'
"""
NOTE: Change this to whichever API you want to use. You will need
a separate account for each. See README for further info """
DOCMAIL_WSDL = DOCMAIL_WSDL_TEST
RE_DATETIME = '^(0[1-9]|[12][0-9]|3[01])[/](0[1-9]|1[012])[/](19|20)\d\d[ ]([0-1][0-9]|2[0-3])[:][0-5][0-9][:][0-5][0-9]$'
PTN_DATETIME = '%d/%m/%Y %H:%M:%S'
# some default values for processing a mailing
PROCESS_MAILING = { 'po_reference': '',
'payment_method': 'Topup',
'email_success_list': '',
'email_error_list': '',
'http_post_on_success': '',
'http_post_on_error': '',
'max_price_ex_vat': 0,
'skip_preview_image_generation': False }
class DocmailException(Exception):
    """Error reported by the Docmail web service.

    Carries the error code, the short error name and the longer
    description that Docmail returns in an error response.
    """

    def __init__(self, code, error, description):
        self.code = code
        self.error = error
        self.description = description

    def __str__(self):
        return '{0} ({1}): {2}'.format(self.error, self.code, self.description)
class DocmailObject(object):
    """Generic bag of attributes parsed out of a Docmail XML response."""

    def _format_data(self):
        """Hook for subclasses to post-process parsed response data.

        The base implementation does nothing; subclasses override it to
        normalise their fields after parsing.
        """
class Mailing(DocmailObject):
    """A Docmail mailing job, pre-loaded with Docmail's default options."""

    def __init__(self, name=None):
        self.name = name
        self.mailing_description = None
        # NOTE: mailing_product and product_type appear to be the same thing.
        self.product_type = enums.ProductType.A4Letter
        self.is_colour = True
        self.is_duplex = False
        self.delivery_type = enums.DeliveryType.Standard
        self.courier_delivery_to_self = False
        # Despatch ASAP by default, with a nominal date two days out.
        self.despatch_asap = True
        self.despatch_date = datetime.datetime.now() + datetime.timedelta(days=2)
        self.address_name_prefix = None
        self.address_name_format = enums.AddressNameFormat.FullName
        self.discount_code = None
        self.min_envelope_size = enums.MinEnvelopeSize.C5

    @property
    def is_mono(self):
        """Deliberately unsupported; colour state lives on is_colour."""
        raise Exception('property not supported - use is_colour instead')

    def _format_data(self):
        """Normalise fields parsed from a Docmail mailing-details response."""
        # Docmail uses an all-zero GUID to mean "no mailing list attached".
        if self.mailing_list_guid == '00000000-0000-0000-0000-000000000000':
            self.mailing_list_guid = None
        # 'ASAP' is a sentinel despatch date, not a real timestamp.
        self.despatch_asap = self.despatch_date == 'ASAP'
        if self.despatch_asap:
            self.despatch_date = None
class TemplateFile(object):
    # A document template to be added to a mailing via
    # Client.add_template_file, holding the file content plus Docmail's
    # per-template options.

    def __init__(self, file, data=None):
        """ creates a template file object. can be created from:
        - file object - specify file=file object
        - file path - specify file=path to file
        - bytes - specify file=filename and data=bytes
        """
        if data:
            # bytes supplied directly; `file` is just the filename
            self.file_name = file
            self.file_data = data
        else:
            # accept either an open file object or a path (Python 2 basestring)
            if isinstance(file, basestring):
                if os.path.isfile(file):
                    file = open(file, 'r')
            self.file_name = file.name
            self.file_data = file.read()
        # extension (lower-cased, without the dot) drives validation below
        ext = self.file_name[self.file_name.rfind('.')+1:].lower()
        if not ext in ('doc', 'docx', 'rtf'):
            raise ValueError('file_name incorrect format, must be .doc, .docx or .rtf')
        self.guid = None  # assigned by Client.add_template_file after upload
        self.template_name = ''
        self.document_type = enums.DocumentType.A4Letter
        self.address_font_code = enums.AddressFont.Arial10
        # NOTE(review): ext can never be 'pdf' here because the check above
        # rejects everything except doc/docx/rtf, so this always evaluates to
        # 'Document'.  Presumably PDF support was planned or removed —
        # confirm intent before relying on 'Preformatted Stream'.
        self.template_type = 'Preformatted Stream' if ext == 'pdf' else 'Document'
        self.background_name = ''
        self.protected_area_password = ''
        self.encryption_password = ''
        self.instance_page_numbers = ''
        self.addressed_document = True
        self.can_begin_on_back = False
        self.next_template_can_begin_on_back = False
        self.bleed_supplied = True
        self.copies = 1
        self.instances = 1
        self.cycle_instances_on_copies = False
class MailingListFile(object):
    """An address/mailing-list upload for a Docmail mailing.

    Wraps the file content plus the format and mapping options that
    Client.add_mailing_list_file passes to the AddMailingListFile call.
    """

    def __init__(self, file, data=None, sheet_name=None, data_format=None, mapping_delimiter=None, mapping_fixed_width_chars=None):
        """ creates a mailing list file object. can be created from:
        - file object - specify file=file object
        - file path - specify file=path to file
        - bytes - specify file=filename and data=bytes

        sheet_name is required for Excel files.  data_format must be
        'Tab separated', 'Delimited' or 'Fixed width' for .txt files;
        'Delimited' requires mapping_delimiter and 'Fixed width' requires
        mapping_fixed_width_chars.
        """
        if data:
            # bytes supplied directly; `file` is just the filename
            self.file_name = file
            self.file_data = data
        else:
            # accept either an open file object or a path (Python 2 basestring)
            if isinstance(file, basestring):
                if os.path.isfile(file):
                    file = open(file, 'r')
            self.file_name = file.name
            self.file_data = file.read()

        # infer the Docmail data format from the file extension
        ext = self.file_name[self.file_name.rfind('.')+1:].lower()
        if ext == 'csv':
            data_format = 'CSV'
        elif ext in ('xls', 'xlsx'):
            data_format = 'Excel'
            if not sheet_name:
                raise ValueError('sheet_name argument must be provided for .%s files' % ext)
        elif ext == 'txt':
            if not data_format in ('Tab separated', 'Delimited', 'Fixed width'):
                raise ValueError('data_format not supplied or not valid - must be "Tab separated", "Delimited", or "Fixed width" for txt files')
        else:
            raise ValueError('Unsupported file - %s. Please provide a .txt, .csv, .xls or .xlsx file' % ext)

        # BUG FIX: these two checks were inverted - they raised when the
        # argument WAS supplied.  A delimited file needs its delimiter and a
        # fixed-width file needs its column widths, so raise when missing.
        if data_format == 'Delimited' and not mapping_delimiter:
            raise ValueError('mapping_delimiter arg must be provided if data_format is Delimited')
        if data_format == 'Fixed width' and not mapping_fixed_width_chars:
            raise ValueError('mapping_fixed_width_chars arg must be provided if data_format is Fixed Width')

        # some default values ...
        self.headers = True  # first row of the list is treated as a header row
        self.mapping_name = ''
        self.data_format = data_format or ''
        self.mapping_delimiter = mapping_delimiter or ' '
        self.sheet_name = sheet_name or ''
        self.mapping_fixed_width_chars = mapping_fixed_width_chars or ''
class Client(suds.client.Client):
    """SOAP client for the Docmail Web Service API (extends suds Client).

    Credentials are stored once and sent with every service call.  All
    calls request XML responses, which _parse converts into DocmailObject
    attribute bags, raising DocmailException for error responses.
    """

    def __init__(self, username, password, source='', wsdl_url=None, **kwargs):
        # default to the module-level WSDL (test/live/beta selected above)
        if not wsdl_url:
            wsdl_url = DOCMAIL_WSDL
        self.source = source
        self.username = username
        self.password = password
        # ask Docmail for XML on both success and failure responses
        self.return_format = 'XML'
        self.failure_return_format = 'XML'
        suds.client.Client.__init__(self, wsdl_url, **kwargs)

    def _parse(self, xml, return_class=DocmailObject):
        """Parse a Docmail XML response into a return_class instance.

        Each child node is read as a (name, value) pair; names are
        normalised by _format_key and values coerced by _format_value,
        then set as attributes on the result.  An 'error_code' entry makes
        this raise DocmailException instead, built from the code plus the
        following name/description nodes.
        NOTE(review): relies on the fixed node layout Docmail returns
        (value at child 1, error name at 3, description at 5) — confirm
        against the API reference if the response schema changes.
        """
        ob = return_class()
        dom = parseString(xml)
        for node in dom.firstChild.childNodes:
            key = node.childNodes[0].firstChild.wholeText
            value = node.childNodes[1].firstChild.wholeText
            key = self._format_key(key)
            value = self._format_value(value)
            if key == 'error_code':
                raise DocmailException(value,
                                       self._format_value(node.childNodes[3].firstChild.wholeText),
                                       self._format_value(node.childNodes[5].firstChild.wholeText))
            setattr(ob, key, value)
        if hasattr(ob, '_format_data'):
            ob._format_data()  # let the target class normalise its fields
        return ob

    def _format_key(self, key):
        """Normalise a Docmail field name to snake_case (e.g. 'MailingGuid' -> 'mailing_guid')."""
        if not ' ' in key:
            # CamelCase with no spaces: split on capitals, then join
            key = '_'.join(util.split_caps(key))
        key = key.strip('\/:*?"<>|').replace(' ', '_').lower()
        return key

    def _format_value(self, value):
        """Coerce a response string: dd/mm/yyyy datetimes to datetime, Yes/No to bool, else unchanged."""
        if re.match(RE_DATETIME, value):
            return datetime.datetime.strptime(value, PTN_DATETIME)
        if value.lower() == 'yes':
            return True
        if value.lower() == 'no':
            return False
        return value

    def get_mailing(self, guid):
        """Fetch an existing mailing's details and return it as a Mailing."""
        xml = self.service.GetMailingDetails(self.username, self.password, guid, self.return_format)
        mailing = self._parse(xml, Mailing)
        mailing.guid = guid  # keep track of which mailing this came from
        return mailing

    def create_mailing(self, mailing):
        """Create the mailing on Docmail; stores the new guid on `mailing` and returns it."""
        # Note: Docmail's API takes a 'mono' flag, hence `not mailing.is_colour`.
        xml = self.service.CreateMailing(self.username, self.password, self.source,
                                         mailing.product_type,
                                         mailing.name,
                                         mailing.mailing_description,
                                         not mailing.is_colour,
                                         mailing.is_duplex,
                                         mailing.delivery_type,
                                         mailing.courier_delivery_to_self,
                                         mailing.despatch_asap,
                                         mailing.despatch_date,
                                         mailing.address_name_prefix,
                                         mailing.address_name_format,
                                         mailing.discount_code,
                                         mailing.min_envelope_size,
                                         self.return_format)
        ob = self._parse(xml)
        mailing.guid = ob.mailing_guid
        return mailing

    def update_mailing(self, mailing):
        """Push changed options for an existing mailing; returns Docmail's success flag."""
        xml = self.service.UpdateMailingOptions(self.username, self.password,
                                                mailing.guid,
                                                mailing.name,
                                                mailing.mailing_description,
                                                not mailing.is_colour,
                                                mailing.is_duplex,
                                                mailing.delivery_type,
                                                mailing.despatch_asap,
                                                mailing.despatch_date,
                                                mailing.address_name_prefix,
                                                mailing.address_name_format,
                                                mailing.discount_code,
                                                mailing.min_envelope_size,
                                                self.return_format)
        ob = self._parse(xml)
        return ob.success

    def add_template_file(self, mailing_guid, template_file):
        """Upload a TemplateFile (base64-encoded) to a mailing; stores and returns its guid."""
        xml = self.service.AddTemplateFile(self.username, self.password, mailing_guid,
                                           template_file.template_name,
                                           template_file.file_name,
                                           base64.b64encode(template_file.file_data),
                                           template_file.document_type,
                                           template_file.addressed_document,
                                           template_file.address_font_code,
                                           template_file.template_type,
                                           template_file.background_name,
                                           template_file.can_begin_on_back,
                                           template_file.next_template_can_begin_on_back,
                                           template_file.protected_area_password,
                                           template_file.encryption_password,
                                           template_file.bleed_supplied,
                                           template_file.copies,
                                           template_file.instances,
                                           template_file.instance_page_numbers,
                                           template_file.cycle_instances_on_copies,
                                           self.return_format)
        ob = self._parse(xml)
        template_file.guid = ob.template_guid
        return template_file

    def add_mailing_list_file(self, mailing_guid, mailing_list_file):
        """Upload a MailingListFile (base64-encoded) to a mailing; stores and returns its guid."""
        xml = self.service.AddMailingListFile(self.username, self.password, mailing_guid,
                                              mailing_list_file.file_name,
                                              base64.b64encode(mailing_list_file.file_data),
                                              mailing_list_file.data_format,
                                              mailing_list_file.headers,
                                              mailing_list_file.sheet_name,
                                              mailing_list_file.mapping_delimiter,
                                              mailing_list_file.mapping_fixed_width_chars,
                                              mailing_list_file.mapping_name,
                                              self.return_format)
        ob = self._parse(xml)
        mailing_list_file.guid = ob.mailing_list_guid
        return mailing_list_file

    def process_mailing(self, mailing_guid, submit=False, partial_process=True, **args):
        """Process (and optionally submit) a mailing; returns the success flag.

        Keyword args not supplied fall back to the PROCESS_MAILING defaults
        (payment method, notification lists, HTTP callbacks, etc.).
        """
        # NOTE(review): this key is set but never read below — the format is
        # passed positionally as the last call argument.  Looks vestigial.
        args['ReturnFormat'] = self.return_format
        for k, v in PROCESS_MAILING.items():
            if not args.has_key(k):  # Python 2 idiom; this module targets py2
                args[k] = v
        xml = self.service.ProcessMailing(self.username, self.password,
                                          mailing_guid, self.source,
                                          submit, partial_process,
                                          args['max_price_ex_vat'],
                                          args['po_reference'],
                                          args['payment_method'],
                                          args['skip_preview_image_generation'],
                                          args['email_success_list'],
                                          args['email_error_list'],
                                          args['http_post_on_success'],
                                          args['http_post_on_error'],
                                          self.return_format)
        return self._parse(xml).success

    def get_process_status(self, mailing_guid):
        """Return the processing status string for a mailing."""
        xml = self.service.GetStatus(self.username, self.password,
                                     mailing_guid, self.return_format)
        return self._parse(xml).status

    def get_topup_balance(self):
        """ Returns a float representing current balance for a topup account """
        xml = self.service.GetBalance(self.username, self.password,
                                      'Topup', self.return_format)
        return float(self._parse(xml).current_balance)

    def get_invoice_balance(self):
        """ Returns a float representing current balance for an invoice account """
        xml = self.service.GetBalance(self.username, self.password,
                                      'Invoice', self.return_format)
        return float(self._parse(xml).current_balance)

    def delete_mail_pack(self, mailing_guid):
        """Delete the mail pack for a mailing; returns the success flag."""
        xml = self.service.DeleteMailPack(self.username, self.password,
                                          mailing_guid, self.return_format)
        return self._parse(xml).success

    def delete_mailing_list(self, mailing_guid):
        """Delete the mailing list for a mailing; returns the success flag."""
        xml = self.service.DeleteMailingList(self.username, self.password,
                                             mailing_guid, self.return_format)
        return self._parse(xml).success

    def add_self(self, mailing_guid):
        # NOTE(review): this calls DeleteMailingList, identical to
        # delete_mailing_list above — looks like a copy/paste error; an
        # "add self to mailing" endpoint was presumably intended.  Confirm
        # against the Docmail API before using.
        xml = self.service.DeleteMailingList(self.username, self.password,
                                             mailing_guid, self.return_format)
        return self._parse(xml).success

    def auto_correct_addresses(self, mailing_guid, correction_method='Cost'):
        """Run Docmail's address auto-correction ('Cost' method by default); returns success."""
        xml = self.service.AutoCorrectAddresses(self.username, self.password,
                                                mailing_guid, correction_method,
                                                self.return_format)
        return self._parse(xml).success
def cancel_mailing_approval(self, mailing_guid):
xml = self.service.CancelMailingApproval(self.username, self.password,
mailing_guid, self.return_format)
return self._parse(xml).success | 46.659898 | 144 | 0.553742 |
__author__ = 'gwyn.howell@appogee.co.uk (Gwyn Howell)'
__version__ = '1.0'
__license__ = 'Apache License 2.0 (http://www.apache.org/licenses/LICENSE-2.0)'
from xml.dom.minidom import parseString
import base64
import datetime
import os.path
import re
import suds.client
from docmail import enums, util
DOCMAIL_WSDL_LIVE = 'https://www.cfhdocmail.com/LiveAPI2/DMWS.asmx?WSDL'
DOCMAIL_WSDL_TEST = 'https://www.cfhdocmail.com/TestAPI2/DMWS.asmx?WSDL'
DOCMAIL_WSDL_BETA = 'https://www.cfhdocmail.com/BetaAPI2/DMWS.asmx?WSDL'
DOCMAIL_WSDL = DOCMAIL_WSDL_TEST
RE_DATETIME = '^(0[1-9]|[12][0-9]|3[01])[/](0[1-9]|1[012])[/](19|20)\d\d[ ]([0-1][0-9]|2[0-3])[:][0-5][0-9][:][0-5][0-9]$'
PTN_DATETIME = '%d/%m/%Y %H:%M:%S'
PROCESS_MAILING = { 'po_reference': '',
'payment_method': 'Topup',
'email_success_list': '',
'email_error_list': '',
'http_post_on_success': '',
'http_post_on_error': '',
'max_price_ex_vat': 0,
'skip_preview_image_generation': False }
class DocmailException(Exception):
def __init__(self, code, error, description):
self.code = code
self.error = error
self.description = description
def __str__(self):
return '%s (%s): %s' % (self.error, self.code, self.description)
class DocmailObject(object):
def _format_data(self):
pass
class Mailing(DocmailObject):
def __init__(self, name=None):
self.product_type = enums.ProductType.A4Letter
self.name = name
self.mailing_description = None
self.is_colour = True
self.is_duplex = False
self.delivery_type = enums.DeliveryType.Standard
self.courier_delivery_to_self = False
self.despatch_asap = True
self.despatch_date = datetime.datetime.now() + datetime.timedelta(days=2)
self.address_name_prefix = None
self.address_name_format = enums.AddressNameFormat.FullName
self.discount_code = None
self.min_envelope_size = enums.MinEnvelopeSize.C5
@property
def is_mono(self):
raise Exception('property not supported - use is_colour instead')
def _format_data(self):
if self.mailing_list_guid == '00000000-0000-0000-0000-000000000000':
self.mailing_list_guid = None
if self.despatch_date == 'ASAP':
self.despatch_asap = True
self.despatch_date = None
else:
self.despatch_asap = False
class TemplateFile(object):
def __init__(self, file, data=None):
if data:
self.file_name = file
self.file_data = data
else:
if isinstance(file, basestring):
if os.path.isfile(file):
file = open(file, 'r')
self.file_name = file.name
self.file_data = file.read()
ext = self.file_name[self.file_name.rfind('.')+1:].lower()
if not ext in ('doc', 'docx', 'rtf'):
raise ValueError('file_name incorrect format, must be .doc, .docx or .rtf')
self.guid = None
self.template_name = ''
self.document_type = enums.DocumentType.A4Letter
self.address_font_code = enums.AddressFont.Arial10
self.template_type = 'Preformatted Stream' if ext == 'pdf' else 'Document'
self.background_name = ''
self.protected_area_password = ''
self.encryption_password = ''
self.instance_page_numbers = ''
self.addressed_document = True
self.can_begin_on_back = False
self.next_template_can_begin_on_back = False
self.bleed_supplied = True
self.copies = 1
self.instances = 1
self.cycle_instances_on_copies = False
class MailingListFile(object):
def __init__(self, file, data=None, sheet_name=None, data_format=None, mapping_delimiter=None, mapping_fixed_width_chars=None):
if data:
self.file_name = file
self.file_data = data
else:
if isinstance(file, basestring):
if os.path.isfile(file):
file = open(file, 'r')
self.file_name = file.name
self.file_data = file.read()
ext = self.file_name[self.file_name.rfind('.')+1:].lower()
if ext == 'csv':
data_format = 'CSV'
elif ext in ('xls', 'xlsx'):
data_format = 'Excel'
if not sheet_name:
raise ValueError('sheet_name argument must be provided for .%s files' % ext)
elif ext == 'txt':
if not data_format in ('Tab separated', 'Delimited', 'Fixed width'):
raise ValueError('data_format not supplied or not valid - must be "Tab separated", "Delimited", or "Fixed width" for txt files')
else:
raise ValueError('Unsupported file - %s. Please provide a .txt, .csv, .xls or .xlsx file' % ext)
if data_format == 'Delimited' and mapping_delimiter:
raise ValueError('mapping_delimiter arg must be provided if data_format is Delimited')
if data_format == 'Fixed width' and mapping_fixed_width_chars:
raise ValueError('mapping_fixed_width_chars arg must be provided if data_format is Fixed Width')
self.headers = True
self.mapping_name = ''
self.data_format = data_format or ''
self.mapping_delimiter = mapping_delimiter or ' '
self.sheet_name = sheet_name or ''
self.mapping_fixed_width_chars = mapping_fixed_width_chars or ''
class Client(suds.client.Client):
    """SOAP client for the Docmail web service.

    Extends :class:`suds.client.Client` with typed helpers for the Docmail
    workflow: create/update a mailing, attach template and mailing-list files,
    process/submit, and query balances and status.  Responses are parsed into
    python objects; service-reported errors raise :class:`DocmailException`.
    """

    def __init__(self, username, password, source='', wsdl_url=None, **kwargs):
        """
        :param username: Docmail account user name.
        :param password: Docmail account password.
        :param source:   optional application identifier sent with calls.
        :param wsdl_url: service WSDL URL; defaults to ``DOCMAIL_WSDL``.
        """
        if not wsdl_url:
            wsdl_url = DOCMAIL_WSDL
        self.source = source
        self.username = username
        self.password = password
        self.return_format = 'XML'
        self.failure_return_format = 'XML'
        suds.client.Client.__init__(self, wsdl_url, **kwargs)

    def _parse(self, xml, return_class=DocmailObject):
        """Parse a Docmail XML response into a ``return_class`` instance.

        The response is a flat list of (key, value) nodes; keys are normalised
        with :meth:`_format_key` and values coerced with :meth:`_format_value`.

        :raises DocmailException: if the response carries an error code.
        """
        ob = return_class()
        dom = parseString(xml)
        for node in dom.firstChild.childNodes:
            key = node.childNodes[0].firstChild.wholeText
            value = node.childNodes[1].firstChild.wholeText
            key = self._format_key(key)
            value = self._format_value(value)
            if key == 'error_code':
                # the error name and message follow at fixed child positions
                raise DocmailException(value,
                                       self._format_value(node.childNodes[3].firstChild.wholeText),
                                       self._format_value(node.childNodes[5].firstChild.wholeText))
            setattr(ob, key, value)
        if hasattr(ob, '_format_data'):
            ob._format_data()  # post-parse hook for richer result classes
        return ob

    def _format_key(self, key):
        """Normalise a response key, e.g. 'MailingGUID' -> 'mailing_guid'."""
        if ' ' not in key:
            key = '_'.join(util.split_caps(key))
        # NOTE(review): str.strip only removes these characters from the two
        # ends of the key, not throughout - presumed intentional sanitising.
        key = key.strip('\/:*?"<>|').replace(' ', '_').lower()
        return key

    def _format_value(self, value):
        """Coerce a response string: datetimes and yes/no become native types."""
        if re.match(RE_DATETIME, value):
            return datetime.datetime.strptime(value, PTN_DATETIME)
        if value.lower() == 'yes':
            return True
        if value.lower() == 'no':
            return False
        return value

    def get_mailing(self, guid):
        """Return the :class:`Mailing` identified by ``guid``."""
        xml = self.service.GetMailingDetails(self.username, self.password, guid, self.return_format)
        mailing = self._parse(xml, Mailing)
        mailing.guid = guid
        return mailing

    def create_mailing(self, mailing):
        """Create ``mailing`` on the service, set its ``guid`` and return it."""
        xml = self.service.CreateMailing(self.username, self.password, self.source,
                                         mailing.product_type,
                                         mailing.name,
                                         mailing.mailing_description,
                                         not mailing.is_colour,  # service expects 'is mono'
                                         mailing.is_duplex,
                                         mailing.delivery_type,
                                         mailing.courier_delivery_to_self,
                                         mailing.despatch_asap,
                                         mailing.despatch_date,
                                         mailing.address_name_prefix,
                                         mailing.address_name_format,
                                         mailing.discount_code,
                                         mailing.min_envelope_size,
                                         self.return_format)
        ob = self._parse(xml)
        mailing.guid = ob.mailing_guid
        return mailing

    def update_mailing(self, mailing):
        """Update the options of an existing mailing; return the success flag."""
        xml = self.service.UpdateMailingOptions(self.username, self.password,
                                                mailing.guid,
                                                mailing.name,
                                                mailing.mailing_description,
                                                not mailing.is_colour,  # service expects 'is mono'
                                                mailing.is_duplex,
                                                mailing.delivery_type,
                                                mailing.despatch_asap,
                                                mailing.despatch_date,
                                                mailing.address_name_prefix,
                                                mailing.address_name_format,
                                                mailing.discount_code,
                                                mailing.min_envelope_size,
                                                self.return_format)
        ob = self._parse(xml)
        return ob.success

    def add_template_file(self, mailing_guid, template_file):
        """Upload ``template_file`` to the mailing, set its ``guid``, return it."""
        xml = self.service.AddTemplateFile(self.username, self.password, mailing_guid,
                                           template_file.template_name,
                                           template_file.file_name,
                                           base64.b64encode(template_file.file_data),
                                           template_file.document_type,
                                           template_file.addressed_document,
                                           template_file.address_font_code,
                                           template_file.template_type,
                                           template_file.background_name,
                                           template_file.can_begin_on_back,
                                           template_file.next_template_can_begin_on_back,
                                           template_file.protected_area_password,
                                           template_file.encryption_password,
                                           template_file.bleed_supplied,
                                           template_file.copies,
                                           template_file.instances,
                                           template_file.instance_page_numbers,
                                           template_file.cycle_instances_on_copies,
                                           self.return_format)
        ob = self._parse(xml)
        template_file.guid = ob.template_guid
        return template_file

    def add_mailing_list_file(self, mailing_guid, mailing_list_file):
        """Upload ``mailing_list_file`` to the mailing, set its ``guid``, return it."""
        xml = self.service.AddMailingListFile(self.username, self.password, mailing_guid,
                                              mailing_list_file.file_name,
                                              base64.b64encode(mailing_list_file.file_data),
                                              mailing_list_file.data_format,
                                              mailing_list_file.headers,
                                              mailing_list_file.sheet_name,
                                              mailing_list_file.mapping_delimiter,
                                              mailing_list_file.mapping_fixed_width_chars,
                                              mailing_list_file.mapping_name,
                                              self.return_format)
        ob = self._parse(xml)
        mailing_list_file.guid = ob.mailing_list_guid
        return mailing_list_file

    def process_mailing(self, mailing_guid, submit=False, partial_process=True, **args):
        """Process (and optionally submit) the mailing; return the success flag.

        Keyword arguments not supplied fall back to ``PROCESS_MAILING``
        defaults.
        """
        args['ReturnFormat'] = self.return_format
        for k, v in PROCESS_MAILING.items():
            if k not in args:  # was args.has_key(k) - removed in Python 3
                args[k] = v
        xml = self.service.ProcessMailing(self.username, self.password,
                                          mailing_guid, self.source,
                                          submit, partial_process,
                                          args['max_price_ex_vat'],
                                          args['po_reference'],
                                          args['payment_method'],
                                          args['skip_preview_image_generation'],
                                          args['email_success_list'],
                                          args['email_error_list'],
                                          args['http_post_on_success'],
                                          args['http_post_on_error'],
                                          self.return_format)
        return self._parse(xml).success

    def get_process_status(self, mailing_guid):
        """Return the processing status string for the mailing."""
        xml = self.service.GetStatus(self.username, self.password,
                                     mailing_guid, self.return_format)
        return self._parse(xml).status

    def get_topup_balance(self):
        """Return the current top-up account balance as a float."""
        xml = self.service.GetBalance(self.username, self.password,
                                      'Topup', self.return_format)
        return float(self._parse(xml).current_balance)

    def get_invoice_balance(self):
        """Return the current invoice account balance as a float."""
        xml = self.service.GetBalance(self.username, self.password,
                                      'Invoice', self.return_format)
        return float(self._parse(xml).current_balance)

    def delete_mail_pack(self, mailing_guid):
        """Delete the mail pack from the mailing; return the success flag."""
        xml = self.service.DeleteMailPack(self.username, self.password,
                                          mailing_guid, self.return_format)
        return self._parse(xml).success

    def delete_mailing_list(self, mailing_guid):
        """Delete the mailing list from the mailing; return the success flag."""
        xml = self.service.DeleteMailingList(self.username, self.password,
                                             mailing_guid, self.return_format)
        return self._parse(xml).success

    def add_self(self, mailing_guid):
        """Add the account holder's own address to the mailing list.

        BUG FIX: this previously called ``DeleteMailingList`` (a copy/paste
        error) and so destructively deleted the mailing list instead.
        TODO(review): confirm the exact operation name against the Docmail WSDL.
        """
        xml = self.service.AddSelfToMailingList(self.username, self.password,
                                                mailing_guid, self.return_format)
        return self._parse(xml).success

    def auto_correct_addresses(self, mailing_guid, correction_method='Cost'):
        """Run the service's address auto-correction; return the success flag."""
        xml = self.service.AutoCorrectAddresses(self.username, self.password,
                                                mailing_guid, correction_method,
                                                self.return_format)
        return self._parse(xml).success

    def cancel_mailing_approval(self, mailing_guid):
        """Cancel a pending mailing approval; return the success flag."""
        xml = self.service.CancelMailingApproval(self.username, self.password,
                                                 mailing_guid, self.return_format)
        return self._parse(xml).success
f7f4894932b6cea8f41ce5dc47d58d60b353f5b0 | 552 | py | Python | oldp/utils/limited_paginator.py | docsuleman/oldp | 8dcaa8e6e435794c872346b5014945ace885adb4 | [
"MIT"
] | 66 | 2018-05-07T12:34:39.000Z | 2022-02-23T20:14:24.000Z | oldp/utils/limited_paginator.py | Justice-PLP-DHV/oldp | eadf235bb0925453d9a5b81963a0ce53afeb17fd | [
"MIT"
] | 68 | 2018-06-11T16:13:17.000Z | 2022-02-10T08:03:26.000Z | oldp/utils/limited_paginator.py | Justice-PLP-DHV/oldp | eadf235bb0925453d9a5b81963a0ce53afeb17fd | [
"MIT"
] | 15 | 2018-06-23T19:41:13.000Z | 2021-08-18T08:21:49.000Z | from math import ceil
from django.conf import settings
from django.core.paginator import Paginator
from django.utils.functional import cached_property
class LimitedPaginator(Paginator):
    """Paginator whose page count is capped at ``settings.PAGINATE_UNTIL``.

    Deep pagination makes the database scan and discard large offsets;
    capping the reported number of pages keeps those slow queries away.
    """

    @cached_property
    def num_pages(self):
        """Total number of pages, never exceeding the configured cap."""
        if self.count == 0 and not self.allow_empty_first_page:
            return 0
        # at least one page is reported once orphans are discounted
        countable = self.count - self.orphans
        if countable < 1:
            countable = 1
        uncapped = ceil(countable / self.per_page)
        return min(settings.PAGINATE_UNTIL, uncapped)
| 30.666667 | 71 | 0.706522 | from math import ceil
from django.conf import settings
from django.core.paginator import Paginator
from django.utils.functional import cached_property
class LimitedPaginator(Paginator):
    """Paginator that caps ``num_pages`` at ``settings.PAGINATE_UNTIL`` to
    avoid slow deep-offset database queries."""
    @cached_property
    def num_pages(self):
        """Return the total number of pages, capped at the configured limit."""
        if self.count == 0 and not self.allow_empty_first_page:
            return 0
        # never report fewer than one page once orphans are discounted
        hits = max(1, self.count - self.orphans)
        return min(settings.PAGINATE_UNTIL, ceil(hits / self.per_page))
| true | true |
f7f4895cf92cfb30274d315d1364a4edda968c07 | 65,867 | py | Python | geosoft/gxpy/tests/test_group.py | fearaschiarrai/gxpy | 4c5e7594b24e530a8cd94df1eef562c5c6ce3e92 | [
"BSD-2-Clause"
] | 25 | 2017-07-14T06:39:37.000Z | 2022-03-09T21:39:51.000Z | geosoft/gxpy/tests/test_group.py | fearaschiarrai/gxpy | 4c5e7594b24e530a8cd94df1eef562c5c6ce3e92 | [
"BSD-2-Clause"
] | 100 | 2016-12-13T17:30:41.000Z | 2021-08-01T20:21:13.000Z | geosoft/gxpy/tests/test_group.py | fearaschiarrai/gxpy | 4c5e7594b24e530a8cd94df1eef562c5c6ce3e92 | [
"BSD-2-Clause"
] | 28 | 2016-12-12T17:34:40.000Z | 2022-03-16T15:39:39.000Z | import unittest
import os
import numpy as np
import geosoft
import geosoft.gxapi as gxapi
import geosoft.gxpy.system as gsys
import geosoft.gxpy.map as gxmap
import geosoft.gxpy.geometry as gxgm
import geosoft.gxpy.grid as gxgrd
import geosoft.gxpy.agg as gxagg
import geosoft.gxpy.system as gxsys
import geosoft.gxpy.view as gxv
import geosoft.gxpy.group as gxg
import geosoft.gxpy.vv as gxvv
import geosoft.gxpy.viewer as gxviewer
from base import GXPYTest
def rect_line(g, size=100):
    """Draw a reference figure into group ``g``: an outer square of side
    ``size`` (cm), a light-green filled inner rectangle, and a dashed line
    across the shrunken inner box."""
    outline = gxgm.Point2((0, 0, size, size), coordinate_system="cm")
    g.rectangle(outline, pen=g.new_pen(line_thick=1))
    corner_min = gxgm.Point((0.1, 0.1)) * size
    corner_max = gxgm.Point((0.9, 0.9)) * size
    g.rectangle((corner_min, corner_max), pen=g.new_pen(fill_color=gxg.C_LT_GREEN))
    offset = gxgm.Point((0.15, 0.05)) * size
    inner = gxgm.Point2((corner_min + offset, corner_max - offset))
    dashed = g.new_pen(line_style=2, line_pitch=2.0)
    g.line((inner.p0.x, inner.p0.y, inner.p1.x, inner.p1.y), pen=dashed)
def pline():
    """Return a fixed nine-vertex test traverse as a ``gxgm.PPoint``."""
    vertices = [[10, 5], [20, 20], [30, 15],
                [50, 50], [60, 70], [75, 35],
                [90, 65], [20, 50], [35, 18.5]]
    return gxgm.PPoint(vertices)
def draw_stuff(g, size=1.0):
    """Draw an assortment of test geometry into group ``g``: two smoothed
    polylines over the same traverse, a styled polygon, and two shifted
    filled copies of that polygon, all scaled by ``size``.

    Statement order matters - the rendered result is pinned by CRC
    regression tests.
    """
    plinelist = [[110, 5],
                 [120, 20],
                 [130, 15],
                 [150, 50],
                 [160, 70],
                 [175, 35],
                 [190, 65],
                 [220, 50],
                 [235, 18.5]]
    pp = gxgm.PPoint.from_list(plinelist) * size
    # same traverse drawn twice: dashed, then Akima-smoothed
    g.pen = g.new_pen(line_style=2, line_pitch=2.0)
    g.polyline(pp)
    g.pen = g.new_pen(line_style=4, line_pitch=2.0, line_smooth=gxg.SMOOTH_AKIMA)
    g.polyline(pp)
    # polygon from the last six vertices, cubic-smoothed, red outline, blue fill
    ppp = np.array(plinelist)
    pp = gxgm.PPoint(ppp[3:, :]) * size
    g.pen = g.new_pen(line_style=5, line_pitch=5.0,
                      line_smooth=gxg.SMOOTH_CUBIC,
                      line_color=gxg.C_RED,
                      line_thick=0.25,
                      fill_color=gxg.C_LT_BLUE)
    g.polygon(pp)
    g.pen = g.new_pen(fill_color=gxg.C_LT_GREEN)
    # half-size copy re-anchored about x=100 (p1 and p2 are intentionally the
    # same point here, so this is shrink-about-(100, 0, 0))
    p1 = gxgm.Point((100, 0, 0)) * size
    p2 = gxgm.Point((100, 0, 0)) * size
    pp = (pp - p1) / 2 + p2
    g.polygon(pp)
    # and a red-filled copy shifted up 25 units
    pp += gxgm.Point((0, 25, 0)) * size
    g.pen = g.new_pen(fill_color=gxg.C_LT_RED)
    g.polygon(pp)
class Test(GXPYTest):
def test_version(self):
self.start()
self.assertEqual(gxmap.__version__, geosoft.__version__)
def test_create(self):
self.start()
def test_lock(self):
self.start()
with gxmap.Map.new(data_area=(0, 0, 50, 40), coordinate_system='cm') as map:
with gxv.View.open(map, 'data') as v:
self.assertFalse(bool(v.lock))
with gxg.Draw(v, 'rectangle') as g:
self.assertEqual(str(g), 'rectangle/data')
self.assertTrue(g.drawing_plane is None)
self.assertEqual(g.unit_of_measure, '')
self.assertTrue(bool(v.lock))
self.assertEqual(v.lock, 'rectangle')
self.assertRaises(gxg.GroupException, gxg.Group, v)
self.assertFalse(bool(v.lock))
def test_metadata(self):
self.start()
with gxmap.Map.new(data_area=(0, 0, 50, 40), coordinate_system='cm') as map:
with gxv.View.open(map, 'data') as v:
with gxg.Draw(v, 'rectangle') as g:
self.assertTrue(g.guid)
meta = g.gx_metadata
meta.node_token('maki/data/more')
meta.set_attribute('/maki/data/more/scale', 45)
meta.set_attribute('/maki/data/more/unit_of_measure', 'cm')
g.gx_metadata = meta
g.unit_of_measure = 'billy-bob'
with gxg.Draw(v, 'rectangle') as g:
meta = g.gx_metadata
self.assertTrue(meta.has_node('/maki/data'))
self.assertTrue(meta.has_node('/maki/data/more'))
self.assertEqual(meta.get_attribute('/maki/data/more/scale'), 45)
self.assertEqual(meta.get_attribute('/maki/data/more/unit_of_measure'), 'cm')
self.assertEqual(g.unit_of_measure, 'billy-bob')
def test_cs(self):
self.start()
with gxmap.Map.new(data_area=(0, 0, 50, 40), coordinate_system='cm') as map:
with gxv.View.open(map, 'data') as v:
with gxg.Draw(v, 'rectangle') as g:
self.assertEqual(g.drawing_coordinate_system.unit_of_measure, 'cm')
g.drawing_coordinate_system = "NAD83 / UTM zone 15N"
self.assertEqual(str(g.drawing_coordinate_system), "NAD83 / UTM zone 15N")
g.drawing_coordinate_system = None
self.assertEqual(g.drawing_coordinate_system.unit_of_measure, 'cm')
def test_extent(self):
self.start()
map_file = None
try:
with gxmap.Map.new(data_area=(3, 2, 50, 40), coordinate_system='cm', overwrite=True) as map:
map_file = map.file_name
with gxv.View.open(map, 'data') as v:
self.assertEqual(v.extent_map_cm(), (2.0, 6.0, 41.6, 38.4))
with gxg.Draw(v, 'rectangle') as g:
g.rectangle((3, 2, 28, 20),
pen=g.new_pen(line_thick=0.25, line_color='R', line_style=gxg.LINE_STYLE_LONG,
line_pitch=5))
self.assertEqual(g.extent, (3., 2., 28., 20.))
self.assertEqual(g.extent_map_cm(), (3.0, 7.0, 23.0, 21.4))
finally:
gxmap.delete_files(map_file)
@unittest.skip('skipping to let fixture pass')
def test_force_assert(self):
self.start()
with gxmap.Map.figure((0, 0, 1000, 1000)) as gmap:
with gxv.View.open(gmap, "data") as v:
gxapi.GXMVU.arrow(v.gxview, 500, 500, 450, 450, 0.5, 30, 1) # asserts
with gxg.Draw(v, "arrow") as g:
gxapi.GXMVU.arrow(g.view.gxview, 500, 500, 450, 450, 0.5, 30, 1)
def test_point(self):
self.start()
p1 = gxgm.Point((10, 20))
p2 = gxgm.Point((20, 20))
p3 = gxgm.Point((30, 20))
rect = gxgm.Point2((p1 - (15, 15), p3 + (15, 15)))
with gxmap.Map.new(data_area=rect.extent_xy) as gmap:
map_file = gmap.file_name
with gxv.View.new(gmap, "data") as v:
with gxg.Draw(v, 'test_point') as g:
g.pen = gxg.Pen(line_thick=1)
g.rectangle(rect)
g.pen = gxg.Pen(line_thick=2, line_color='R')
g.line((p1, p1)) # invisible - zero-length, but we should see it
g.pen = gxg.Pen(line_thick=2, line_color='G')
g.line((p2, p2 + (0.04, 0))) # invisible - bug
g.pen = gxg.Pen(line_thick=2, line_color='B')
g.line((p3, p3 + (0.05, 0))) # visible - correct!
self.crc_map(map_file, pix_width=800)
def test_points(self):
self.start()
plinelist = [[110, 5],
[120, 20],
[130, 15],
[150, 50],
[160, 70],
[175, 35],
[190, 65],
[220, 50],
[235, 18.5]]
pp = gxgm.PPoint.from_list(plinelist)
with gxmap.Map.new() as gmap:
map_file = gmap.file_name
with gxv.View.new(gmap, "points", area=(100, 0, 260, 100)) as v:
with gxg.Draw(v, 'test_group') as g:
g.rectangle(pp.extent, pen=gxg.Pen(line_thick=1))
g.pen = gxg.Pen(line_thick=2, line_color='B')
for p in pp:
g.point(p)
pp += (15, 15)
g.pen = gxg.Pen(line_thick=1.5, line_color='G')
g.polypoint(pp)
pp -= (0, 5)
g.pen = gxg.Pen(line_thick=1, line_color='R')
g.polypoint((gxvv.GXvv(pp.x), gxvv.GXvv(pp.y)))
self.crc_map(map_file, pix_width=800)
def test_rectangle(self):
self.start()
with gxmap.Map.new(data_area=(0, 0, 50, 40), coordinate_system='cm', overwrite=True) as map:
map_file = map.file_name
with gxv.View.open(map, 'data') as v:
with gxg.Draw(v, 'rectangle') as g:
g.rectangle(v.extent_clip, pen=g.new_pen(line_thick=0.5, line_color='B'))
g.rectangle((2, 2, 48, 38),
pen=g.new_pen(line_thick=0.25, line_color='R', line_style=gxg.LINE_STYLE_LONG,
line_pitch=5))
self.crc_map(map_file)
def test_smooth_line(self):
self.start()
pp = pline()
p1, p2 = pp.extent
area = (p1.x, p1.y, p2.x, p2.y)
with gxmap.Map.new() as map:
map_file = map.file_name
with gxv.View.new(map, 'smooth') as v:
v.locate(coordinate_system='mm', area=area, map_location=(1,1), scale=0.4)
with gxg.Draw(v) as g:
g.rectangle(v.extent_clip)
g.polyline(pp, pen=g.new_pen(line_smooth=gxg.SMOOTH_AKIMA, line_color='r', line_thick=1))
g.polyline(pp, pen=g.new_pen(line_smooth=gxg.SMOOTH_CUBIC, line_color='b', line_thick=2))
g.polyline(pp)
map.delete_view('data')
map.delete_view('base')
self.crc_map(map_file)
def test_view_groups_1(self):
self.start()
testmap = os.path.join(self.gx.temp_folder(), "test")
with gxmap.Map.new(testmap, overwrite=True) as gmap:
map_file = gmap.file_name
with gxv.View.new(gmap, "rectangle_test", area=(0, 0, 250, 125)) as v:
with gxg.Draw(v, 'test_group') as g:
rect_line(g)
g.graticule(25, 20, style=gxg.GRATICULE_LINE)
g.pen = g.new_pen(line_thick=0.1)
g.rectangle(((0, 0), (250, 125)), pen=g.new_pen(line_thick=0.1, line_color='R'))
with gxv.View.new(gmap, "poly") as v:
with gxg.Draw(v) as g:
draw_stuff(g)
try:
self.crc_map(map_file)
finally:
gxmap.delete_files(map_file)
def test_view_groups_2(self):
self.start()
testmap = os.path.join(self.gx.temp_folder(), "test")
with gxmap.Map.new(testmap, overwrite=True) as gmap:
map_file = gmap.file_name
with gxv.View.new(gmap, "rectangle_test", area=(0, 0, 250, 125)) as v:
with gxg.Draw(v, 'line') as g:
rect_line(g)
with gxg.Draw(v, 'graticule') as g:
g.graticule(25, 20, style=gxg.GRATICULE_LINE)
g.pen = g.new_pen(line_thick=0.1)
with gxg.Draw(v, 'test_rectangles') as g:
g.rectangle(((0, 0), (250, 125)), pen=g.new_pen(line_thick=0.1, line_color='R'))
g.rectangle(((10, 5), (240, 120)), pen=g.new_pen(line_thick=2, line_color='B'))
v.delete_group('graticule')
with gxv.View.new(gmap, "poly") as v:
with gxg.Draw(v, 'test_group') as g:
draw_stuff(g)
try:
self.crc_map(map_file)
finally:
gxmap.delete_files(map_file)
def test_reopen_map_view(self):
self.start()
testmap = os.path.join(self.gx.temp_folder(), "test")
with gxmap.Map.new(testmap, overwrite=True) as gmap:
map_file = gmap.file_name
with gxv.View.new(gmap, "test_view") as v:
with gxg.Draw(v) as g:
rect_line(g)
with gxv.View.open(gmap, "test_view") as v:
pass
gxmap.delete_files(map_file)
def test_3D(self):
self.start()
testmap = os.path.join(self.gx.temp_folder(), "test.map")
with gxmap.Map.new(testmap, overwrite=True) as gmap:
with gxv.View.open(gmap, "base") as view_base:
with gxg.Draw(view_base, 'Surround') as g:
g.rectangle(((0, 0), (280, 260)))
test3dv = os.path.join(self.gx.temp_folder(), "test.geosoft_3dv")
with gxv.View_3d.new(test3dv, overwrite=True) as view_3d:
self.assertTrue(view_3d.extent == None)
with gxg.Draw(view_3d, '2d_group') as g:
rect_line(g)
draw_stuff(g)
with gxg.Draw_3d(view_3d, '3d_group_cylinders') as g:
self.assertEqual(g.render_backfaces, False)
g.cylinder_3d(((100, 10, 10), (120, 10, 10)), 8, pen='r', close=gxg.CYLINDER_CLOSE_ALL)
self.assertEqual(view_3d.extent_xyz, (92.0, 2.0, 2.0, 128.0, 18.0, 18.0))
g.cylinder_3d(((100, 10, 70), (120, 10, 70)), 8, pen='c', close=gxg.CYLINDER_OPEN)
self.assertEqual(view_3d.extent_xyz, (92.0, 2.0, 2.0, 128.0, 18.0, 78.0))
g.cylinder_3d(((100, 10, 50), (120, 10, 50)), 8, pen='b', close=gxg.CYLINDER_CLOSE_END)
g.cylinder_3d(((100, 10, 30), (120, 10, 30)), 8, pen='g', close=gxg.CYLINDER_CLOSE_START)
self.assertEqual(view_3d.extent_xyz, (92.0, 2.0, 2.0, 128.0, 18.0, 78.0))
self.assertEqual(g.render_backfaces, True)
with gxg.Draw_3d(view_3d, '3d_group') as g:
g.cylinder_3d(((20, 10, 60), (80, 50, 80)), 5, pen='b')
g.cone_3d(((20, 10, 80), (80, 50, 60)), 8, pen='g')
g.cone_3d(((20, 50, 65), (20, 50, 40)), 30, pen='r')
g.sphere((20, 50, 80), 10, pen='c')
self.assertEqual(g.render_backfaces, False)
g.cylinder_3d(((80, 10, 0), (80, 10, 80)), 5, pen='y', close=gxg.CYLINDER_OPEN)
self.assertEqual(g.render_backfaces, True)
g.box_3d(((20, 10, 30), (80, 50, 50)), pen=g.new_pen(line_color='R255G100B50'))
g.box_3d(((80, 50, 50), (90,60, 65)), wireframe=True,
pen=g.new_pen(line_color='R25G255B50', line_thick=2))
with gxmap.Map.open(testmap) as gmap:
gmap.create_linked_3d_view(view_3d, area_on_map=(10, 10, 270, 250))
# test re-open a 3D view, with explicit close
view_3d = gxv.View_3d.open(test3dv)
group_list = view_3d.group_list
self.assertEqual(len(group_list), 3)
view_3d.close()
self.crc_map(test3dv, alt_crc_name=gxsys.func_name() + '_3dv')
self.crc_map(testmap, alt_crc_name=gxsys.func_name() + '_map')
def test_basic_grid_1(self):
self.start()
# test grid file
folder, files = gsys.unzip(os.path.join(os.path.dirname(self._test_case_py), 'testgrids.zip'),
folder=self.gx.temp_folder())
grid_file = os.path.join(folder, 'test_agg_utm.grd')
map_file = os.path.join(self.gx.temp_folder(), "test_agg_utm")
with gxgrd.Grid(grid_file) as grd:
cs = grd.coordinate_system
area = grd.extent_2d()
with gxmap.Map.new(map_file,
data_area=area, media="A4", margins=(0, 10, 0, 0),
coordinate_system=cs, overwrite=True) as gmap:
map_file = gmap.file_name
with gxv.View.open(gmap, "base") as v:
with gxg.Draw(v, 'line') as g:
g.rectangle(v.extent_clip, pen=g.new_pen(line_thick=1, line_color='K'))
with gxv.View.open(gmap, "data") as v:
with gxg.Draw(v, 'line') as g:
g.rectangle(area, pen=g.new_pen(line_thick=0.1, line_color='R'))
with gxagg.Aggregate_image.new(grid_file) as agg:
with gxg.Aggregate_group.new(v, agg) as gagg:
self.assertEqual(gagg.name, str(agg))
self.assertEqual(len(v.group_list_agg), 1)
self.crc_map(map_file)
def test_basic_grid_3D(self):
self.start()
# test grid file
folder, files = gsys.unzip(os.path.join(os.path.dirname(self._test_case_py), 'testgrids.zip'),
folder=self.gx.temp_folder())
grid_file = os.path.join(folder, 'test_agg_utm.grd')
with gxgrd.Grid(grid_file) as grd:
cs = grd.coordinate_system
area = grd.extent_2d()
with gxv.View_3d.new() as v:
v3d_file = v.file_name
with gxg.Draw(v, 'line') as g:
self.assertEqual(g.drawing_plane, 'Plane')
self.assertEqual(str(g), 'line/Plane/uuid_test_basic_grid_3D_1')
g.rectangle(area, pen=g.new_pen(line_thick=0.1, line_color='R'))
with gxagg.Aggregate_image.new(grid_file) as agg:
with gxg.Aggregate_group.new(v, agg) as gagg:
self.assertEqual(str(gagg), agg.name + '/Plane/uuid_test_basic_grid_3D_1')
self.assertEqual(len(v.group_list_agg), 1)
self.crc_map(v3d_file)
def test_basic_grid_2(self):
self.start()
# test grid file
folder, files = gsys.unzip(os.path.join(os.path.dirname(self._test_case_py), 'testgrids.zip'),
folder=self.gx.temp_folder())
grid_file = os.path.join(folder, 'test_agg_utm.grd')
map_file = os.path.join(self.gx.temp_folder(), "test_agg_utm")
with gxgrd.Grid(grid_file) as grd:
cs = grd.coordinate_system
area = grd.extent_2d()
with gxmap.Map.new(map_file,
data_area=area, media="A3", margins=(0, 0, 0, 0),
scale=(area[2] - area[0]) / 0.2,
coordinate_system=cs, overwrite=True) as gmap:
map_file = gmap.file_name
with gxv.View.open(gmap, "base") as v:
with gxg.Draw(v, 'line') as g:
g.rectangle(v.extent_clip, pen=g.new_pen(line_thick=2, line_color='K'))
with gxv.View.open(gmap, "data") as v:
with gxg.Draw(v, 'line') as g:
g.rectangle(area, pen=g.new_pen(line_thick=0.1, line_color='G'))
with gxagg.Aggregate_image.new(grid_file) as agg:
gxg.Aggregate_group.new(v, agg)
self.crc_map(map_file)
def test_zone_grid(self):
self.start()
def test_zone(zone, suffix, shade=False):
map_file = os.path.join(self.gx.temp_folder(), "test_agg_" + suffix)
with gxmap.Map.new(map_file, overwrite=True,
data_area=(ex[0], ex[1], ex[2], ex[3]),
scale=(ex[2] - ex[0]) / 0.2) as gmap:
map_file = gmap.file_name
with gxv.View.open(gmap, "data") as v:
with gxagg.Aggregate_image.new(grid_file, zone=zone, shade=shade) as agg:
gxg.Aggregate_group.new(v, agg)
gmap.delete_view('base')
self.crc_map(map_file, alt_crc_name='{}_{}'.format(gxsys.func_name(1), suffix))
# test grid file
folder, files = gsys.unzip(os.path.join(os.path.dirname(self._test_case_py), 'testgrids.zip'),
folder=self.gx.temp_folder())
with gxgrd.Grid(os.path.join(folder, 'test_agg_utm.grd')) as grd:
ex = grd.extent_2d()
grid_file = 'test_zone'
gxgrd.delete_files(grid_file)
with gxgrd.Grid.copy(grd, grid_file) as test:
grid_file = test.file_name
try:
test_zone(gxagg.ZONE_LINEAR, "linear_shade", shade=True)
test_zone(gxagg.ZONE_EQUALAREA, "eq_area")
test_zone(gxagg.ZONE_DEFAULT, "default")
test_zone(gxagg.ZONE_LAST, "last")
test_zone(gxagg.ZONE_LINEAR, "linear")
test_zone(gxagg.ZONE_NORMAL, "normal")
test_zone(gxagg.ZONE_SHADE, "shade")
test_zone(gxagg.ZONE_LOGLINEAR, "log_linear")
finally:
gxgrd.delete_files(grid_file)
def test_text_definition(self):
self.start()
t = gxg.Text_def()
self.assertEqual(t.slant, 0)
self.assertEqual(t.height, 0.25)
self.assertEqual(t.weight, gxg.FONT_WEIGHT_MEDIUM)
self.assertEqual(t.font, 'DEFAULT')
t.font = "Arial"
self.assertEqual(t.font, 'Arial')
self.assertEqual(t.mapplot_string, '0.25,,,0,"Arial(TT)"')
t.font = 'sr.gfn'
self.assertEqual(t.mapplot_string, '0.25,,,0,"sr"')
t.font = ''
self.assertEqual(t.mapplot_string, '0.25,,,0,"DEFAULT"')
t.italics = True
self.assertTrue(t.italics)
self.assertEqual(t.slant, 15)
t.italics = 0
self.assertFalse(t.italics)
self.assertEqual(t.slant, 0)
t.weight = gxg.FONT_WEIGHT_ULTRALIGHT
self.assertAlmostEqual(t.line_thick, 0.005208333333333333)
t.weight = gxg.FONT_WEIGHT_BOLD
self.assertAlmostEqual(t.line_thick, 0.020833333333333331)
thick = t.line_thick
t.weight = gxg.FONT_WEIGHT_XXBOLD
self.assertAlmostEqual(t.line_thick, 0.0625)
t.line_thick = thick
self.assertEqual(t.weight, gxg.FONT_WEIGHT_BOLD)
t.height = 10.
self.assertEqual(t.weight, gxg.FONT_WEIGHT_BOLD)
self.assertAlmostEqual(t.line_thick, 0.8333333333333333)
t.line_thick = t.line_thick
self.assertEqual(t.weight, gxg.FONT_WEIGHT_BOLD)
def test_colours(self):
self.start()
c = gxg.Color((150, 200, 500))
self.assertEqual(c.rgb, (150, 200, 255))
c = gxg.Color((150, 200, 500), model=gxg.CMODEL_CMY)
self.assertEqual(c.cmy, (150, 200, 255))
c = gxg.Color('r255g128b56')
self.assertEqual(c.rgb, (255, 128, 56))
self.assertEqual(c.cmy, (0, 127, 199))
c.rgb = (64, 32, 16)
self.assertEqual(c.rgb, (64, 32, 16))
c.cmy = (100, 200, 300)
self.assertEqual(c.cmy, (100, 200, 255))
c = gxg.Color((0, 127, 64), gxg.CMODEL_HSV)
self.assertEqual(c.rgb, (191, 96, 96))
c = gxg.Color((0, 127, 64), gxg.CMODEL_RGB)
self.assertEqual(c.rgb, (0, 127, 64))
c = gxg.Color(gxg.C_GREEN)
self.assertEqual(c.rgb, (0, 255, 0))
c2 = gxg.Color(c)
self.assertEqual(c2.rgb, (0, 255, 0))
c = gxg.Color(gxg.C_TRANSPARENT)
self.assertEqual(c.rgb, None)
self.assertEqual(c.cmy, None)
self.assertTrue(c == gxg.Color(gxg.C_TRANSPARENT))
def test_pen(self):
self.start()
p = gxg.Pen()
self.assertEqual(p.line_color.int_value, gxg.C_BLACK)
self.assertEqual(p.fill_color.int_value, gxg.C_TRANSPARENT)
self.assertEqual(p.line_style, gxg.LINE_STYLE_SOLID)
p.line_color = (255, 127, 64)
self.assertEqual(p.mapplot_string, 'r255g127b64t10')
p2 = gxg.Pen(line_color = (255, 127, 64))
self.assertTrue(p == p2)
p2.line_color = 'K'
self.assertFalse(p == p2)
p = gxg.Pen.from_mapplot_string('r20b100k16R64K16')
ms = p.mapplot_string
self.assertEqual(ms, 'r4g0b84R48G0B0t1')
p = gxg.Pen.from_mapplot_string(ms)
self.assertEqual(p.mapplot_string, ms)
p = gxg.Pen.from_mapplot_string('c64K64')
self.assertEqual(p.line_color.rgb, (191, 255, 255))
self.assertEqual(p.fill_color.rgb, (191, 191, 191))
p = gxg.Pen(line_color='K')
self.assertEqual(p.line_color.int_value, gxg.C_BLACK)
self.assertTrue(p.line_color == gxg.Color(gxg.C_BLACK))
p = gxg.Pen(line_color=gxg.C_WHITE)
self.assertEqual(p.line_color.int_value, gxg.C_WHITE)
self.assertTrue(p.line_color == gxg.Color(gxg.C_WHITE))
p = gxg.Pen.from_mapplot_string('r20b100k16R64K16')
p = gxg.Pen(default=p, line_thick=0.5, fill_color='K')
ms = p.mapplot_string
self.assertEqual(ms, 'r4g0b84R0G0B0t500')
p = gxg.Pen.from_mapplot_string(ms)
self.assertEqual(p.mapplot_string, ms)
self.assertRaises(gxg.GroupException, gxg.Pen, bad=1)
def test_scaled(self):
self.start()
p = gxg.Pen(factor=10)
self.assertEqual(p.line_thick, 0.1)
self.assertEqual(p.line_pitch, 5.0)
self.assertEqual(p.pat_thick, 0.1)
self.assertEqual(p.pat_size, 10.0)
p = gxg.Pen(default=p, factor=5)
self.assertEqual(p.line_thick, 0.5)
self.assertEqual(p.line_pitch, 25.0)
self.assertEqual(p.pat_thick, 0.5)
self.assertEqual(p.pat_size, 50.0)
t = gxg.Text_def(factor=0.2)
self.assertEqual(t.height, 0.05)
def test_text(self):
self.start()
with gxmap.Map.new(data_area=(400000, 5000000, 500000, 5150000),
coordinate_system='WGS 84 / UTM zone 15N [geoid]') as map:
map_file = map.file_name
with gxv.View.open(map, 'base') as v:
with gxg.Draw(v) as g:
g.rectangle(g.extent)
g.text('Text on base view')
g.text('Bigger, blue, higher',
(v.units_per_map_cm, v.units_per_map_cm),
text_def=gxg.Text_def(height=20, color='B', font='Times New Roman'))
g.text('Bigger, blue, angled, italics',
(10, 25),
angle=60,
text_def=gxg.Text_def(height=20, color='B', font='Calibri', italics=True))
g.text_def = gxg.Text_def(height=20, color='B', font='Calibri', italics=True)
tex = g.text_extent('Bigger, blue, angled, italics')
self.assertAlmostEqual(209.9629, tex.dimension_xy[0], 3)
self.assertAlmostEqual(334.6408, tex.dimension_xy[1], 3)
tex = g.text_extent('Bigger, blue, angled, italics',
gxg.Text_def(height=10, font='Calibri', italics=True))
self.assertAlmostEqual(104.98147, tex.dimension_xy[0], 3)
self.assertAlmostEqual(167.32042, tex.dimension_xy[1], 3)
self.crc_map(map_file)
def test_text_1(self):
self.start()
with gxmap.Map.new(data_area=(400000, 5000000, 500000, 5050000),
coordinate_system='WGS 84 / UTM zone 15N [geoid]') as map:
map_file = map.file_name
with gxv.View.open(map, '*data') as v:
with gxg.Draw(v) as g:
g.rectangle(g.extent)
ex = g.extent
width = ex[2] - ex[0]
height = ex[3] - ex[1]
cxy = (ex[0] + width / 2, ex[1] + height / 2)
td = gxg.Text_def(height=width / 20, color='K128', font='sr.gfn', weight=gxg.FONT_WEIGHT_XBOLD)
self.assertTrue(td == gxg.Text_def(height=width / 20, color='K128', font='sr.gfn', weight=gxg.FONT_WEIGHT_XBOLD))
self.assertEqual(td.mapplot_string, '5227.3,,,0,"sr"')
g.rectangle(ex)
g.line((ex[0], cxy[1], ex[2], cxy[1]))
g.line((cxy[0], ex[1], cxy[0], ex[3]))
g.text('Centered',
cxy,
text_def=td,
reference=gxg.REF_CENTER)
g.text('Bottom',
(cxy[0], ex[1]),
text_def=td,
reference=gxg.REF_BOTTOM_CENTER)
g.text('Top',
(cxy[0], ex[3]),
text_def=td,
reference=gxg.REF_TOP_CENTER)
g.text('Left',
(ex[0], cxy[1]),
text_def=td,
angle=90,
reference=gxg.REF_TOP_CENTER)
g.text('Right',
(ex[2], cxy[1]),
text_def=td,
angle=-90,
reference=gxg.REF_TOP_CENTER)
self.crc_map(map_file)
def test_text_multiline(self):
self.start()
with gxmap.Map.new(data_area=(400000, 5000000, 500000, 5050000),
coordinate_system='WGS 84 / UTM zone 15N [geoid]') as map:
map_file = map.file_name
with gxv.View.open(map, '*data') as v:
with gxg.Draw(v) as g:
g.rectangle(g.extent)
ex = v.extent_clip
width = ex[2] - ex[0]
height = ex[3] - ex[1]
cxy = (ex[0] + width / 2, ex[1] + height / 2)
td = gxg.Text_def(height=width / 20, color='K128', font='sr.gfn', weight=gxg.FONT_WEIGHT_XBOLD)
g.rectangle(ex)
g.line((ex[0], cxy[1], ex[2], cxy[1]))
g.line((cxy[0], ex[1], cxy[0], ex[3]))
g.text('Centered\nline2\nand another',
cxy,
text_def=td,
reference=gxg.REF_CENTER)
self.crc_map(map_file)
def test_locate_group(self):
self.start()
with gxmap.Map.new(data_area=(400000, 5000000, 500000, 5050000),
coordinate_system='WGS 84 / UTM zone 15N [geoid]') as map:
map_file = map.file_name
with gxv.View.open(map, '*data') as v:
with gxg.Draw(v) as g:
g.rectangle(v.extent_clip)
rect = gxgm.Point2((v.extent_clip[0], v.extent_clip[1],
(v.extent_clip[2] + v.extent_clip[0]) * 0.5,
(v.extent_clip[3] + v.extent_clip[1]) * 0.5))
with gxg.Draw(v, 'a') as g:
g.rectangle(rect)
with gxg.Draw(v, 'b') as g:
self.assertEqual(g.number, 2)
g.rectangle(rect, pen="b")
g.locate((450000, 5025000),
reference=gxg.REF_TOP_CENTER)
self.crc_map(map_file)
def test_color_bar(self):
self.start()
# test grid file
folder, files = gsys.unzip(os.path.join(os.path.dirname(self._test_case_py), 'testgrids.zip'),
folder=self.gx.temp_folder())
grid_file = os.path.join(folder, 'test_agg_utm.grd')
map_file = os.path.join(self.gx.temp_folder(), "test_agg_utm")
with gxgrd.Grid.open(grid_file, mode=gxgrd.FILE_READWRITE) as grd:
cs = grd.coordinate_system
area = grd.extent_2d()
grd.unit_of_measure = 'maki'
with gxmap.Map.new(map_file, fixed_size=False,
data_area=area, media="A4", margins=(7, 7, 2.5, 2.5),
coordinate_system=cs, overwrite=True) as gmap:
map_file = gmap.file_name
with gxv.View.open(gmap, "base") as v:
with gxg.Draw(v, 'line') as g:
g.rectangle(v.extent_clip, pen=g.new_pen(line_thick=1, line_color='K'))
with gxv.View.open(gmap, "data") as v:
with gxg.Draw(v, 'line') as g:
g.rectangle(v.extent_clip, pen=g.new_pen(line_thick=0.1, line_color='G'))
g.rectangle(v.extent_all, pen=g.new_pen(line_thick=0.1, line_color='B'))
with gxagg.Aggregate_image.new(grid_file) as agg:
self.assertEqual(agg.layer_unit_of_measure(0), 'maki')
self.assertEqual(agg.layer_unit_of_measure(agg.layer_file_names[0]), 'maki')
self.assertEqual(agg.layer_color_map(0).unit_of_measure, 'maki')
gxg.legend_color_bar(v, 'color_legend', agg.layer_color_map())
gxg.legend_color_bar(v, 'color_legend', agg.layer_color_map(),
bar_location=gxg.COLOR_BAR_LEFT)
gxg.legend_color_bar(v, 'bottom', agg.layer_color_map(),
bar_location=gxg.COLOR_BAR_BOTTOM,
box_size=0.5,
location=(1, -0.1),
annotation_offset=0.1)
gxg.legend_color_bar(v, 'top', agg.layer_color_map(),
bar_location=gxg.COLOR_BAR_TOP,
box_size=0.5,
bar_width=0.1,
location=0.5,
interval_1 = 50,
annotation_offset=0.1)
self.crc_map(map_file)
    def test_color_bar_existing_agg(self):
        """Build a color-bar legend from an aggregate group re-opened from a stored view."""
        self.start()
        # extract the test grid from the zip fixture into a temp folder
        folder, files = gsys.unzip(os.path.join(os.path.dirname(self._test_case_py), 'testgrids.zip'),
                                   folder=self.gx.temp_folder())
        grid_file = os.path.join(folder, 'test_agg_utm.grd')
        map_file = os.path.join(self.gx.temp_folder(), "test_agg_utm")
        with gxgrd.Grid(grid_file) as grd:
            cs = grd.coordinate_system
            area = grd.extent_2d()
        with gxmap.Map.new(map_file, fixed_size=False,
                           data_area=area, media="A4", margins=(2, 10, 2, 1),
                           coordinate_system=cs, overwrite=True) as gmap:
            map_file = gmap.file_name
            with gxv.View.open(gmap, "base") as v:
                with gxg.Draw(v, 'line') as g:
                    g.rectangle(v.extent_clip, pen=g.new_pen(line_thick=1, line_color='K'))
            with gxv.View.open(gmap, "data") as v:
                with gxg.Draw(v, 'line') as g:
                    g.rectangle(v.extent_clip, pen=g.new_pen(line_thick=0.1, line_color='G'))
                    g.rectangle(v.extent_all, pen=g.new_pen(line_thick=0.1, line_color='B'))
                with gxagg.Aggregate_image.new(grid_file) as agg:
                    with gxg.Aggregate_group.new(v, agg) as g:
                        agg_group_name = g.name
            # re-open the view and take the color map from the stored aggregate group
            with gxv.View.open(gmap, "data") as v:
                with gxg.Aggregate_group.open(v, agg_group_name) as g:
                    gxg.legend_color_bar(v, 'color_legend', g.agg.layer_color_map())
        self.crc_map(map_file)
    def test_properties(self):
        """Exercise the group-level 'visible' property on a drawing group."""
        self.start()
        with gxmap.Map.new() as map:
            with gxv.View.open(map, "base") as v:
                with gxg.Draw(v, 'edge') as g:
                    g.rectangle(v.extent_clip, pen=g.new_pen(line_thick=1, line_color='K'))
            with gxv.View.open(map, "data") as v:
                with gxg.Draw(v, 'edge') as g:
                    g.rectangle(v.extent_clip, pen=g.new_pen(line_thick=1, line_color='B'))
                    # groups are visible by default and can be toggled
                    self.assertTrue(g.visible)
                    g.visible = False
                    self.assertFalse(g.visible)
    def test_graticule(self):
        """Draw line, dot and cross style graticules in three side-by-side views; CRC-check the map."""
        self.start()
        test_map = os.path.join(self.gx.temp_folder(), "test")
        with gxmap.Map.new(test_map, overwrite=True) as map:
            map_file = map.file_name
            map.delete_view('data')
            with gxv.View.new(map, "my_data_1", map_location=(2, 3), area=(0, 0, 1000, 1500), scale=10000) as v:
                with gxg.Draw(v, 'line') as g:
                    g.rectangle(v.extent_clip,
                                pen=g.new_pen(line_thick=5, line_color='G'))
                    g.graticule(style=gxg.GRATICULE_LINE, pen=g.new_pen(line_thick=5))
                ex1 = v.extent_group('line', unit=gxv.UNIT_MAP)
            with gxv.View.new(map, "my_data_2", map_location=(15, 3), area=(0, 0, 1000, 1500), scale=10000) as v:
                with gxg.Draw(v, 'line') as g:
                    g.rectangle(v.extent_clip,
                                pen=g.new_pen(line_thick=5, line_color='G'))
                    g.graticule(style=gxg.GRATICULE_DOT, pen=g.new_pen(line_thick=5))
                ex2 = v.extent_group('line', unit=gxv.UNIT_MAP)
            with gxv.View.new(map, "my_data_3", map_location=(28, 3), area=(0, 0, 1000, 1500), scale=10000) as v:
                with gxg.Draw(v, 'line') as g:
                    g.rectangle(v.extent_clip,
                                pen=g.new_pen(line_thick=5, line_color='G'))
                    g.graticule(style=gxg.GRATICULE_CROSS, pen=g.new_pen(line_thick=5))
                ex3 = v.extent_group('line', unit=gxv.UNIT_MAP)
            # surround area from the three group extents, with a margin
            # NOTE(review): the second element uses max(...) - 2 where min(...) would be
            # expected for the minimum y of an (xmin, ymin, xmax, ymax) extent -- confirm.
            area = (min(ex1[0], ex2[0], ex3[0])/10.0 - 2, max(ex1[1], ex2[1], ex3[1])/10.0 - 2,
                    max(ex1[2], ex2[2], ex3[2])/10.0 + 2, max(ex1[3], ex2[3], ex3[3])/10.0 + 2)
            with gxv.View.new(map, "my_base_view", area=area, scale=100.0) as v:
                with gxg.Draw(v, 'base_edge') as g:
                    g.rectangle(v.extent_clip, pen=g.new_pen(line_thick=0.1, line_color='R'))
            map.delete_view('base')
        self.crc_map(map_file)
    def test_ppoint_3d(self):
        """Draw a 3D poly-point group as dots and as spheres, then CRC-check the view."""
        self.start()
        plist = [[110, 5, 0],
                 [120, 20, 10],
                 [130, 15, 50],
                 [150, 50, 20],
                 [160, 70, 0],
                 [175, 35, 30],
                 [190, 65, 80],
                 [220, 50, 90],
                 [235, 18.5, 100]]
        pp = gxgm.PPoint(plist)
        with gxv.View_3d.new(gxsys.func_name(), overwrite=True) as v:
            file_name = v.file_name
            with gxg.Draw_3d(v) as g:
                g.pen = gxg.Pen(line_color='R')
                g.polypoint_3d(pp)
                # second pass offset 20 up in z and drawn as spheres
                pp += (0, 0, 20)
                g.polypoint_3d(pp, style=gxg.POINT_STYLE_SPHERE, pen=gxg.Pen(line_color='G', line_thick=5))
        try:
            self.crc_map(file_name)
        finally:
            gxmap.delete_files(file_name)
    def test_pp_3d(self):
        """Draw 3D points and polylines in several styles (dots, spheres, lines, tubes); CRC-check."""
        self.start()
        plist = [[110, 5, 0],
                 [120, 20, 10],
                 [130, 15, 50],
                 [150, 50, 20],
                 [160, 70, 0],
                 [175, 35, 30],
                 [190, 65, 80],
                 [220, 50, 90],
                 [235, 18.5, 100]]
        with gxv.View_3d.new(gxsys.func_name(), overwrite=True) as v:
            file_name = v.file_name
            with gxg.Draw_3d(v) as g:
                pp = gxgm.PPoint(plist)
                # each pass is offset 10 up in z so the styles stack visibly
                g.pen = gxg.Pen(line_color='R')
                g.polypoint_3d(pp)
                pp += (0, 0, 10)
                g.polypoint_3d(pp, style=gxg.POINT_STYLE_SPHERE, pen=gxg.Pen(line_color='G', line_thick=4))
                pp += (0, 0, 10)
                g.pen = gxg.Pen(line_color='R')
                g.polyline_3d(pp)
                pp += (0, 0, 10)
                g.pen = gxg.Pen(line_color='C', line_thick=3)
                g.polyline_3d(pp, style=gxg.LINE3D_STYLE_TUBE)
                pp += (0, 0, 10)
                g.polyline_3d(pp, style=gxg.LINE3D_STYLE_TUBE_JOINED, pen=gxg.Pen(line_color='K64', line_thick=4))
        try:
            self.crc_map(file_name)
        finally:
            gxmap.delete_files(file_name)
    def test_color_map(self):
        """Exercise Color_map construction, indexing, brightness, scaling modes and file round-trip."""
        self.start()
        # default color map
        cm = gxg.Color_map()
        self.assertEqual(cm.length, 39)
        self.assertFalse(cm.initialized)
        # explicit length; items index as (limit, Color) pairs
        cm = gxg.Color_map(16)
        self.assertEqual(cm.length, 16)
        self.assertEqual(cm[0][1], gxg.Color(gxg.C_BLACK))
        self.assertEqual(cm[cm.length-1], (None, gxg.Color(gxg.C_BLACK)))
        # items can be replaced by assignment
        cm[4] = (cm[4][0], gxg.Color(gxg.C_GREEN))
        self.assertEqual(cm[4][1].rgb, (0, 255, 0))
        self.assertFalse(cm.initialized)
        self.assertTrue(isinstance(cm.gxitr, gxapi.GXITR))
        # named color table, sequential initialization
        cm = gxg.Color_map('grey')
        self.assertFalse(cm.initialized)
        cm.set_sequential()
        self.assertTrue(cm.initialized)
        self.assertEqual(cm.length, 32)
        self.assertEqual(cm[0][1].rgb, (31, 31, 31))
        self.assertEqual(cm[cm.length-1][1].rgb, (255, 255, 255))
        # values at and just above a zone limit map to adjacent colors
        self.assertEqual(cm.color_of_value(0), cm[0][1])
        self.assertEqual(cm.color_of_value(7.0), cm[7][1])
        self.assertEqual(cm.color_of_value(7.000001), cm[8][1])
        # brightness: positive lightens, negative darkens, 0 restores
        self.assertEqual(cm.brightness, 0.)
        cm.brightness = 0.5
        self.assertEqual(cm.brightness, 0.5)
        self.assertEqual(cm[0][1].rgb, (143, 143, 143))
        self.assertEqual(cm[cm.length - 1][1].rgb, (255, 255, 255))
        cm.brightness = -0.25
        self.assertEqual(cm.brightness, -0.25)
        self.assertEqual(cm[0][1].rgb, (24, 24, 24))
        self.assertEqual(cm[cm.length - 1][1].rgb, (192, 192, 192))
        cm.brightness = 0
        self.assertEqual(cm[0][1].rgb, (31, 31, 31))
        self.assertEqual(cm[cm.length - 1][1].rgb, (255, 255, 255))
        # linear scaling, inner vs outer limits, and contour-interval zoning
        cm.set_linear(4, 45)
        self.assertEqual(cm.length, 32)
        self.assertEqual(cm[0][0], 4)
        self.assertEqual(cm[30][0], 45)
        cm.set_linear(4, 45, inner_limits=False)
        self.assertEqual(cm.length, 32)
        self.assertEqual(cm[0][0], 5.28125)
        self.assertEqual(cm[30][0], 43.71875)
        cm.set_linear(5, 50, contour_interval=5)
        self.assertEqual(cm.length, 11)
        # logarithmic scaling
        cm = gxg.Color_map('grey')
        cm.set_logarithmic(0.0001,1000)
        self.assertEqual(cm.length, 32)
        cm.set_logarithmic(0.0001,1000, contour_interval=10)
        self.assertEqual(cm.length, 7)
        cm = gxg.Color_map('grey')
        cm.set_logarithmic(0.000023,18000, contour_interval=100)
        self.assertEqual(cm.length, 5)
        # normal-distribution scaling (called elsewhere in this file as set_normal(std, mean))
        cm = gxg.Color_map()
        cm.set_normal(25, 55000)
        self.assertAlmostEqual(cm[cm.length//2][0], 55000.811582690316)
        # round-trip through a saved itr file, and a .tbl from an uninitialized map
        itr = cm.save_file()
        cm2 = gxg.Color_map(itr)
        self.assertTrue(cm == cm2)
        tbl = gxg.Color_map().save_file()
        self.assertEqual(os.path.splitext(tbl)[1], '.tbl')
        cm = gxg.Color_map(tbl)
        self.assertFalse(cm.initialized)
    def test_color_symbols(self):
        """Draw color-classified symbol groups in a 2D view, with a legend; CRC-check the map."""
        self.start()
        # rows are ((x, y), value)
        data = [((0, 0), 1),
                ((10, 0), 2),
                ((0, 10), 3),
                ((10, 10), 4)]
        # rows with None exercise missing-coordinate/value handling
        # NOTE(review): data2 rows carry an extra coordinate and value -- confirm
        # field meanings against Color_symbols_group.new documentation
        data2 = [((0, 0, 45), 1, 4),
                 ((10, 0, 8), None, None),
                 ((0, 10, 16), 3, 75),
                 ((None, 10, -22), 4, 7)]
        cmap = gxg.Color_map()
        cmap.set_linear(0, 5, contour_interval=1)
        with gxmap.Map.new(data_area=(-1, -1, 11, 11), scale=100) as map:
            map_file = map.file_name
            with gxv.View.open(map, '*data') as v:
                with gxg.Draw(v) as g:
                    g.rectangle(g.extent)
                gxg.Color_symbols_group.new(v, 'outer_symbols', data, cmap, unit_of_measure='maki').close()
                # unit of measure round-trips through the stored color map
                with gxg.Color_symbols_group.open(v, 'outer_symbols') as cs:
                    cm = cs.color_map()
                    self.assertEqual(cm.unit_of_measure, 'maki')
                    self.assertEqual(cm.unit_of_measure, cs.unit_of_measure)
                cmap = gxg.Color_map()
                cmap.set_linear(0, 5, contour_interval=1)
                with gxg.Color_symbols_group.new(v, 'mark', data2, cmap,
                                                 symbol=gxg.SYMBOL_BOX,
                                                 symbol_def=gxg.Text_def(font='symbols.gfn',
                                                                         height=0.15,
                                                                         color=gxg.C_WHITE,
                                                                         weight=gxg.FONT_WEIGHT_ULTRALIGHT)) as cs:
                    nv = cs.name
                with gxg.Color_symbols_group.open(v, nv) as cs:
                    gxg.legend_color_bar(v, 'symbol_legend', cs.color_map())
        self.crc_map(map_file)
    def test_color_symbols_from_array(self):
        """Create a color symbols group directly from a numpy array of (x, y, value) rows."""
        self.start()
        data = [(0, 0, 1),
                (10, 0, 2),
                (0, 10, 3),
                (10, 10, 4)]
        cmap = gxg.Color_map()
        cmap.set_linear(0, 5, contour_interval=1)
        with gxmap.Map.new(data_area=(-1, -1, 11, 11), scale=100) as map:
            map_file = map.file_name
            with gxv.View.open(map, '*data') as v:
                with gxg.Draw(v) as g:
                    g.rectangle(g.extent)
                # numpy array input instead of a list of tuples
                gxg.Color_symbols_group.new(v, 'outer_symbols',
                                            np.array(data), cmap,
                                            unit_of_measure='maki').close()
                cmap = gxg.Color_map()
                cmap.set_linear(0, 5, contour_interval=1)
        self.crc_map(map_file)
    def test_color_symbols_3d(self):
        """Draw color-classified symbol groups in a 3D view and CRC-check the result."""
        self.start()
        # rows are ((x, y), value)
        data = [((0, 0), 1),
                ((10, 0), 2),
                ((0, 10), 3),
                ((10, 10), 4)]
        # rows with None exercise missing-coordinate/value handling
        data2 = [((0, 0, 45), 1, 4),
                 ((10, 0, 8), None, None),
                 ((0, 10, 16), 3, 75),
                 ((None, 10, -22), 4, 7)]
        cmap = gxg.Color_map()
        cmap.set_linear(0, 5, contour_interval=1)
        with gxv.View_3d.new() as v:
            v3d_file = v.file_name
            with gxg.Draw(v) as g:
                g.rectangle(g.extent)
            gxg.Color_symbols_group.new(v, 'outer_symbols', data, cmap, unit_of_measure='maki').close()
            cmap = gxg.Color_map('hotcycle')
            cmap.set_linear(0, 5, contour_interval=1)
            with gxg.Color_symbols_group.new(v, 'mark', data2, cmap,
                                             symbol=gxg.SYMBOL_BOX,
                                             symbol_def=gxg.Text_def(font='symbols.gfn',
                                                                     height=0.15,
                                                                     color=gxg.C_WHITE,
                                                                     weight=gxg.FONT_WEIGHT_ULTRALIGHT)) as cs:
                nv = cs.name
            with gxg.Color_symbols_group.open(v, nv) as cs:
                # group number 2: drawn after 'outer_symbols'
                self.assertEqual(cs.number, 2)
        self.crc_map(v3d_file)
def test_polydata_3d(self):
self.start()
def render_spheres(item, cmap_radius):
xyz, value = item
if None in xyz or value is None:
return None
cmap, radius = cmap_radius
cint = cmap.color_of_value(value)
return gxg.SYMBOL_3D_SPHERE, xyz, cint.int_value, radius
def render_cubes(point, size_color):
size, cint = size_color
half = size * 0.5
p2 = gxgm.Point2((point - (half, half, half), point + (half, half, half)))
return gxg.SYMBOL_3D_CUBE, p2, cint, None
def render_cylinders(point, size_color):
size, cint = size_color
half = size * 0.2
p2 = gxgm.Point2((point - (half, half, half), point + (half, half, half)))
return gxg.SYMBOL_3D_CYLINDER, p2, cint, size * 0.4
def render_cones(point, size_color):
size, cint = size_color
half = size * 0.5
p2 = gxgm.Point2((point - (half, half, half), point + (half, half, half)))
return gxg.SYMBOL_3D_CONE, p2, cint, size * 0.2
data = [((0, 0, 0), 1),
((10, 0, 5), 2),
((0, 10, -5), 3),
((0, None, -5), 99),
((0, 10, -5), None),
((10, 10, 10), 4)]
cmap = gxg.Color_map()
cmap.set_linear(0, 5, contour_interval=1)
for c in cmap:
if c[0]:
self.assertTrue(isinstance(c[0], float))
self.assertTrue(isinstance(c[1], gxg.Color))
with gxv.View_3d.new(area_2d=(-1, -1, 11, 11)) as v:
v3d_file = v.file_name
with gxg.Draw(v, 'rect') as g:
g.rectangle((0,0,10,10),
pen=gxg.Pen(line_color=gxg.C_BLACK,
line_thick=0.2,
fill_color=gxg.C_GREEN))
with gxg.Draw_3d(v, 'pipes') as g:
g.polyline_3d(((0,0,0), (10,0,0), (10,10,0), (0,10,0), (0,0,0)),
style=gxg.LINE3D_STYLE_TUBE_JOINED,
pen=gxg.Pen(line_color=gxg.C_GREY,
line_thick=0.2))
with gxg.Draw_3d(v, 'outer') as g:
g.polydata_3d(data, render_spheres, (cmap, 0.25))
pp = gxgm.PPoint(((5, 5, 5), (7, 5, 5), (7, 7, 7)))
g.polydata_3d(pp, render_cubes, (1, gxg.Color('y').int_value))
pp += (0, 0, 2)
g.polydata_3d(pp, render_cylinders, (1, gxg.Color('m').int_value))
pp += (0, 0, 2)
n = 0
g.polydata_3d(pp, render_cones, (1, gxg.Color('r255g128b128').int_value))
self.crc_map(v3d_file)
def test_polydata_3d_grd(self):
self.start()
def render_spheres(item, cmap_radius):
cmap, radius = cmap_radius
if not np.isnan(item[2]):
cint = cmap.color_of_value(item[2]).int_value
return gxg.SYMBOL_3D_SPHERE, item, cint, radius
folder, files = gsys.unzip(os.path.join(os.path.dirname(self._test_case_py), 'dem_small.zip'),
folder=self.gx.temp_folder())
grid_file = os.path.join(folder, 'dem_small.grd')
with gxgrd.Grid.open(grid_file) as grd:
# get the data and replace z with DEM valie
data = grd.xyzv().reshape(-1, 4)
data[:, 2] = data[:, 3] * 3
data = data[:, 0:3]
cmap = gxg.Color_map()
try:
std = np.nanstd(data[:, 2])
mean = np.nanmean(data[:, 2])
cmap.set_normal(std, mean)
except:
cmap.set_linear(0, 1)
with gxv.View_3d.new(coordinate_system=grd.coordinate_system) as v:
v3d_file = v.file_name
with gxg.Draw_3d(v, 'dem_points') as g:
g.polydata_3d(data.reshape((-1, 3)), render_spheres, (cmap, 10 * v.units_per_map_cm))
p_min = gxgm.Point((np.nanmin(data[:, 0]), np.nanmin(data[:, 1]), np.nanmin(data[:, 2])))
p_max = gxgm.Point((np.nanmax(data[:, 0]), np.nanmax(data[:, 1]), np.nanmax(data[:, 2])))
extent = gxgm.Point2((p_min, p_max))
g.box_3d(extent,
wireframe=True,
pen=gxg.Pen(line_color='c', line_thick= 20 * v.units_per_map_cm))
self.crc_map(v3d_file)
    def test_plane_relief_surface(self):
        """Drape a grid image and contours on a relief surface in a 3D view; CRC-check."""
        self.start()
        folder, files = gsys.unzip(os.path.join(os.path.dirname(self._test_case_py), 'dem_small.zip'),
                                   folder=self.gx.temp_folder())
        grid_file = os.path.join(folder, 'dem_small.grd')
        v3d_name = ''
        try:
            # create a 3D view
            # NOTE(review): the two inline gxgrd.Grid(grid_file) instances are never
            # explicitly closed -- presumably released on garbage collection; confirm.
            with gxv.View_3d.new("data",
                                 area_2d=gxgrd.Grid(grid_file).extent_2d(),
                                 coordinate_system=gxgrd.Grid(grid_file).coordinate_system,
                                 scale=5000,
                                 overwrite=True) as v:
                v3d_name = v.file_name
                v.set_plane_relief_surface(grid_file, base=200, scale=2, max=250, min=150, refine=2)
                # add the grid image to the view, with shading, 20 nT contour interval to match default contour lines
                gxg.Aggregate_group.new(v, gxagg.Aggregate_image.new(grid_file, shade=True, contour=20))
                # contour the grid
                gxg.contour(v, 'TMI_contour', grid_file)
            self.crc_map(v3d_name)
        finally:
            # remove the 3D view file even if the CRC check fails
            gxv.delete_files(v3d_name)
    def test_plane_contour(self):
        """Contour a grid in a 2D map view and CRC-check the map."""
        self.start()
        folder, files = gsys.unzip(os.path.join(os.path.dirname(self._test_case_py), 'dem_small.zip'),
                                   folder=self.gx.temp_folder())
        grid_file = os.path.join(folder, 'dem_small.grd')
        # create a 2D view
        with gxmap.Map.new(data_area=gxgrd.Grid(grid_file).extent_2d(),
                           scale=20000,
                           inside_margin=0.1,
                           coordinate_system=gxgrd.Grid(grid_file).coordinate_system,
                           overwrite=True) as map:
            map_name = map.file_name
            with gxv.View.open(map, "data") as v:
                gxg.contour(v, 'TMI_contour', grid_file)
                with gxg.Draw(v, 'edge') as g:
                    g.rectangle((v.extent_clip), pen=gxg.Pen(line_thick=v.units_per_map_cm * 0.1))
        self.crc_map(map_name)
    def test_plane_contour_3d(self):
        """Contour a grid on the plane of a 3D view and CRC-check the rendered view."""
        self.start()
        folder, files = gsys.unzip(os.path.join(os.path.dirname(self._test_case_py), 'dem_small.zip'),
                                   folder=self.gx.temp_folder())
        grid_file = os.path.join(folder, 'dem_small.grd')
        v3d_name = ''
        try:
            # create a 3D view
            with gxv.View_3d.new("data",
                                 area_2d=gxgrd.Grid(grid_file).extent_2d(),
                                 coordinate_system=gxgrd.Grid(grid_file).coordinate_system,
                                 scale=20000,
                                 overwrite=True) as v:
                v3d_name = v.file_name
                gxg.contour(v, 'TMI_contour', grid_file)
                with gxg.Draw(v, 'edge') as g:
                    g.rectangle((v.extent_clip), pen=gxg.Pen(line_thick=v.units_per_map_cm * 0.1))
            self.crc_map(v3d_name)
        finally:
            # remove the 3D view file even if the CRC check fails
            gxv.delete_files(v3d_name)
def test_polydata_3d_grd_cone(self):
self.start()
def render_spheres(item, cmap_radius):
cmap, radius = cmap_radius
if not np.isnan(item[2]):
cint = cmap.color_of_value(item[2]).int_value
item = gxgm.Point(item)
item2 = item + (0, radius, radius * 2)
return gxg.SYMBOL_3D_CONE, gxgm.Point2((item, item2)), cint, radius
folder, files = gsys.unzip(os.path.join(os.path.dirname(self._test_case_py), 'dem_small.zip'),
folder=self.gx.temp_folder())
grid_file = os.path.join(folder, 'dem_small.grd')
with gxgrd.Grid.open(grid_file) as grd:
# get the data and replace z with DEM valie
data = grd.xyzv().reshape(-1, 4)
data[:, 2] = data[:, 3] * 3
data = data[:, 0:3]
cmap = gxg.Color_map()
try:
std = np.nanstd(data[:, 2])
mean = np.nanmean(data[:, 2])
cmap.set_normal(std, mean)
except:
cmap.set_linear(0, 1)
with gxv.View_3d.new(coordinate_system=grd.coordinate_system) as v:
v3d_file = v.file_name
with gxg.Draw_3d(v, 'dem_points') as g:
g.polydata_3d(data.reshape((-1, 3)), render_spheres, (cmap, 10 * v.units_per_map_cm))
p_min = gxgm.Point((np.nanmin(data[:, 0]), np.nanmin(data[:, 1]), np.nanmin(data[:, 2])))
p_max = gxgm.Point((np.nanmax(data[:, 0]), np.nanmax(data[:, 1]), np.nanmax(data[:, 2])))
extent = gxgm.Point2((p_min, p_max))
g.box_3d(extent,
wireframe=True,
pen=gxg.Pen(line_color='c', line_thick= 20 * v.units_per_map_cm))
self.crc_map(v3d_file)
def test_polydata_3d_grd_cylinder(self):
self.start()
def render_spheres(item, cmap_radius):
cmap, radius = cmap_radius
if not np.isnan(item[2]):
cint = cmap.color_of_value(item[2]).int_value
item = gxgm.Point(item)
item2 = item + (0, radius, radius * 2)
return gxg.SYMBOL_3D_CYLINDER, gxgm.Point2((item, item2)), cint, radius
folder, files = gsys.unzip(os.path.join(os.path.dirname(self._test_case_py), 'dem_small.zip'),
folder=self.gx.temp_folder())
grid_file = os.path.join(folder, 'dem_small.grd')
with gxgrd.Grid.open(grid_file) as grd:
# get the data and replace z with DEM valie
data = grd.xyzv().reshape(-1, 4)
data[:, 2] = data[:, 3] * 3
data = data[:, 0:3]
cmap = gxg.Color_map()
try:
std = np.nanstd(data[:, 2])
mean = np.nanmean(data[:, 2])
cmap.set_normal(std, mean)
except:
cmap.set_linear(0, 1)
with gxv.View_3d.new(coordinate_system=grd.coordinate_system) as v:
v3d_file = v.file_name
with gxg.Draw_3d(v, 'dem_points') as g:
g.polydata_3d(data.reshape((-1, 3)), render_spheres, (cmap, 10 * v.units_per_map_cm))
p_min = gxgm.Point((np.nanmin(data[:, 0]), np.nanmin(data[:, 1]), np.nanmin(data[:, 2])))
p_max = gxgm.Point((np.nanmax(data[:, 0]), np.nanmax(data[:, 1]), np.nanmax(data[:, 2])))
extent = gxgm.Point2((p_min, p_max))
g.box_3d(extent,
wireframe=True,
pen=gxg.Pen(line_color='c', line_thick= 20 * v.units_per_map_cm))
self.crc_map(v3d_file)
    def test_contour(self):
        """Add an aggregate image and default contours of a grid to a map; CRC-check."""
        self.start()
        # extract the test grid from the zip fixture into a temp folder
        folder, files = gsys.unzip(os.path.join(os.path.dirname(self._test_case_py), 'testgrids.zip'),
                                   folder=self.gx.temp_folder())
        grid_file = os.path.join(folder, 'test_agg_utm.grd')
        map_file = os.path.join(self.gx.temp_folder(), "test_agg_utm")
        with gxgrd.Grid(grid_file) as grd:
            cs = grd.coordinate_system
            area = grd.extent_2d()
        with gxmap.Map.new(map_file,
                           data_area=area, media="A4", margins=(0, 10, 0, 0),
                           coordinate_system=cs, overwrite=True) as gmap:
            map_file = gmap.file_name
            with gxv.View.open(gmap, "base") as v:
                with gxg.Draw(v, 'line') as g:
                    g.rectangle(v.extent_clip, pen=g.new_pen(line_thick=1, line_color='K'))
            with gxv.View.open(gmap, "data") as v:
                with gxg.Draw(v, 'line') as g:
                    g.rectangle(area, pen=g.new_pen(line_thick=0.1, line_color='R'))
                with gxagg.Aggregate_image.new(grid_file) as agg:
                    with gxg.Aggregate_group.new(v, agg) as gagg:
                        # the aggregate group is named after the aggregate
                        self.assertEqual(gagg.name, str(agg))
                self.assertEqual(len(v.group_list_agg), 1)
                gxg.contour(v, 'contour', grid_file)
        self.crc_map(map_file)
    def test_contour2(self):
        """Aggregate with a contour color interval, plus a legend and contour overlay; CRC-check."""
        self.start()
        # extract the test grid from the zip fixture into a temp folder
        folder, files = gsys.unzip(os.path.join(os.path.dirname(self._test_case_py), 'testgrids.zip'),
                                   folder=self.gx.temp_folder())
        grid_file = os.path.join(folder, 'test_agg_utm.grd')
        map_file = os.path.join(self.gx.temp_folder(), "test_agg_utm")
        with gxgrd.Grid(grid_file) as grd:
            cs = grd.coordinate_system
            area = grd.extent_2d()
        with gxmap.Map.new(map_file,
                           data_area=area, margins=(2, 10, 2, 2),
                           coordinate_system=cs, overwrite=True, scale=20000) as gmap:
            map_file = gmap.file_name
            with gxv.View.open(gmap, "base") as v:
                with gxg.Draw(v, 'line') as g:
                    g.rectangle(v.extent_clip, pen=g.new_pen(line_thick=1, line_color='K'))
            with gxv.View.open(gmap, "data") as v:
                with gxg.Draw(v, 'line') as g:
                    g.rectangle(area, pen=g.new_pen(line_thick=0.1, line_color='R'))
                with gxagg.Aggregate_image.new(grid_file, contour=10) as agg:
                    # keep the color map and name for the legend drawn after the agg closes
                    cmap = agg.layer_color_map()
                    cname = agg.name
                    with gxg.Aggregate_group.new(v, agg) as gagg:
                        self.assertEqual(gagg.name, str(agg))
                gxg.legend_color_bar(v, cname, cmap)
                self.assertEqual(len(v.group_list_agg), 1)
                gxg.contour(v, 'contour', grid_file)
        self.crc_map(map_file)
    def test_contour_parameters(self):
        """Contour with explicit control parameters, as a tuple and as a dict; CRC-check."""
        self.start()
        # extract the test grid from the zip fixture into a temp folder
        folder, files = gsys.unzip(os.path.join(os.path.dirname(self._test_case_py), 'testgrids.zip'),
                                   folder=self.gx.temp_folder())
        grid_file = os.path.join(folder, 'test_agg_utm.grd')
        map_file = os.path.join(self.gx.temp_folder(), "test_agg_utm")
        with gxgrd.Grid(grid_file) as grd:
            cs = grd.coordinate_system
            area = grd.extent_2d()
        with gxmap.Map.new(map_file,
                           data_area=area, margins=(2, 10, 2, 2),
                           coordinate_system=cs, overwrite=True, scale=20000) as gmap:
            map_file = gmap.file_name
            with gxv.View.open(gmap, "base") as v:
                with gxg.Draw(v, 'line') as g:
                    g.rectangle(v.extent_clip, pen=g.new_pen(line_thick=1, line_color='K'))
            with gxv.View.open(gmap, "data") as v:
                with gxg.Draw(v, 'line') as g:
                    g.rectangle(area, pen=g.new_pen(line_thick=0.1, line_color='R'))
                # positional control-file-style parameter tuple
                gxg.contour(v, '_250', grid_file, parameters=('', '', '', '', '', '', '10', '50', '250'))
                # dictionary parameters: level options plus explicit contour levels
                gxg.contour(v, '_260_270', grid_file,
                            parameters={'levels': {'levopt': 1},
                                        'contours': [{'cint': 260, 'label': 0, 'catt': 'a=rt50'},
                                                     {'cint': 270, 'label': 1, 'catt': 'b=gt1000'},
                                                     {'cint': 280, 'label': 1, 'catt': 'c=br100g100t500'}]})
        self.crc_map(map_file)
    def test_color_str(self):
        """Check color_from_string for a named color and an H/S/V string."""
        self.start()
        self.assertEqual(gxg.color_from_string("R"), 33554687)
        self.assertEqual(gxg.color_from_string("H255S127V32"), 18907135)
def test_group_properties(self):
self.start()
rect = gxgm.Point2((0,0,10,5))
with gxmap.Map.new(data_area=rect.extent_xy) as gmap:
with gxv.View.new(gmap, "data") as v:
gxg.Draw(v, 'rect').rectangle(rect)
self.assertTrue(len(v.group_list), 1)
gxg.Draw(v, 'rect').rectangle(rect)
self.assertTrue(len(v.group_list), 1)
gxg.Draw(v, 'rect', mode=gxg.NEW).rectangle(rect)
self.assertTrue(len(v.group_list), 2)
self.assertTrue('rect_1' in v.group_list)
gxg.Draw(v, 'rect_1', mode=gxg.REPLACE).rectangle(rect)
self.assertTrue(len(v.group_list), 2)
self.assertTrue('rect_1' in v.group_list)
with gxg.Draw(v, 'property_test') as g:
self.assertEqual(g.group_opacity, 1.0)
g.group_opacity = 0.25
self.assertEqual(g.group_opacity, 0.25)
g.group_opacity = -50
self.assertEqual(g.group_opacity, 0.)
g.group_opacity = 5
self.assertEqual(g.group_opacity, 1.)
self.assertFalse(g.group_3d)
self.assertEqual(g.name, 'property_test')
self.assertEqual(g.view.name, 'data')
    @unittest.skip('WIP see issue #73')
    def test_surface(self):
        """WIP: render a triangulated surface from faces/verts arrays (see issue #73)."""
        self.start()
        # vertices as (x, y, z) float64 rows
        verts = np.array([[0, 0, 0],
                          [5, 0, 0],
                          [5, 5, 0],
                          [0, 3, 5],
                          [2.5, 2, 10],
                          [-3, 6, 8],
                          [-4, 0, 12]], dtype=np.float64)
        # triangles as vertex-index triples into verts
        faces = np.array([[0, 1, 2],
                          [0, 2, 3],
                          [3, 2, 4],
                          [1, 2, 4],
                          [3, 4, 5],
                          [6, 4, 5]], dtype=np.int32)
        with gxv.View_3d.new() as v3d:
            v3d_file = v3d.file_name
            with gxg.Draw_3d(v3d, 'Surface') as g:
                # NOTE(review): calls the private _surface API -- WIP, see issue #73
                g._surface(faces, verts)
        image_file = gxmap.Map.open(v3d_file).image_file(pix_width=800)
        gxviewer.view_document(v3d_file, wait_for_close=True)
        pass # self.crc_map(v3d_file)
# run the test suite when executed as a script
if __name__ == '__main__':
    unittest.main()
import os
import numpy as np
import geosoft
import geosoft.gxapi as gxapi
import geosoft.gxpy.system as gsys
import geosoft.gxpy.map as gxmap
import geosoft.gxpy.geometry as gxgm
import geosoft.gxpy.grid as gxgrd
import geosoft.gxpy.agg as gxagg
import geosoft.gxpy.system as gxsys
import geosoft.gxpy.view as gxv
import geosoft.gxpy.group as gxg
import geosoft.gxpy.vv as gxvv
import geosoft.gxpy.viewer as gxviewer
from base import GXPYTest
def rect_line(g, size=100):
g.rectangle(gxgm.Point2((0, 0, size, size), coordinate_system="cm"), pen=g.new_pen(line_thick=1))
p1 = gxgm.Point((0.1, 0.1)) * size
p2 = gxgm.Point((0.9, 0.9)) * size
poff = gxgm.Point((0.15, 0.05)) * size
g.rectangle((p1, p2), pen=g.new_pen(fill_color=gxg.C_LT_GREEN))
p12 = gxgm.Point2((p1 + poff, p2 - poff))
g.line((p12.p0.x, p12.p0.y, p12.p1.x, p12.p1.y), pen=g.new_pen(line_style=2, line_pitch=2.0))
def pline():
return gxgm.PPoint([[10, 5],
[20, 20],
[30, 15],
[50, 50],
[60, 70],
[75, 35],
[90, 65],
[20, 50],
[35, 18.5]])
def draw_stuff(g, size=1.0):
plinelist = [[110, 5],
[120, 20],
[130, 15],
[150, 50],
[160, 70],
[175, 35],
[190, 65],
[220, 50],
[235, 18.5]]
pp = gxgm.PPoint.from_list(plinelist) * size
g.pen = g.new_pen(line_style=2, line_pitch=2.0)
g.polyline(pp)
g.pen = g.new_pen(line_style=4, line_pitch=2.0, line_smooth=gxg.SMOOTH_AKIMA)
g.polyline(pp)
ppp = np.array(plinelist)
pp = gxgm.PPoint(ppp[3:, :]) * size
g.pen = g.new_pen(line_style=5, line_pitch=5.0,
line_smooth=gxg.SMOOTH_CUBIC,
line_color=gxg.C_RED,
line_thick=0.25,
fill_color=gxg.C_LT_BLUE)
g.polygon(pp)
g.pen = g.new_pen(fill_color=gxg.C_LT_GREEN)
p1 = gxgm.Point((100, 0, 0)) * size
p2 = gxgm.Point((100, 0, 0)) * size
pp = (pp - p1) / 2 + p2
g.polygon(pp)
pp += gxgm.Point((0, 25, 0)) * size
g.pen = g.new_pen(fill_color=gxg.C_LT_RED)
g.polygon(pp)
class Test(GXPYTest):
def test_version(self):
self.start()
self.assertEqual(gxmap.__version__, geosoft.__version__)
def test_create(self):
self.start()
def test_lock(self):
self.start()
with gxmap.Map.new(data_area=(0, 0, 50, 40), coordinate_system='cm') as map:
with gxv.View.open(map, 'data') as v:
self.assertFalse(bool(v.lock))
with gxg.Draw(v, 'rectangle') as g:
self.assertEqual(str(g), 'rectangle/data')
self.assertTrue(g.drawing_plane is None)
self.assertEqual(g.unit_of_measure, '')
self.assertTrue(bool(v.lock))
self.assertEqual(v.lock, 'rectangle')
self.assertRaises(gxg.GroupException, gxg.Group, v)
self.assertFalse(bool(v.lock))
def test_metadata(self):
self.start()
with gxmap.Map.new(data_area=(0, 0, 50, 40), coordinate_system='cm') as map:
with gxv.View.open(map, 'data') as v:
with gxg.Draw(v, 'rectangle') as g:
self.assertTrue(g.guid)
meta = g.gx_metadata
meta.node_token('maki/data/more')
meta.set_attribute('/maki/data/more/scale', 45)
meta.set_attribute('/maki/data/more/unit_of_measure', 'cm')
g.gx_metadata = meta
g.unit_of_measure = 'billy-bob'
with gxg.Draw(v, 'rectangle') as g:
meta = g.gx_metadata
self.assertTrue(meta.has_node('/maki/data'))
self.assertTrue(meta.has_node('/maki/data/more'))
self.assertEqual(meta.get_attribute('/maki/data/more/scale'), 45)
self.assertEqual(meta.get_attribute('/maki/data/more/unit_of_measure'), 'cm')
self.assertEqual(g.unit_of_measure, 'billy-bob')
def test_cs(self):
self.start()
with gxmap.Map.new(data_area=(0, 0, 50, 40), coordinate_system='cm') as map:
with gxv.View.open(map, 'data') as v:
with gxg.Draw(v, 'rectangle') as g:
self.assertEqual(g.drawing_coordinate_system.unit_of_measure, 'cm')
g.drawing_coordinate_system = "NAD83 / UTM zone 15N"
self.assertEqual(str(g.drawing_coordinate_system), "NAD83 / UTM zone 15N")
g.drawing_coordinate_system = None
self.assertEqual(g.drawing_coordinate_system.unit_of_measure, 'cm')
def test_extent(self):
self.start()
map_file = None
try:
with gxmap.Map.new(data_area=(3, 2, 50, 40), coordinate_system='cm', overwrite=True) as map:
map_file = map.file_name
with gxv.View.open(map, 'data') as v:
self.assertEqual(v.extent_map_cm(), (2.0, 6.0, 41.6, 38.4))
with gxg.Draw(v, 'rectangle') as g:
g.rectangle((3, 2, 28, 20),
pen=g.new_pen(line_thick=0.25, line_color='R', line_style=gxg.LINE_STYLE_LONG,
line_pitch=5))
self.assertEqual(g.extent, (3., 2., 28., 20.))
self.assertEqual(g.extent_map_cm(), (3.0, 7.0, 23.0, 21.4))
finally:
gxmap.delete_files(map_file)
@unittest.skip('skipping to let fixture pass')
def test_force_assert(self):
self.start()
with gxmap.Map.figure((0, 0, 1000, 1000)) as gmap:
with gxv.View.open(gmap, "data") as v:
gxapi.GXMVU.arrow(v.gxview, 500, 500, 450, 450, 0.5, 30, 1)
with gxg.Draw(v, "arrow") as g:
gxapi.GXMVU.arrow(g.view.gxview, 500, 500, 450, 450, 0.5, 30, 1)
def test_point(self):
self.start()
p1 = gxgm.Point((10, 20))
p2 = gxgm.Point((20, 20))
p3 = gxgm.Point((30, 20))
rect = gxgm.Point2((p1 - (15, 15), p3 + (15, 15)))
with gxmap.Map.new(data_area=rect.extent_xy) as gmap:
map_file = gmap.file_name
with gxv.View.new(gmap, "data") as v:
with gxg.Draw(v, 'test_point') as g:
g.pen = gxg.Pen(line_thick=1)
g.rectangle(rect)
g.pen = gxg.Pen(line_thick=2, line_color='R')
g.line((p1, p1))
g.pen = gxg.Pen(line_thick=2, line_color='G')
g.line((p2, p2 + (0.04, 0)))
g.pen = gxg.Pen(line_thick=2, line_color='B')
g.line((p3, p3 + (0.05, 0)))
self.crc_map(map_file, pix_width=800)
def test_points(self):
self.start()
plinelist = [[110, 5],
[120, 20],
[130, 15],
[150, 50],
[160, 70],
[175, 35],
[190, 65],
[220, 50],
[235, 18.5]]
pp = gxgm.PPoint.from_list(plinelist)
with gxmap.Map.new() as gmap:
map_file = gmap.file_name
with gxv.View.new(gmap, "points", area=(100, 0, 260, 100)) as v:
with gxg.Draw(v, 'test_group') as g:
g.rectangle(pp.extent, pen=gxg.Pen(line_thick=1))
g.pen = gxg.Pen(line_thick=2, line_color='B')
for p in pp:
g.point(p)
pp += (15, 15)
g.pen = gxg.Pen(line_thick=1.5, line_color='G')
g.polypoint(pp)
pp -= (0, 5)
g.pen = gxg.Pen(line_thick=1, line_color='R')
g.polypoint((gxvv.GXvv(pp.x), gxvv.GXvv(pp.y)))
self.crc_map(map_file, pix_width=800)
def test_rectangle(self):
self.start()
with gxmap.Map.new(data_area=(0, 0, 50, 40), coordinate_system='cm', overwrite=True) as map:
map_file = map.file_name
with gxv.View.open(map, 'data') as v:
with gxg.Draw(v, 'rectangle') as g:
g.rectangle(v.extent_clip, pen=g.new_pen(line_thick=0.5, line_color='B'))
g.rectangle((2, 2, 48, 38),
pen=g.new_pen(line_thick=0.25, line_color='R', line_style=gxg.LINE_STYLE_LONG,
line_pitch=5))
self.crc_map(map_file)
def test_smooth_line(self):
self.start()
pp = pline()
p1, p2 = pp.extent
area = (p1.x, p1.y, p2.x, p2.y)
with gxmap.Map.new() as map:
map_file = map.file_name
with gxv.View.new(map, 'smooth') as v:
v.locate(coordinate_system='mm', area=area, map_location=(1,1), scale=0.4)
with gxg.Draw(v) as g:
g.rectangle(v.extent_clip)
g.polyline(pp, pen=g.new_pen(line_smooth=gxg.SMOOTH_AKIMA, line_color='r', line_thick=1))
g.polyline(pp, pen=g.new_pen(line_smooth=gxg.SMOOTH_CUBIC, line_color='b', line_thick=2))
g.polyline(pp)
map.delete_view('data')
map.delete_view('base')
self.crc_map(map_file)
def test_view_groups_1(self):
self.start()
testmap = os.path.join(self.gx.temp_folder(), "test")
with gxmap.Map.new(testmap, overwrite=True) as gmap:
map_file = gmap.file_name
with gxv.View.new(gmap, "rectangle_test", area=(0, 0, 250, 125)) as v:
with gxg.Draw(v, 'test_group') as g:
rect_line(g)
g.graticule(25, 20, style=gxg.GRATICULE_LINE)
g.pen = g.new_pen(line_thick=0.1)
g.rectangle(((0, 0), (250, 125)), pen=g.new_pen(line_thick=0.1, line_color='R'))
with gxv.View.new(gmap, "poly") as v:
with gxg.Draw(v) as g:
draw_stuff(g)
try:
self.crc_map(map_file)
finally:
gxmap.delete_files(map_file)
def test_view_groups_2(self):
self.start()
testmap = os.path.join(self.gx.temp_folder(), "test")
with gxmap.Map.new(testmap, overwrite=True) as gmap:
map_file = gmap.file_name
with gxv.View.new(gmap, "rectangle_test", area=(0, 0, 250, 125)) as v:
with gxg.Draw(v, 'line') as g:
rect_line(g)
with gxg.Draw(v, 'graticule') as g:
g.graticule(25, 20, style=gxg.GRATICULE_LINE)
g.pen = g.new_pen(line_thick=0.1)
with gxg.Draw(v, 'test_rectangles') as g:
g.rectangle(((0, 0), (250, 125)), pen=g.new_pen(line_thick=0.1, line_color='R'))
g.rectangle(((10, 5), (240, 120)), pen=g.new_pen(line_thick=2, line_color='B'))
v.delete_group('graticule')
with gxv.View.new(gmap, "poly") as v:
with gxg.Draw(v, 'test_group') as g:
draw_stuff(g)
try:
self.crc_map(map_file)
finally:
gxmap.delete_files(map_file)
def test_reopen_map_view(self):
self.start()
testmap = os.path.join(self.gx.temp_folder(), "test")
with gxmap.Map.new(testmap, overwrite=True) as gmap:
map_file = gmap.file_name
with gxv.View.new(gmap, "test_view") as v:
with gxg.Draw(v) as g:
rect_line(g)
with gxv.View.open(gmap, "test_view") as v:
pass
gxmap.delete_files(map_file)
def test_3D(self):
self.start()
testmap = os.path.join(self.gx.temp_folder(), "test.map")
with gxmap.Map.new(testmap, overwrite=True) as gmap:
with gxv.View.open(gmap, "base") as view_base:
with gxg.Draw(view_base, 'Surround') as g:
g.rectangle(((0, 0), (280, 260)))
test3dv = os.path.join(self.gx.temp_folder(), "test.geosoft_3dv")
with gxv.View_3d.new(test3dv, overwrite=True) as view_3d:
self.assertTrue(view_3d.extent == None)
with gxg.Draw(view_3d, '2d_group') as g:
rect_line(g)
draw_stuff(g)
with gxg.Draw_3d(view_3d, '3d_group_cylinders') as g:
self.assertEqual(g.render_backfaces, False)
g.cylinder_3d(((100, 10, 10), (120, 10, 10)), 8, pen='r', close=gxg.CYLINDER_CLOSE_ALL)
self.assertEqual(view_3d.extent_xyz, (92.0, 2.0, 2.0, 128.0, 18.0, 18.0))
g.cylinder_3d(((100, 10, 70), (120, 10, 70)), 8, pen='c', close=gxg.CYLINDER_OPEN)
self.assertEqual(view_3d.extent_xyz, (92.0, 2.0, 2.0, 128.0, 18.0, 78.0))
g.cylinder_3d(((100, 10, 50), (120, 10, 50)), 8, pen='b', close=gxg.CYLINDER_CLOSE_END)
g.cylinder_3d(((100, 10, 30), (120, 10, 30)), 8, pen='g', close=gxg.CYLINDER_CLOSE_START)
self.assertEqual(view_3d.extent_xyz, (92.0, 2.0, 2.0, 128.0, 18.0, 78.0))
self.assertEqual(g.render_backfaces, True)
with gxg.Draw_3d(view_3d, '3d_group') as g:
g.cylinder_3d(((20, 10, 60), (80, 50, 80)), 5, pen='b')
g.cone_3d(((20, 10, 80), (80, 50, 60)), 8, pen='g')
g.cone_3d(((20, 50, 65), (20, 50, 40)), 30, pen='r')
g.sphere((20, 50, 80), 10, pen='c')
self.assertEqual(g.render_backfaces, False)
g.cylinder_3d(((80, 10, 0), (80, 10, 80)), 5, pen='y', close=gxg.CYLINDER_OPEN)
self.assertEqual(g.render_backfaces, True)
g.box_3d(((20, 10, 30), (80, 50, 50)), pen=g.new_pen(line_color='R255G100B50'))
g.box_3d(((80, 50, 50), (90,60, 65)), wireframe=True,
pen=g.new_pen(line_color='R25G255B50', line_thick=2))
with gxmap.Map.open(testmap) as gmap:
gmap.create_linked_3d_view(view_3d, area_on_map=(10, 10, 270, 250))
view_3d = gxv.View_3d.open(test3dv)
group_list = view_3d.group_list
self.assertEqual(len(group_list), 3)
view_3d.close()
self.crc_map(test3dv, alt_crc_name=gxsys.func_name() + '_3dv')
self.crc_map(testmap, alt_crc_name=gxsys.func_name() + '_map')
    def test_basic_grid_1(self):
        """Render a UTM grid as an aggregate image on a new A4 map and verify the CRC."""
        self.start()
        folder, files = gsys.unzip(os.path.join(os.path.dirname(self._test_case_py), 'testgrids.zip'),
                                   folder=self.gx.temp_folder())
        grid_file = os.path.join(folder, 'test_agg_utm.grd')
        map_file = os.path.join(self.gx.temp_folder(), "test_agg_utm")
        # pick up the grid's coordinate system and extent for the map layout
        with gxgrd.Grid(grid_file) as grd:
            cs = grd.coordinate_system
            area = grd.extent_2d()
        with gxmap.Map.new(map_file,
                           data_area=area, media="A4", margins=(0, 10, 0, 0),
                           coordinate_system=cs, overwrite=True) as gmap:
            map_file = gmap.file_name
            with gxv.View.open(gmap, "base") as v:
                with gxg.Draw(v, 'line') as g:
                    g.rectangle(v.extent_clip, pen=g.new_pen(line_thick=1, line_color='K'))
            with gxv.View.open(gmap, "data") as v:
                with gxg.Draw(v, 'line') as g:
                    g.rectangle(area, pen=g.new_pen(line_thick=0.1, line_color='R'))
                with gxagg.Aggregate_image.new(grid_file) as agg:
                    with gxg.Aggregate_group.new(v, agg) as gagg:
                        # group name defaults to the aggregate's string form
                        self.assertEqual(gagg.name, str(agg))
                self.assertEqual(len(v.group_list_agg), 1)
        self.crc_map(map_file)
    def test_basic_grid_3D(self):
        """Render a grid aggregate on the default plane of a new 3D view and verify the CRC."""
        self.start()
        folder, files = gsys.unzip(os.path.join(os.path.dirname(self._test_case_py), 'testgrids.zip'),
                                   folder=self.gx.temp_folder())
        grid_file = os.path.join(folder, 'test_agg_utm.grd')
        with gxgrd.Grid(grid_file) as grd:
            # NOTE(review): cs is assigned but only area is used below — confirm intended
            cs = grd.coordinate_system
            area = grd.extent_2d()
        with gxv.View_3d.new() as v:
            v3d_file = v.file_name
            with gxg.Draw(v, 'line') as g:
                # 2D drawing in a 3D view lands on the default 'Plane'
                self.assertEqual(g.drawing_plane, 'Plane')
                self.assertEqual(str(g), 'line/Plane/uuid_test_basic_grid_3D_1')
                g.rectangle(area, pen=g.new_pen(line_thick=0.1, line_color='R'))
            with gxagg.Aggregate_image.new(grid_file) as agg:
                with gxg.Aggregate_group.new(v, agg) as gagg:
                    self.assertEqual(str(gagg), agg.name + '/Plane/uuid_test_basic_grid_3D_1')
            self.assertEqual(len(v.group_list_agg), 1)
        self.crc_map(v3d_file)
    def test_basic_grid_2(self):
        """Render a UTM grid aggregate on an A3 map at an explicit scale and verify the CRC."""
        self.start()
        folder, files = gsys.unzip(os.path.join(os.path.dirname(self._test_case_py), 'testgrids.zip'),
                                   folder=self.gx.temp_folder())
        grid_file = os.path.join(folder, 'test_agg_utm.grd')
        map_file = os.path.join(self.gx.temp_folder(), "test_agg_utm")
        with gxgrd.Grid(grid_file) as grd:
            cs = grd.coordinate_system
            area = grd.extent_2d()
        with gxmap.Map.new(map_file,
                           data_area=area, media="A3", margins=(0, 0, 0, 0),
                           scale=(area[2] - area[0]) / 0.2,
                           coordinate_system=cs, overwrite=True) as gmap:
            map_file = gmap.file_name
            with gxv.View.open(gmap, "base") as v:
                with gxg.Draw(v, 'line') as g:
                    g.rectangle(v.extent_clip, pen=g.new_pen(line_thick=2, line_color='K'))
            with gxv.View.open(gmap, "data") as v:
                with gxg.Draw(v, 'line') as g:
                    g.rectangle(area, pen=g.new_pen(line_thick=0.1, line_color='G'))
                with gxagg.Aggregate_image.new(grid_file) as agg:
                    gxg.Aggregate_group.new(v, agg)
        self.crc_map(map_file)
    def test_zone_grid(self):
        """Render the same grid with each color-zoning mode and CRC-check each result."""
        self.start()
        def test_zone(zone, suffix, shade=False):
            # renders grid_file (closure) with the given zoning mode into its own map
            map_file = os.path.join(self.gx.temp_folder(), "test_agg_" + suffix)
            with gxmap.Map.new(map_file, overwrite=True,
                               data_area=(ex[0], ex[1], ex[2], ex[3]),
                               scale=(ex[2] - ex[0]) / 0.2) as gmap:
                map_file = gmap.file_name
                with gxv.View.open(gmap, "data") as v:
                    with gxagg.Aggregate_image.new(grid_file, zone=zone, shade=shade) as agg:
                        gxg.Aggregate_group.new(v, agg)
                gmap.delete_view('base')
            # suffix distinguishes the per-mode reference CRC
            self.crc_map(map_file, alt_crc_name='{}_{}'.format(gxsys.func_name(1), suffix))
        folder, files = gsys.unzip(os.path.join(os.path.dirname(self._test_case_py), 'testgrids.zip'),
                                   folder=self.gx.temp_folder())
        with gxgrd.Grid(os.path.join(folder, 'test_agg_utm.grd')) as grd:
            ex = grd.extent_2d()
            grid_file = 'test_zone'
            gxgrd.delete_files(grid_file)
            # work on a private copy so zoning cannot disturb the source grid
            with gxgrd.Grid.copy(grd, grid_file) as test:
                grid_file = test.file_name
        try:
            test_zone(gxagg.ZONE_LINEAR, "linear_shade", shade=True)
            test_zone(gxagg.ZONE_EQUALAREA, "eq_area")
            test_zone(gxagg.ZONE_DEFAULT, "default")
            test_zone(gxagg.ZONE_LAST, "last")
            test_zone(gxagg.ZONE_LINEAR, "linear")
            test_zone(gxagg.ZONE_NORMAL, "normal")
            test_zone(gxagg.ZONE_SHADE, "shade")
            test_zone(gxagg.ZONE_LOGLINEAR, "log_linear")
        finally:
            gxgrd.delete_files(grid_file)
    def test_text_definition(self):
        """Exercise Text_def defaults, font/mapplot strings, italics, and weight/thickness coupling."""
        self.start()
        t = gxg.Text_def()
        self.assertEqual(t.slant, 0)
        self.assertEqual(t.height, 0.25)
        self.assertEqual(t.weight, gxg.FONT_WEIGHT_MEDIUM)
        self.assertEqual(t.font, 'DEFAULT')
        t.font = "Arial"
        self.assertEqual(t.font, 'Arial')
        # TrueType fonts carry a "(TT)" suffix in the mapplot string
        self.assertEqual(t.mapplot_string, '0.25,,,0,"Arial(TT)"')
        # a .gfn font is referenced by its base name
        t.font = 'sr.gfn'
        self.assertEqual(t.mapplot_string, '0.25,,,0,"sr"')
        # an empty font falls back to DEFAULT
        t.font = ''
        self.assertEqual(t.mapplot_string, '0.25,,,0,"DEFAULT"')
        # italics is expressed as a 15-degree slant
        t.italics = True
        self.assertTrue(t.italics)
        self.assertEqual(t.slant, 15)
        t.italics = 0
        self.assertFalse(t.italics)
        self.assertEqual(t.slant, 0)
        # weight and line_thick are two views of the same property
        t.weight = gxg.FONT_WEIGHT_ULTRALIGHT
        self.assertAlmostEqual(t.line_thick, 0.005208333333333333)
        t.weight = gxg.FONT_WEIGHT_BOLD
        self.assertAlmostEqual(t.line_thick, 0.020833333333333331)
        thick = t.line_thick
        t.weight = gxg.FONT_WEIGHT_XXBOLD
        self.assertAlmostEqual(t.line_thick, 0.0625)
        t.line_thick = thick
        self.assertEqual(t.weight, gxg.FONT_WEIGHT_BOLD)
        # line_thick scales with height while weight stays fixed
        t.height = 10.
        self.assertEqual(t.weight, gxg.FONT_WEIGHT_BOLD)
        self.assertAlmostEqual(t.line_thick, 0.8333333333333333)
        t.line_thick = t.line_thick
        self.assertEqual(t.weight, gxg.FONT_WEIGHT_BOLD)
    def test_colours(self):
        """Exercise Color construction from tuples, strings, constants and model conversions."""
        self.start()
        # components are clamped to 255
        c = gxg.Color((150, 200, 500))
        self.assertEqual(c.rgb, (150, 200, 255))
        c = gxg.Color((150, 200, 500), model=gxg.CMODEL_CMY)
        self.assertEqual(c.cmy, (150, 200, 255))
        # parse an 'rNNNgNNNbNNN' color string
        c = gxg.Color('r255g128b56')
        self.assertEqual(c.rgb, (255, 128, 56))
        self.assertEqual(c.cmy, (0, 127, 199))
        c.rgb = (64, 32, 16)
        self.assertEqual(c.rgb, (64, 32, 16))
        c.cmy = (100, 200, 300)
        self.assertEqual(c.cmy, (100, 200, 255))
        c = gxg.Color((0, 127, 64), gxg.CMODEL_HSV)
        self.assertEqual(c.rgb, (191, 96, 96))
        c = gxg.Color((0, 127, 64), gxg.CMODEL_RGB)
        self.assertEqual(c.rgb, (0, 127, 64))
        c = gxg.Color(gxg.C_GREEN)
        self.assertEqual(c.rgb, (0, 255, 0))
        # copy-construction from another Color
        c2 = gxg.Color(c)
        self.assertEqual(c2.rgb, (0, 255, 0))
        # transparent has no component values
        c = gxg.Color(gxg.C_TRANSPARENT)
        self.assertEqual(c.rgb, None)
        self.assertEqual(c.cmy, None)
        self.assertTrue(c == gxg.Color(gxg.C_TRANSPARENT))
    def test_pen(self):
        """Exercise Pen defaults, equality, and round-tripping through mapplot strings."""
        self.start()
        p = gxg.Pen()
        self.assertEqual(p.line_color.int_value, gxg.C_BLACK)
        self.assertEqual(p.fill_color.int_value, gxg.C_TRANSPARENT)
        self.assertEqual(p.line_style, gxg.LINE_STYLE_SOLID)
        p.line_color = (255, 127, 64)
        self.assertEqual(p.mapplot_string, 'r255g127b64t10')
        p2 = gxg.Pen(line_color = (255, 127, 64))
        self.assertTrue(p == p2)
        p2.line_color = 'K'
        self.assertFalse(p == p2)
        # lower-case components are line color, upper-case are fill color
        p = gxg.Pen.from_mapplot_string('r20b100k16R64K16')
        ms = p.mapplot_string
        self.assertEqual(ms, 'r4g0b84R48G0B0t1')
        # the normalized string must round-trip unchanged
        p = gxg.Pen.from_mapplot_string(ms)
        self.assertEqual(p.mapplot_string, ms)
        p = gxg.Pen.from_mapplot_string('c64K64')
        self.assertEqual(p.line_color.rgb, (191, 255, 255))
        self.assertEqual(p.fill_color.rgb, (191, 191, 191))
        p = gxg.Pen(line_color='K')
        self.assertEqual(p.line_color.int_value, gxg.C_BLACK)
        self.assertTrue(p.line_color == gxg.Color(gxg.C_BLACK))
        p = gxg.Pen(line_color=gxg.C_WHITE)
        self.assertEqual(p.line_color.int_value, gxg.C_WHITE)
        self.assertTrue(p.line_color == gxg.Color(gxg.C_WHITE))
        # construct from an existing pen with overrides
        p = gxg.Pen.from_mapplot_string('r20b100k16R64K16')
        p = gxg.Pen(default=p, line_thick=0.5, fill_color='K')
        ms = p.mapplot_string
        self.assertEqual(ms, 'r4g0b84R0G0B0t500')
        p = gxg.Pen.from_mapplot_string(ms)
        self.assertEqual(p.mapplot_string, ms)
        # unknown keyword arguments raise GroupException
        self.assertRaises(gxg.GroupException, gxg.Pen, bad=1)
    def test_scaled(self):
        """Verify that the factor= argument scales pen and text dimension defaults."""
        self.start()
        p = gxg.Pen(factor=10)
        self.assertEqual(p.line_thick, 0.1)
        self.assertEqual(p.line_pitch, 5.0)
        self.assertEqual(p.pat_thick, 0.1)
        self.assertEqual(p.pat_size, 10.0)
        # factors compound when a scaled pen is used as the default
        p = gxg.Pen(default=p, factor=5)
        self.assertEqual(p.line_thick, 0.5)
        self.assertEqual(p.line_pitch, 25.0)
        self.assertEqual(p.pat_thick, 0.5)
        self.assertEqual(p.pat_size, 50.0)
        t = gxg.Text_def(factor=0.2)
        self.assertEqual(t.height, 0.05)
    def test_text(self):
        """Draw text with various Text_def settings and verify extents and the map CRC."""
        self.start()
        with gxmap.Map.new(data_area=(400000, 5000000, 500000, 5150000),
                           coordinate_system='WGS 84 / UTM zone 15N [geoid]') as map:
            map_file = map.file_name
            with gxv.View.open(map, 'base') as v:
                with gxg.Draw(v) as g:
                    g.rectangle(g.extent)
                    g.text('Text on base view')
                    g.text('Bigger, blue, higher',
                           (v.units_per_map_cm, v.units_per_map_cm),
                           text_def=gxg.Text_def(height=20, color='B', font='Times New Roman'))
                    g.text('Bigger, blue, angled, italics',
                           (10, 25),
                           angle=60,
                           text_def=gxg.Text_def(height=20, color='B', font='Calibri', italics=True))
                    # extent computed from the group's current text_def
                    g.text_def = gxg.Text_def(height=20, color='B', font='Calibri', italics=True)
                    tex = g.text_extent('Bigger, blue, angled, italics')
                    self.assertAlmostEqual(209.9629, tex.dimension_xy[0], 3)
                    self.assertAlmostEqual(334.6408, tex.dimension_xy[1], 3)
                    # or from an explicit Text_def passed to text_extent
                    tex = g.text_extent('Bigger, blue, angled, italics',
                                        gxg.Text_def(height=10, font='Calibri', italics=True))
                    self.assertAlmostEqual(104.98147, tex.dimension_xy[0], 3)
                    self.assertAlmostEqual(167.32042, tex.dimension_xy[1], 3)
        self.crc_map(map_file)
    def test_text_1(self):
        """Draw text at each reference-point anchor (center/bottom/top/left/right) and CRC-check."""
        self.start()
        with gxmap.Map.new(data_area=(400000, 5000000, 500000, 5050000),
                           coordinate_system='WGS 84 / UTM zone 15N [geoid]') as map:
            map_file = map.file_name
            with gxv.View.open(map, '*data') as v:
                with gxg.Draw(v) as g:
                    g.rectangle(g.extent)
                    ex = g.extent
                    width = ex[2] - ex[0]
                    height = ex[3] - ex[1]
                    # center of the data extent
                    cxy = (ex[0] + width / 2, ex[1] + height / 2)
                    td = gxg.Text_def(height=width / 20, color='K128', font='sr.gfn', weight=gxg.FONT_WEIGHT_XBOLD)
                    self.assertTrue(td == gxg.Text_def(height=width / 20, color='K128', font='sr.gfn', weight=gxg.FONT_WEIGHT_XBOLD))
                    self.assertEqual(td.mapplot_string, '5227.3,,,0,"sr"')
                    # cross-hair through the extent center for visual reference
                    g.rectangle(ex)
                    g.line((ex[0], cxy[1], ex[2], cxy[1]))
                    g.line((cxy[0], ex[1], cxy[0], ex[3]))
                    g.text('Centered',
                           cxy,
                           text_def=td,
                           reference=gxg.REF_CENTER)
                    g.text('Bottom',
                           (cxy[0], ex[1]),
                           text_def=td,
                           reference=gxg.REF_BOTTOM_CENTER)
                    g.text('Top',
                           (cxy[0], ex[3]),
                           text_def=td,
                           reference=gxg.REF_TOP_CENTER)
                    # side labels are rotated to run along the edges
                    g.text('Left',
                           (ex[0], cxy[1]),
                           text_def=td,
                           angle=90,
                           reference=gxg.REF_TOP_CENTER)
                    g.text('Right',
                           (ex[2], cxy[1]),
                           text_def=td,
                           angle=-90,
                           reference=gxg.REF_TOP_CENTER)
        self.crc_map(map_file)
    def test_text_multiline(self):
        """Draw multi-line (newline-separated) centered text and verify the map CRC."""
        self.start()
        with gxmap.Map.new(data_area=(400000, 5000000, 500000, 5050000),
                           coordinate_system='WGS 84 / UTM zone 15N [geoid]') as map:
            map_file = map.file_name
            with gxv.View.open(map, '*data') as v:
                with gxg.Draw(v) as g:
                    g.rectangle(g.extent)
                    ex = v.extent_clip
                    width = ex[2] - ex[0]
                    height = ex[3] - ex[1]
                    cxy = (ex[0] + width / 2, ex[1] + height / 2)
                    td = gxg.Text_def(height=width / 20, color='K128', font='sr.gfn', weight=gxg.FONT_WEIGHT_XBOLD)
                    # cross-hair through the center for visual reference
                    g.rectangle(ex)
                    g.line((ex[0], cxy[1], ex[2], cxy[1]))
                    g.line((cxy[0], ex[1], cxy[0], ex[3]))
                    g.text('Centered\nline2\nand another',
                           cxy,
                           text_def=td,
                           reference=gxg.REF_CENTER)
        self.crc_map(map_file)
    def test_locate_group(self):
        """Reposition a drawn group with Group.locate() and verify the map CRC."""
        self.start()
        with gxmap.Map.new(data_area=(400000, 5000000, 500000, 5050000),
                           coordinate_system='WGS 84 / UTM zone 15N [geoid]') as map:
            map_file = map.file_name
            with gxv.View.open(map, '*data') as v:
                with gxg.Draw(v) as g:
                    g.rectangle(v.extent_clip)
                # lower-left quadrant of the clip extent
                rect = gxgm.Point2((v.extent_clip[0], v.extent_clip[1],
                                    (v.extent_clip[2] + v.extent_clip[0]) * 0.5,
                                    (v.extent_clip[3] + v.extent_clip[1]) * 0.5))
                with gxg.Draw(v, 'a') as g:
                    g.rectangle(rect)
                with gxg.Draw(v, 'b') as g:
                    # third group created in the view (0-based number 2)
                    self.assertEqual(g.number, 2)
                    g.rectangle(rect, pen="b")
                    g.locate((450000, 5025000),
                             reference=gxg.REF_TOP_CENTER)
        self.crc_map(map_file)
    def test_color_bar(self):
        """Draw color-bar legends at each placement (left/bottom/top) and verify the CRC.

        Also checks that a grid's unit_of_measure propagates through the aggregate layers.
        """
        self.start()
        folder, files = gsys.unzip(os.path.join(os.path.dirname(self._test_case_py), 'testgrids.zip'),
                                   folder=self.gx.temp_folder())
        grid_file = os.path.join(folder, 'test_agg_utm.grd')
        map_file = os.path.join(self.gx.temp_folder(), "test_agg_utm")
        with gxgrd.Grid.open(grid_file, mode=gxgrd.FILE_READWRITE) as grd:
            cs = grd.coordinate_system
            area = grd.extent_2d()
            # stamp a unit of measure on the grid; asserted on the aggregate below
            grd.unit_of_measure = 'maki'
        with gxmap.Map.new(map_file, fixed_size=False,
                           data_area=area, media="A4", margins=(7, 7, 2.5, 2.5),
                           coordinate_system=cs, overwrite=True) as gmap:
            map_file = gmap.file_name
            with gxv.View.open(gmap, "base") as v:
                with gxg.Draw(v, 'line') as g:
                    g.rectangle(v.extent_clip, pen=g.new_pen(line_thick=1, line_color='K'))
            with gxv.View.open(gmap, "data") as v:
                with gxg.Draw(v, 'line') as g:
                    g.rectangle(v.extent_clip, pen=g.new_pen(line_thick=0.1, line_color='G'))
                    g.rectangle(v.extent_all, pen=g.new_pen(line_thick=0.1, line_color='B'))
                with gxagg.Aggregate_image.new(grid_file) as agg:
                    # layer can be addressed by index or by file name
                    self.assertEqual(agg.layer_unit_of_measure(0), 'maki')
                    self.assertEqual(agg.layer_unit_of_measure(agg.layer_file_names[0]), 'maki')
                    self.assertEqual(agg.layer_color_map(0).unit_of_measure, 'maki')
                    # default placement, then explicit left/bottom/top placements
                    gxg.legend_color_bar(v, 'color_legend', agg.layer_color_map())
                    gxg.legend_color_bar(v, 'color_legend', agg.layer_color_map(),
                                         bar_location=gxg.COLOR_BAR_LEFT)
                    gxg.legend_color_bar(v, 'bottom', agg.layer_color_map(),
                                         bar_location=gxg.COLOR_BAR_BOTTOM,
                                         box_size=0.5,
                                         location=(1, -0.1),
                                         annotation_offset=0.1)
                    gxg.legend_color_bar(v, 'top', agg.layer_color_map(),
                                         bar_location=gxg.COLOR_BAR_TOP,
                                         box_size=0.5,
                                         bar_width=0.1,
                                         location=0.5,
                                         interval_1 = 50,
                                         annotation_offset=0.1)
        self.crc_map(map_file)
    def test_color_bar_existing_agg(self):
        """Re-open an aggregate group already stored in a map and build a legend from it."""
        self.start()
        folder, files = gsys.unzip(os.path.join(os.path.dirname(self._test_case_py), 'testgrids.zip'),
                                   folder=self.gx.temp_folder())
        grid_file = os.path.join(folder, 'test_agg_utm.grd')
        map_file = os.path.join(self.gx.temp_folder(), "test_agg_utm")
        with gxgrd.Grid(grid_file) as grd:
            cs = grd.coordinate_system
            area = grd.extent_2d()
        with gxmap.Map.new(map_file, fixed_size=False,
                           data_area=area, media="A4", margins=(2, 10, 2, 1),
                           coordinate_system=cs, overwrite=True) as gmap:
            map_file = gmap.file_name
            with gxv.View.open(gmap, "base") as v:
                with gxg.Draw(v, 'line') as g:
                    g.rectangle(v.extent_clip, pen=g.new_pen(line_thick=1, line_color='K'))
            with gxv.View.open(gmap, "data") as v:
                with gxg.Draw(v, 'line') as g:
                    g.rectangle(v.extent_clip, pen=g.new_pen(line_thick=0.1, line_color='G'))
                    g.rectangle(v.extent_all, pen=g.new_pen(line_thick=0.1, line_color='B'))
                with gxagg.Aggregate_image.new(grid_file) as agg:
                    with gxg.Aggregate_group.new(v, agg) as g:
                        # remember the stored group name for the re-open below
                        agg_group_name = g.name
            with gxv.View.open(gmap, "data") as v:
                # re-open the persisted aggregate group and take its color map
                with gxg.Aggregate_group.open(v, agg_group_name) as g:
                    gxg.legend_color_bar(v, 'color_legend', g.agg.layer_color_map())
        self.crc_map(map_file)
    def test_properties(self):
        """Verify the group 'visible' property can be read and toggled."""
        self.start()
        with gxmap.Map.new() as map:
            with gxv.View.open(map, "base") as v:
                with gxg.Draw(v, 'edge') as g:
                    g.rectangle(v.extent_clip, pen=g.new_pen(line_thick=1, line_color='K'))
            with gxv.View.open(map, "data") as v:
                with gxg.Draw(v, 'edge') as g:
                    g.rectangle(v.extent_clip, pen=g.new_pen(line_thick=1, line_color='B'))
                    # groups are visible by default
                    self.assertTrue(g.visible)
                    g.visible = False
                    self.assertFalse(g.visible)
    def test_graticule(self):
        """Draw line, dot and cross graticule styles side by side and verify the map CRC."""
        self.start()
        test_map = os.path.join(self.gx.temp_folder(), "test")
        with gxmap.Map.new(test_map, overwrite=True) as map:
            map_file = map.file_name
            map.delete_view('data')
            with gxv.View.new(map, "my_data_1", map_location=(2, 3), area=(0, 0, 1000, 1500), scale=10000) as v:
                with gxg.Draw(v, 'line') as g:
                    g.rectangle(v.extent_clip,
                                pen=g.new_pen(line_thick=5, line_color='G'))
                    g.graticule(style=gxg.GRATICULE_LINE, pen=g.new_pen(line_thick=5))
                ex1 = v.extent_group('line', unit=gxv.UNIT_MAP)
            with gxv.View.new(map, "my_data_2", map_location=(15, 3), area=(0, 0, 1000, 1500), scale=10000) as v:
                with gxg.Draw(v, 'line') as g:
                    g.rectangle(v.extent_clip,
                                pen=g.new_pen(line_thick=5, line_color='G'))
                    g.graticule(style=gxg.GRATICULE_DOT, pen=g.new_pen(line_thick=5))
                ex2 = v.extent_group('line', unit=gxv.UNIT_MAP)
            with gxv.View.new(map, "my_data_3", map_location=(28, 3), area=(0, 0, 1000, 1500), scale=10000) as v:
                with gxg.Draw(v, 'line') as g:
                    g.rectangle(v.extent_clip,
                                pen=g.new_pen(line_thick=5, line_color='G'))
                    g.graticule(style=gxg.GRATICULE_CROSS, pen=g.new_pen(line_thick=5))
                ex3 = v.extent_group('line', unit=gxv.UNIT_MAP)
            # combined surround area, in base-view units with a 2-unit margin
            # NOTE(review): the second term uses max(...) - 2 where min(...) - 2 looks
            # symmetric with the first term — confirm this asymmetry is intended
            area = (min(ex1[0], ex2[0], ex3[0])/10.0 - 2, max(ex1[1], ex2[1], ex3[1])/10.0 - 2,
                    max(ex1[2], ex2[2], ex3[2])/10.0 + 2, max(ex1[3], ex2[3], ex3[3])/10.0 + 2)
            with gxv.View.new(map, "my_base_view", area=area, scale=100.0) as v:
                with gxg.Draw(v, 'base_edge') as g:
                    g.rectangle(v.extent_clip, pen=g.new_pen(line_thick=0.1, line_color='R'))
            map.delete_view('base')
        self.crc_map(map_file)
    def test_ppoint_3d(self):
        """Draw a PPoint set as default points and as spheres in a 3D view; CRC-check."""
        self.start()
        plist = [[110, 5, 0],
                 [120, 20, 10],
                 [130, 15, 50],
                 [150, 50, 20],
                 [160, 70, 0],
                 [175, 35, 30],
                 [190, 65, 80],
                 [220, 50, 90],
                 [235, 18.5, 100]]
        pp = gxgm.PPoint(plist)
        with gxv.View_3d.new(gxsys.func_name(), overwrite=True) as v:
            file_name = v.file_name
            with gxg.Draw_3d(v) as g:
                g.pen = gxg.Pen(line_color='R')
                g.polypoint_3d(pp)
                # shift up and re-draw as spheres so both styles appear
                pp += (0, 0, 20)
                g.polypoint_3d(pp, style=gxg.POINT_STYLE_SPHERE, pen=gxg.Pen(line_color='G', line_thick=5))
        try:
            self.crc_map(file_name)
        finally:
            gxmap.delete_files(file_name)
    def test_pp_3d(self):
        """Draw points and polylines in every 3D style (points, spheres, lines, tubes); CRC-check."""
        self.start()
        plist = [[110, 5, 0],
                 [120, 20, 10],
                 [130, 15, 50],
                 [150, 50, 20],
                 [160, 70, 0],
                 [175, 35, 30],
                 [190, 65, 80],
                 [220, 50, 90],
                 [235, 18.5, 100]]
        with gxv.View_3d.new(gxsys.func_name(), overwrite=True) as v:
            file_name = v.file_name
            with gxg.Draw_3d(v) as g:
                pp = gxgm.PPoint(plist)
                # each variant is shifted up by 10 so all renderings are visible
                g.pen = gxg.Pen(line_color='R')
                g.polypoint_3d(pp)
                pp += (0, 0, 10)
                g.polypoint_3d(pp, style=gxg.POINT_STYLE_SPHERE, pen=gxg.Pen(line_color='G', line_thick=4))
                pp += (0, 0, 10)
                g.pen = gxg.Pen(line_color='R')
                g.polyline_3d(pp)
                pp += (0, 0, 10)
                g.pen = gxg.Pen(line_color='C', line_thick=3)
                g.polyline_3d(pp, style=gxg.LINE3D_STYLE_TUBE)
                pp += (0, 0, 10)
                g.polyline_3d(pp, style=gxg.LINE3D_STYLE_TUBE_JOINED, pen=gxg.Pen(line_color='K64', line_thick=4))
        try:
            self.crc_map(file_name)
        finally:
            gxmap.delete_files(file_name)
    def test_color_map(self):
        """Exercise Color_map construction, indexing, zoning modes, brightness and file I/O."""
        self.start()
        # default table
        cm = gxg.Color_map()
        self.assertEqual(cm.length, 39)
        self.assertFalse(cm.initialized)
        # explicit length; uninitialized entries are black
        cm = gxg.Color_map(16)
        self.assertEqual(cm.length, 16)
        self.assertEqual(cm[0][1], gxg.Color(gxg.C_BLACK))
        self.assertEqual(cm[cm.length-1], (None, gxg.Color(gxg.C_BLACK)))
        # entries are (boundary_value, Color) and can be replaced by index
        cm[4] = (cm[4][0], gxg.Color(gxg.C_GREEN))
        self.assertEqual(cm[4][1].rgb, (0, 255, 0))
        self.assertFalse(cm.initialized)
        self.assertTrue(isinstance(cm.gxitr, gxapi.GXITR))
        # named table; set_sequential assigns boundary values
        cm = gxg.Color_map('grey')
        self.assertFalse(cm.initialized)
        cm.set_sequential()
        self.assertTrue(cm.initialized)
        self.assertEqual(cm.length, 32)
        self.assertEqual(cm[0][1].rgb, (31, 31, 31))
        self.assertEqual(cm[cm.length-1][1].rgb, (255, 255, 255))
        # value-to-color lookup honors bin boundaries
        self.assertEqual(cm.color_of_value(0), cm[0][1])
        self.assertEqual(cm.color_of_value(7.0), cm[7][1])
        self.assertEqual(cm.color_of_value(7.000001), cm[8][1])
        # brightness shifts colors toward white (positive) or black (negative)
        self.assertEqual(cm.brightness, 0.)
        cm.brightness = 0.5
        self.assertEqual(cm.brightness, 0.5)
        self.assertEqual(cm[0][1].rgb, (143, 143, 143))
        self.assertEqual(cm[cm.length - 1][1].rgb, (255, 255, 255))
        cm.brightness = -0.25
        self.assertEqual(cm.brightness, -0.25)
        self.assertEqual(cm[0][1].rgb, (24, 24, 24))
        self.assertEqual(cm[cm.length - 1][1].rgb, (192, 192, 192))
        cm.brightness = 0
        self.assertEqual(cm[0][1].rgb, (31, 31, 31))
        self.assertEqual(cm[cm.length - 1][1].rgb, (255, 255, 255))
        # linear zoning, with inner or outer limits and optional contour interval
        cm.set_linear(4, 45)
        self.assertEqual(cm.length, 32)
        self.assertEqual(cm[0][0], 4)
        self.assertEqual(cm[30][0], 45)
        cm.set_linear(4, 45, inner_limits=False)
        self.assertEqual(cm.length, 32)
        self.assertEqual(cm[0][0], 5.28125)
        self.assertEqual(cm[30][0], 43.71875)
        cm.set_linear(5, 50, contour_interval=5)
        self.assertEqual(cm.length, 11)
        # logarithmic zoning
        cm = gxg.Color_map('grey')
        cm.set_logarithmic(0.0001,1000)
        self.assertEqual(cm.length, 32)
        cm.set_logarithmic(0.0001,1000, contour_interval=10)
        self.assertEqual(cm.length, 7)
        cm = gxg.Color_map('grey')
        cm.set_logarithmic(0.000023,18000, contour_interval=100)
        self.assertEqual(cm.length, 5)
        # normal-distribution zoning
        cm = gxg.Color_map()
        cm.set_normal(25, 55000)
        self.assertAlmostEqual(cm[cm.length//2][0], 55000.811582690316)
        # round-trip through a saved ITR file
        itr = cm.save_file()
        cm2 = gxg.Color_map(itr)
        self.assertTrue(cm == cm2)
        # saving an uninitialized map produces a .tbl file
        tbl = gxg.Color_map().save_file()
        self.assertEqual(os.path.splitext(tbl)[1], '.tbl')
        cm = gxg.Color_map(tbl)
        self.assertFalse(cm.initialized)
    def test_color_symbols(self):
        """Create, re-open and legend color-symbol groups on a 2D map; verify the CRC."""
        self.start()
        # ((x, y), value) records
        data = [((0, 0), 1),
                ((10, 0), 2),
                ((0, 10), 3),
                ((10, 10), 4)]
        # ((x, y, z), value1, value2) records, including None entries
        data2 = [((0, 0, 45), 1, 4),
                 ((10, 0, 8), None, None),
                 ((0, 10, 16), 3, 75),
                 ((None, 10, -22), 4, 7)]
        cmap = gxg.Color_map()
        cmap.set_linear(0, 5, contour_interval=1)
        with gxmap.Map.new(data_area=(-1, -1, 11, 11), scale=100) as map:
            map_file = map.file_name
            with gxv.View.open(map, '*data') as v:
                with gxg.Draw(v) as g:
                    g.rectangle(g.extent)
                gxg.Color_symbols_group.new(v, 'outer_symbols', data, cmap, unit_of_measure='maki').close()
                # re-open the stored group and confirm the unit of measure round-trips
                with gxg.Color_symbols_group.open(v, 'outer_symbols') as cs:
                    cm = cs.color_map()
                    self.assertEqual(cm.unit_of_measure, 'maki')
                    self.assertEqual(cm.unit_of_measure, cs.unit_of_measure)
                cmap = gxg.Color_map()
                cmap.set_linear(0, 5, contour_interval=1)
                with gxg.Color_symbols_group.new(v, 'mark', data2, cmap,
                                                 symbol=gxg.SYMBOL_BOX,
                                                 symbol_def=gxg.Text_def(font='symbols.gfn',
                                                                         height=0.15,
                                                                         color=gxg.C_WHITE,
                                                                         weight=gxg.FONT_WEIGHT_ULTRALIGHT)) as cs:
                    nv = cs.name
                # build a legend from the re-opened group's color map
                with gxg.Color_symbols_group.open(v, nv) as cs:
                    gxg.legend_color_bar(v, 'symbol_legend', cs.color_map())
        self.crc_map(map_file)
def test_color_symbols_from_array(self):
self.start()
data = [(0, 0, 1),
(10, 0, 2),
(0, 10, 3),
(10, 10, 4)]
cmap = gxg.Color_map()
cmap.set_linear(0, 5, contour_interval=1)
with gxmap.Map.new(data_area=(-1, -1, 11, 11), scale=100) as map:
map_file = map.file_name
with gxv.View.open(map, '*data') as v:
with gxg.Draw(v) as g:
g.rectangle(g.extent)
gxg.Color_symbols_group.new(v, 'outer_symbols',
np.array(data), cmap,
unit_of_measure='maki').close()
cmap = gxg.Color_map()
cmap.set_linear(0, 5, contour_interval=1)
self.crc_map(map_file)
    def test_color_symbols_3d(self):
        """Create color-symbol groups in a 3D view, including records with None values."""
        self.start()
        # ((x, y), value) records
        data = [((0, 0), 1),
                ((10, 0), 2),
                ((0, 10), 3),
                ((10, 10), 4)]
        # ((x, y, z), value1, value2) records, including None entries
        data2 = [((0, 0, 45), 1, 4),
                 ((10, 0, 8), None, None),
                 ((0, 10, 16), 3, 75),
                 ((None, 10, -22), 4, 7)]
        cmap = gxg.Color_map()
        cmap.set_linear(0, 5, contour_interval=1)
        with gxv.View_3d.new() as v:
            v3d_file = v.file_name
            with gxg.Draw(v) as g:
                g.rectangle(g.extent)
            gxg.Color_symbols_group.new(v, 'outer_symbols', data, cmap, unit_of_measure='maki').close()
            cmap = gxg.Color_map('hotcycle')
            cmap.set_linear(0, 5, contour_interval=1)
            with gxg.Color_symbols_group.new(v, 'mark', data2, cmap,
                                             symbol=gxg.SYMBOL_BOX,
                                             symbol_def=gxg.Text_def(font='symbols.gfn',
                                                                     height=0.15,
                                                                     color=gxg.C_WHITE,
                                                                     weight=gxg.FONT_WEIGHT_ULTRALIGHT)) as cs:
                nv = cs.name
            with gxg.Color_symbols_group.open(v, nv) as cs:
                # second group created in the view
                self.assertEqual(cs.number, 2)
        self.crc_map(v3d_file)
def test_polydata_3d(self):
self.start()
def render_spheres(item, cmap_radius):
xyz, value = item
if None in xyz or value is None:
return None
cmap, radius = cmap_radius
cint = cmap.color_of_value(value)
return gxg.SYMBOL_3D_SPHERE, xyz, cint.int_value, radius
def render_cubes(point, size_color):
size, cint = size_color
half = size * 0.5
p2 = gxgm.Point2((point - (half, half, half), point + (half, half, half)))
return gxg.SYMBOL_3D_CUBE, p2, cint, None
def render_cylinders(point, size_color):
size, cint = size_color
half = size * 0.2
p2 = gxgm.Point2((point - (half, half, half), point + (half, half, half)))
return gxg.SYMBOL_3D_CYLINDER, p2, cint, size * 0.4
def render_cones(point, size_color):
size, cint = size_color
half = size * 0.5
p2 = gxgm.Point2((point - (half, half, half), point + (half, half, half)))
return gxg.SYMBOL_3D_CONE, p2, cint, size * 0.2
data = [((0, 0, 0), 1),
((10, 0, 5), 2),
((0, 10, -5), 3),
((0, None, -5), 99),
((0, 10, -5), None),
((10, 10, 10), 4)]
cmap = gxg.Color_map()
cmap.set_linear(0, 5, contour_interval=1)
for c in cmap:
if c[0]:
self.assertTrue(isinstance(c[0], float))
self.assertTrue(isinstance(c[1], gxg.Color))
with gxv.View_3d.new(area_2d=(-1, -1, 11, 11)) as v:
v3d_file = v.file_name
with gxg.Draw(v, 'rect') as g:
g.rectangle((0,0,10,10),
pen=gxg.Pen(line_color=gxg.C_BLACK,
line_thick=0.2,
fill_color=gxg.C_GREEN))
with gxg.Draw_3d(v, 'pipes') as g:
g.polyline_3d(((0,0,0), (10,0,0), (10,10,0), (0,10,0), (0,0,0)),
style=gxg.LINE3D_STYLE_TUBE_JOINED,
pen=gxg.Pen(line_color=gxg.C_GREY,
line_thick=0.2))
with gxg.Draw_3d(v, 'outer') as g:
g.polydata_3d(data, render_spheres, (cmap, 0.25))
pp = gxgm.PPoint(((5, 5, 5), (7, 5, 5), (7, 7, 7)))
g.polydata_3d(pp, render_cubes, (1, gxg.Color('y').int_value))
pp += (0, 0, 2)
g.polydata_3d(pp, render_cylinders, (1, gxg.Color('m').int_value))
pp += (0, 0, 2)
n = 0
g.polydata_3d(pp, render_cones, (1, gxg.Color('r255g128b128').int_value))
self.crc_map(v3d_file)
def test_polydata_3d_grd(self):
self.start()
def render_spheres(item, cmap_radius):
cmap, radius = cmap_radius
if not np.isnan(item[2]):
cint = cmap.color_of_value(item[2]).int_value
return gxg.SYMBOL_3D_SPHERE, item, cint, radius
folder, files = gsys.unzip(os.path.join(os.path.dirname(self._test_case_py), 'dem_small.zip'),
folder=self.gx.temp_folder())
grid_file = os.path.join(folder, 'dem_small.grd')
with gxgrd.Grid.open(grid_file) as grd:
data = grd.xyzv().reshape(-1, 4)
data[:, 2] = data[:, 3] * 3
data = data[:, 0:3]
cmap = gxg.Color_map()
try:
std = np.nanstd(data[:, 2])
mean = np.nanmean(data[:, 2])
cmap.set_normal(std, mean)
except:
cmap.set_linear(0, 1)
with gxv.View_3d.new(coordinate_system=grd.coordinate_system) as v:
v3d_file = v.file_name
with gxg.Draw_3d(v, 'dem_points') as g:
g.polydata_3d(data.reshape((-1, 3)), render_spheres, (cmap, 10 * v.units_per_map_cm))
p_min = gxgm.Point((np.nanmin(data[:, 0]), np.nanmin(data[:, 1]), np.nanmin(data[:, 2])))
p_max = gxgm.Point((np.nanmax(data[:, 0]), np.nanmax(data[:, 1]), np.nanmax(data[:, 2])))
extent = gxgm.Point2((p_min, p_max))
g.box_3d(extent,
wireframe=True,
pen=gxg.Pen(line_color='c', line_thick= 20 * v.units_per_map_cm))
self.crc_map(v3d_file)
    def test_plane_relief_surface(self):
        """Drape an aggregate and contours on a relief surface in a 3D view; CRC-check."""
        self.start()
        folder, files = gsys.unzip(os.path.join(os.path.dirname(self._test_case_py), 'dem_small.zip'),
                                   folder=self.gx.temp_folder())
        grid_file = os.path.join(folder, 'dem_small.grd')
        v3d_name = ''
        try:
            with gxv.View_3d.new("data",
                                 area_2d=gxgrd.Grid(grid_file).extent_2d(),
                                 coordinate_system=gxgrd.Grid(grid_file).coordinate_system,
                                 scale=5000,
                                 overwrite=True) as v:
                v3d_name = v.file_name
                # relieve the drawing plane using the DEM, clipped to [150, 250]
                v.set_plane_relief_surface(grid_file, base=200, scale=2, max=250, min=150, refine=2)
                gxg.Aggregate_group.new(v, gxagg.Aggregate_image.new(grid_file, shade=True, contour=20))
                gxg.contour(v, 'TMI_contour', grid_file)
            self.crc_map(v3d_name)
        finally:
            # always remove the 3dv file, even if the CRC check fails
            gxv.delete_files(v3d_name)
    def test_plane_contour(self):
        """Draw grid contours with an edge rectangle on a 2D map; CRC-check."""
        self.start()
        folder, files = gsys.unzip(os.path.join(os.path.dirname(self._test_case_py), 'dem_small.zip'),
                                   folder=self.gx.temp_folder())
        grid_file = os.path.join(folder, 'dem_small.grd')
        with gxmap.Map.new(data_area=gxgrd.Grid(grid_file).extent_2d(),
                           scale=20000,
                           inside_margin=0.1,
                           coordinate_system=gxgrd.Grid(grid_file).coordinate_system,
                           overwrite=True) as map:
            map_name = map.file_name
            with gxv.View.open(map, "data") as v:
                gxg.contour(v, 'TMI_contour', grid_file)
                with gxg.Draw(v, 'edge') as g:
                    g.rectangle((v.extent_clip), pen=gxg.Pen(line_thick=v.units_per_map_cm * 0.1))
        self.crc_map(map_name)
    def test_plane_contour_3d(self):
        """Draw grid contours with an edge rectangle in a 3D view; CRC-check."""
        self.start()
        folder, files = gsys.unzip(os.path.join(os.path.dirname(self._test_case_py), 'dem_small.zip'),
                                   folder=self.gx.temp_folder())
        grid_file = os.path.join(folder, 'dem_small.grd')
        v3d_name = ''
        try:
            with gxv.View_3d.new("data",
                                 area_2d=gxgrd.Grid(grid_file).extent_2d(),
                                 coordinate_system=gxgrd.Grid(grid_file).coordinate_system,
                                 scale=20000,
                                 overwrite=True) as v:
                v3d_name = v.file_name
                gxg.contour(v, 'TMI_contour', grid_file)
                with gxg.Draw(v, 'edge') as g:
                    g.rectangle((v.extent_clip), pen=gxg.Pen(line_thick=v.units_per_map_cm * 0.1))
            self.crc_map(v3d_name)
        finally:
            # always remove the 3dv file, even if the CRC check fails
            gxv.delete_files(v3d_name)
def test_polydata_3d_grd_cone(self):
self.start()
def render_spheres(item, cmap_radius):
cmap, radius = cmap_radius
if not np.isnan(item[2]):
cint = cmap.color_of_value(item[2]).int_value
item = gxgm.Point(item)
item2 = item + (0, radius, radius * 2)
return gxg.SYMBOL_3D_CONE, gxgm.Point2((item, item2)), cint, radius
folder, files = gsys.unzip(os.path.join(os.path.dirname(self._test_case_py), 'dem_small.zip'),
folder=self.gx.temp_folder())
grid_file = os.path.join(folder, 'dem_small.grd')
with gxgrd.Grid.open(grid_file) as grd:
data = grd.xyzv().reshape(-1, 4)
data[:, 2] = data[:, 3] * 3
data = data[:, 0:3]
cmap = gxg.Color_map()
try:
std = np.nanstd(data[:, 2])
mean = np.nanmean(data[:, 2])
cmap.set_normal(std, mean)
except:
cmap.set_linear(0, 1)
with gxv.View_3d.new(coordinate_system=grd.coordinate_system) as v:
v3d_file = v.file_name
with gxg.Draw_3d(v, 'dem_points') as g:
g.polydata_3d(data.reshape((-1, 3)), render_spheres, (cmap, 10 * v.units_per_map_cm))
p_min = gxgm.Point((np.nanmin(data[:, 0]), np.nanmin(data[:, 1]), np.nanmin(data[:, 2])))
p_max = gxgm.Point((np.nanmax(data[:, 0]), np.nanmax(data[:, 1]), np.nanmax(data[:, 2])))
extent = gxgm.Point2((p_min, p_max))
g.box_3d(extent,
wireframe=True,
pen=gxg.Pen(line_color='c', line_thick= 20 * v.units_per_map_cm))
self.crc_map(v3d_file)
def test_polydata_3d_grd_cylinder(self):
self.start()
def render_spheres(item, cmap_radius):
cmap, radius = cmap_radius
if not np.isnan(item[2]):
cint = cmap.color_of_value(item[2]).int_value
item = gxgm.Point(item)
item2 = item + (0, radius, radius * 2)
return gxg.SYMBOL_3D_CYLINDER, gxgm.Point2((item, item2)), cint, radius
folder, files = gsys.unzip(os.path.join(os.path.dirname(self._test_case_py), 'dem_small.zip'),
folder=self.gx.temp_folder())
grid_file = os.path.join(folder, 'dem_small.grd')
with gxgrd.Grid.open(grid_file) as grd:
data = grd.xyzv().reshape(-1, 4)
data[:, 2] = data[:, 3] * 3
data = data[:, 0:3]
cmap = gxg.Color_map()
try:
std = np.nanstd(data[:, 2])
mean = np.nanmean(data[:, 2])
cmap.set_normal(std, mean)
except:
cmap.set_linear(0, 1)
with gxv.View_3d.new(coordinate_system=grd.coordinate_system) as v:
v3d_file = v.file_name
with gxg.Draw_3d(v, 'dem_points') as g:
g.polydata_3d(data.reshape((-1, 3)), render_spheres, (cmap, 10 * v.units_per_map_cm))
p_min = gxgm.Point((np.nanmin(data[:, 0]), np.nanmin(data[:, 1]), np.nanmin(data[:, 2])))
p_max = gxgm.Point((np.nanmax(data[:, 0]), np.nanmax(data[:, 1]), np.nanmax(data[:, 2])))
extent = gxgm.Point2((p_min, p_max))
g.box_3d(extent,
wireframe=True,
pen=gxg.Pen(line_color='c', line_thick= 20 * v.units_per_map_cm))
self.crc_map(v3d_file)
    def test_contour(self):
        """Draw contours of a UTM grid over an aggregate image on an A4 map.

        Builds a map with base/data views, adds the grid as an aggregate
        group plus a contour group, then CRC-checks the rendered map.
        """
        self.start()
        folder, files = gsys.unzip(os.path.join(os.path.dirname(self._test_case_py), 'testgrids.zip'),
                                   folder=self.gx.temp_folder())
        grid_file = os.path.join(folder, 'test_agg_utm.grd')
        map_file = os.path.join(self.gx.temp_folder(), "test_agg_utm")
        with gxgrd.Grid(grid_file) as grd:
            # Capture coordinate system and extent before the grid closes.
            cs = grd.coordinate_system
            area = grd.extent_2d()
        with gxmap.Map.new(map_file,
                           data_area=area, media="A4", margins=(0, 10, 0, 0),
                           coordinate_system=cs, overwrite=True) as gmap:
            map_file = gmap.file_name
            with gxv.View.open(gmap, "base") as v:
                with gxg.Draw(v, 'line') as g:
                    # Black neat-line around the base view.
                    g.rectangle(v.extent_clip, pen=g.new_pen(line_thick=1, line_color='K'))
            with gxv.View.open(gmap, "data") as v:
                with gxg.Draw(v, 'line') as g:
                    # Thin red outline of the data area.
                    g.rectangle(area, pen=g.new_pen(line_thick=0.1, line_color='R'))
                with gxagg.Aggregate_image.new(grid_file) as agg:
                    with gxg.Aggregate_group.new(v, agg) as gagg:
                        self.assertEqual(gagg.name, str(agg))
                self.assertEqual(len(v.group_list_agg), 1)
                gxg.contour(v, 'contour', grid_file)
        self.crc_map(map_file)
    def test_contour2(self):
        """Contour test at a fixed 1:20000 scale with a colour-bar legend.

        Unlike test_contour, the aggregate is created with contour=10 and its
        layer colour map is used to draw a legend colour bar in the data view.
        """
        self.start()
        folder, files = gsys.unzip(os.path.join(os.path.dirname(self._test_case_py), 'testgrids.zip'),
                                   folder=self.gx.temp_folder())
        grid_file = os.path.join(folder, 'test_agg_utm.grd')
        map_file = os.path.join(self.gx.temp_folder(), "test_agg_utm")
        with gxgrd.Grid(grid_file) as grd:
            # Capture coordinate system and extent before the grid closes.
            cs = grd.coordinate_system
            area = grd.extent_2d()
        with gxmap.Map.new(map_file,
                           data_area=area, margins=(2, 10, 2, 2),
                           coordinate_system=cs, overwrite=True, scale=20000) as gmap:
            map_file = gmap.file_name
            with gxv.View.open(gmap, "base") as v:
                with gxg.Draw(v, 'line') as g:
                    g.rectangle(v.extent_clip, pen=g.new_pen(line_thick=1, line_color='K'))
            with gxv.View.open(gmap, "data") as v:
                with gxg.Draw(v, 'line') as g:
                    g.rectangle(area, pen=g.new_pen(line_thick=0.1, line_color='R'))
                with gxagg.Aggregate_image.new(grid_file, contour=10) as agg:
                    # Colour map and name are needed after the agg closes.
                    cmap = agg.layer_color_map()
                    cname = agg.name
                    with gxg.Aggregate_group.new(v, agg) as gagg:
                        self.assertEqual(gagg.name, str(agg))
                gxg.legend_color_bar(v, cname, cmap)
                self.assertEqual(len(v.group_list_agg), 1)
                gxg.contour(v, 'contour', grid_file)
        self.crc_map(map_file)
    def test_contour_parameters(self):
        """Exercise both forms of gxg.contour's *parameters* argument.

        First call passes the positional tuple form (legacy control-file
        fields); second call passes the structured dict form with explicit
        per-level contour attributes.
        """
        self.start()
        folder, files = gsys.unzip(os.path.join(os.path.dirname(self._test_case_py), 'testgrids.zip'),
                                   folder=self.gx.temp_folder())
        grid_file = os.path.join(folder, 'test_agg_utm.grd')
        map_file = os.path.join(self.gx.temp_folder(), "test_agg_utm")
        with gxgrd.Grid(grid_file) as grd:
            # Capture coordinate system and extent before the grid closes.
            cs = grd.coordinate_system
            area = grd.extent_2d()
        with gxmap.Map.new(map_file,
                           data_area=area, margins=(2, 10, 2, 2),
                           coordinate_system=cs, overwrite=True, scale=20000) as gmap:
            map_file = gmap.file_name
            with gxv.View.open(gmap, "base") as v:
                with gxg.Draw(v, 'line') as g:
                    g.rectangle(v.extent_clip, pen=g.new_pen(line_thick=1, line_color='K'))
            with gxv.View.open(gmap, "data") as v:
                with gxg.Draw(v, 'line') as g:
                    g.rectangle(area, pen=g.new_pen(line_thick=0.1, line_color='R'))
                # Tuple form: fields 7-9 presumably set minor/major/label
                # intervals (10/50/250) - TODO confirm against gxg.contour docs.
                gxg.contour(v, '_250', grid_file, parameters=('', '', '', '', '', '', '10', '50', '250'))
                # Dict form: explicit contour levels with per-level attributes.
                gxg.contour(v, '_260_270', grid_file,
                            parameters={'levels': {'levopt': 1},
                                        'contours': [{'cint': 260, 'label': 0, 'catt': 'a=rt50'},
                                                     {'cint': 270, 'label': 1, 'catt': 'b=gt1000'},
                                                     {'cint': 280, 'label': 1, 'catt': 'c=br100g100t500'}]})
        self.crc_map(map_file)
def test_color_str(self):
self.start()
self.assertEqual(gxg.color_from_string("R"), 33554687)
self.assertEqual(gxg.color_from_string("H255S127V32"), 18907135)
def test_group_properties(self):
self.start()
rect = gxgm.Point2((0,0,10,5))
with gxmap.Map.new(data_area=rect.extent_xy) as gmap:
with gxv.View.new(gmap, "data") as v:
gxg.Draw(v, 'rect').rectangle(rect)
self.assertTrue(len(v.group_list), 1)
gxg.Draw(v, 'rect').rectangle(rect)
self.assertTrue(len(v.group_list), 1)
gxg.Draw(v, 'rect', mode=gxg.NEW).rectangle(rect)
self.assertTrue(len(v.group_list), 2)
self.assertTrue('rect_1' in v.group_list)
gxg.Draw(v, 'rect_1', mode=gxg.REPLACE).rectangle(rect)
self.assertTrue(len(v.group_list), 2)
self.assertTrue('rect_1' in v.group_list)
with gxg.Draw(v, 'property_test') as g:
self.assertEqual(g.group_opacity, 1.0)
g.group_opacity = 0.25
self.assertEqual(g.group_opacity, 0.25)
g.group_opacity = -50
self.assertEqual(g.group_opacity, 0.)
g.group_opacity = 5
self.assertEqual(g.group_opacity, 1.)
self.assertFalse(g.group_3d)
self.assertEqual(g.name, 'property_test')
self.assertEqual(g.view.name, 'data')
    @unittest.skip('WIP see issue #73')
    def test_surface(self):
        """Work-in-progress: render a small triangulated surface in a 3D view.

        Skipped pending issue #73; builds a 7-vertex / 6-face mesh, draws it
        via the private _surface helper, then opens the result in the viewer
        for manual inspection (no CRC check yet).
        """
        self.start()
        # Vertex positions (x, y, z) of the test mesh.
        verts = np.array([[0, 0, 0],
                          [5, 0, 0],
                          [5, 5, 0],
                          [0, 3, 5],
                          [2.5, 2, 10],
                          [-3, 6, 8],
                          [-4, 0, 12]], dtype=np.float64)
        # Triangles as index triples into verts.
        faces = np.array([[0, 1, 2],
                          [0, 2, 3],
                          [3, 2, 4],
                          [1, 2, 4],
                          [3, 4, 5],
                          [6, 4, 5]], dtype=np.int32)
        with gxv.View_3d.new() as v3d:
            v3d_file = v3d.file_name
            with gxg.Draw_3d(v3d, 'Surface') as g:
                # NOTE(review): _surface is a private API - intentional while WIP.
                g._surface(faces, verts)
        image_file = gxmap.Map.open(v3d_file).image_file(pix_width=800)
        # Manual verification step; blocks until the viewer window closes.
        gxviewer.view_document(v3d_file, wait_for_close=True)
        pass
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
f7f48a2ec24bfc793c81134fe471b959c169fcdd | 5,250 | py | Python | nethud/proto/tee.py | ryansb/netHUD | 87f80a1beccad50c832028e2e57105fe277d37d6 | [
"MIT"
] | 1 | 2015-11-05T14:34:01.000Z | 2015-11-05T14:34:01.000Z | nethud/proto/tee.py | ryansb/netHUD | 87f80a1beccad50c832028e2e57105fe277d37d6 | [
"MIT"
] | null | null | null | nethud/proto/tee.py | ryansb/netHUD | 87f80a1beccad50c832028e2e57105fe277d37d6 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from twisted.internet import reactor, defer
from twisted.internet.protocol import Protocol, ClientFactory, Factory
from nethud.controller import Controller
# Prefer the faster ultrajson if installed; fall back to the stdlib json.
# Catch only ImportError - the original bare `except:` would also have
# swallowed SystemExit/KeyboardInterrupt raised during import.
try:
    import ultrajson as json
except ImportError:
    import json
"""
tee.py
nethud.proto.tee
tee.py acts kinda like a tee and copies all data going acrossed a connection
between the a nethack client and the nethack server. It will do a tiny bit of
processing to add a username to every message copied and sent through the tee
[Nethack Server]
----
| |
| |_______
| _______| [NetHUD] (NOTE: the NetHUD bit is replacable)
| |
| |
----
[Nethack Client]
NOTE: This is heavily inspired by https://gist.github.com/1878983
"""
class TeeFromClientProtocol(Protocol):
    """Server-side protocol the nethack client connects to.

    Forwards everything the client sends on to the real nethack server and
    copies auth-related messages to the HUD.
    """
    def connectionMade(self):
        """Set up queues and outbound connections when a client attaches.

        Three DeferredQueues link the pieces together:
          * incoming_queue - client -> nethack server
          * outgoing_queue - nethack server -> client
          * hud_queue      - copies of messages destined for the HUD
        """
        self.outgoing_queue = defer.DeferredQueue()
        self.outgoing_queue.get().addCallback(self.dataFromNetHack)
        self.incoming_queue = defer.DeferredQueue()
        self.hud_queue = defer.DeferredQueue()
        toNetHackFactory = TeeToNetHackFactory(self.incoming_queue,
                self.outgoing_queue, self.hud_queue)
        # This factory spins up the connection to the actual nethack server.
        hudToController = TeeToHUDController(self.hud_queue)
        # This object drains hud_queue and forwards each message to the HUD.
        # NOTE(review): the local reference is dropped when this method
        # returns; presumably the pending hud_queue.get() Deferred keeps the
        # controller alive - verify.
        reactor.connectTCP("games-ng.csh.rit.edu", 53421, toNetHackFactory)
    def dataFromNetHack(self, data):
        """Relay one message from the nethack server back to the client."""
        self.transport.write(data)
        # Re-arm the callback for the next queued message.
        self.outgoing_queue.get().addCallback(self.dataFromNetHack)
    def dataReceived(self, data):
        """Forward client data to the server; copy auth messages to the HUD.

        Only messages containing "auth" are mirrored to the HUD; other
        client traffic is assumed irrelevant to it.
        """
        self.incoming_queue.put(data)
        if "auth" in data:
            self.hud_queue.put(data)
class TeeToNetHackProtocol(Protocol):
    """Client-side protocol connected to the real nethack server.

    Returns every server message to the player's client and mirrors a copy
    (with merged auth data) to the HUD.
    """
    def connectionMade(self):
        """Grab the shared queues from the factory and start draining input."""
        self.outgoing_queue = self.factory.outgoing_queue
        self.incoming_queue = self.factory.incoming_queue
        self.hud_queue = self.factory.hud_queue
        self.incoming_queue.get().addCallback(self.dataFromNetHackClient)
        # Accumulates partial JSON across deliveries until it parses.
        self.data_buffer = ""
        # First "auth" message is kept so a later auth fragment can be merged
        # into it before the combined packet is sent to the HUD.
        self.authPacket = None
    def dataFromNetHackClient(self, data):
        """Write one queued client message to the server, then re-arm."""
        self.transport.write(data)
        self.incoming_queue.get().addCallback(self.dataFromNetHackClient)
    def dataReceived(self, data):
        """Fan server output out to the client queue and the HUD queue.

        Partial JSON is buffered until a parseable message accumulates.
        NOTE(review): this assumes at most one JSON document per delivery;
        two concatenated documents would never parse - confirm the server's
        framing before relying on this.
        """
        if self.data_buffer:
            data = self.data_buffer + data
            self.data_buffer = ''
        try:
            data = json.loads(data)
        except ValueError:
            # We probably just didn't get all of it; wait for more bytes.
            self.data_buffer = data
            return
        self.outgoing_queue.put(json.dumps(data))
        jData = data
        if "auth" in jData and not self.authPacket:
            self.authPacket = jData
        elif "auth" in jData and self.authPacket:
            # Merge the auth sub-dict into the first auth packet and send the
            # combined message to the HUD.
            self.authPacket.update(jData['auth'])
            self.hud_queue.put(json.dumps(self.authPacket))
        else:
            self.hud_queue.put(json.dumps(data))
class TeeToHUDController(object):
    """Drains the tee's hud_queue and pushes each message out to the HUD.

    Tracks the username seen in auth messages so every forwarded message is
    attributed to the right user via Controller.send_message.
    """
    def __init__(self, hud_queue):
        self.hud_queue = hud_queue
        self.hud_queue.get().addCallback(self.dataFromTeeReceived)
        self.user = ""
    def dataFromTeeReceived(self, data):
        """Forward one queued tee message to the HUD, then re-arm."""
        parsed = json.loads(data)
        # Remember the username from auth messages for later attribution.
        if "auth" in parsed and 'username' in parsed['auth']:
            self.user = parsed['auth']['username']
        Controller.send_message(self.user, data)
        self.hud_queue.get().addCallback(self.dataFromTeeReceived)
class TeeToNetHackFactory(ClientFactory):
    """Builds TeeToNetHackProtocol instances wired to the shared tee queues."""
    protocol = TeeToNetHackProtocol
    def __init__(self, incoming_queue, outgoing_queue, hud_queue):
        # Queues are shared with the client-facing protocol; each spawned
        # protocol picks them up from this factory in connectionMade().
        self.incoming_queue = incoming_queue
        self.outgoing_queue = outgoing_queue
        self.hud_queue = hud_queue
### Manual testing entry point ###
def main():
    """Run a standalone tee listener on port 12435 (for manual testing)."""
    factory = Factory()
    factory.protocol = TeeFromClientProtocol
    reactor.listenTCP(12435, factory, interface="0.0.0.0")
    reactor.run()
if __name__ == "__main__":
    main()
| 30.882353 | 79 | 0.653333 | from __future__ import unicode_literals
from twisted.internet import reactor, defer
from twisted.internet.protocol import Protocol, ClientFactory, Factory
from nethud.controller import Controller
try:
import ultrajson as json
except:
import json
class TeeFromClientProtocol(Protocol):
def connectionMade(self):
self.outgoing_queue = defer.DeferredQueue()
self.outgoing_queue.get().addCallback(self.dataFromNetHack)
self.incoming_queue = defer.DeferredQueue()
self.hud_queue = defer.DeferredQueue()
toNetHackFactory = TeeToNetHackFactory(self.incoming_queue,
self.outgoing_queue, self.hud_queue)
hudToController = TeeToHUDController(self.hud_queue)
reactor.connectTCP("games-ng.csh.rit.edu", 53421, toNetHackFactory)
def dataFromNetHack(self, data):
self.transport.write(data)
self.outgoing_queue.get().addCallback(self.dataFromNetHack)
def dataReceived(self, data):
self.incoming_queue.put(data)
if "auth" in data:
self.hud_queue.put(data)
class TeeToNetHackProtocol(Protocol):
def connectionMade(self):
self.outgoing_queue = self.factory.outgoing_queue
self.incoming_queue = self.factory.incoming_queue
self.hud_queue = self.factory.hud_queue
self.incoming_queue.get().addCallback(self.dataFromNetHackClient)
self.data_buffer = ""
self.authPacket = None
def dataFromNetHackClient(self, data):
self.transport.write(data)
self.incoming_queue.get().addCallback(self.dataFromNetHackClient)
def dataReceived(self, data):
if self.data_buffer:
data = self.data_buffer + data
self.data_buffer = ''
try:
data = json.loads(data)
except ValueError:
self.data_buffer = data
return
self.outgoing_queue.put(json.dumps(data))
jData = data
if "auth" in jData and not self.authPacket:
self.authPacket = jData
elif "auth" in jData and self.authPacket:
self.authPacket.update(jData['auth'])
self.hud_queue.put(json.dumps(self.authPacket))
else:
self.hud_queue.put(json.dumps(data))
class TeeToHUDController(object):
def __init__(self, hud_queue):
self.hud_queue = hud_queue
self.hud_queue.get().addCallback(self.dataFromTeeReceived)
self.user = ""
def dataFromTeeReceived(self, data):
jData = json.loads(data)
if "auth" in jData:
if 'username' in jData['auth']:
self.user = jData['auth']['username']
Controller.send_message(self.user, data)
self.hud_queue.get().addCallback(self.dataFromTeeReceived)
class TeeToNetHackFactory(ClientFactory):
protocol = TeeToNetHackProtocol
def __init__(self, incoming_queue, outgoing_queue, hud_queue):
self.incoming_queue = incoming_queue
self.outgoing_queue = outgoing_queue
self.hud_queue = hud_queue
### Testing shitz go after this point ###
def main():
factory = Factory()
factory.protocol = TeeFromClientProtocol
reactor.listenTCP(12435, factory, interface="0.0.0.0")
reactor.run()
if __name__ == "__main__":
main()
| true | true |
f7f48ac87388f3180e6acb887393e4c5eed7c890 | 2,001 | py | Python | vultr/vultr.py | mgodiya/python-vultr | 4c314b426a2c5920276ad81a69757fa6ce72b73b | [
"MIT"
] | null | null | null | vultr/vultr.py | mgodiya/python-vultr | 4c314b426a2c5920276ad81a69757fa6ce72b73b | [
"MIT"
] | null | null | null | vultr/vultr.py | mgodiya/python-vultr | 4c314b426a2c5920276ad81a69757fa6ce72b73b | [
"MIT"
] | null | null | null | '''Python library for the Vultr cloud API'''
from .utils import VultrBase
from .v2_account import VultrAcc
from .v2_applications import VultrApp
from .v2_backup import VultrBackup
from .v2_baremetal import VultrBareMetal
#from .v2_billing import VultrBilling
from .v2_blockstorage import VultrBlockStorage
from .v2_dns import VultrDNS
from .v2_firewall import VultrFirewall
from .v2_instances import VultrInstances
from .v2_iso import VultrISO
from .v2_kubernetes import VultrKubernetes
from .v2_loadbalancers import VultrLoadBalancers
from .v2_objectstorage import VultrObjectStorage
from .v2_operatingsystems import VultrOperatingSystems
from .v2_plans import VultrPlans
from .v2_privatenetworks import VultrPrivateNetworks
from .v2_regions import VultrRegions
from .v2_reservedips import VultrReservedIPs
from .v2_snapshots import VultrSnapshots
from .v2_sshkeys import VultrSSHKeys
from .v2_startupscripts import VultrStartupScripts
from .v2_users import VultrUsers
class Vultr(VultrBase):
    """Top-level client for the Vultr v2 API.

    Construction instantiates one sub-client per API area and exposes each
    as an attribute, e.g. ``Vultr(key).instances`` or ``Vultr(key).dns``.
    """

    # (attribute name, sub-client class), in attribute-creation order.
    # NOTE: a 'billing' sub-client exists upstream but is deliberately
    # not wired in (it was disabled in the original as well).
    _SUB_CLIENTS = (
        ('account', VultrAcc),
        ('applications', VultrApp),
        ('backup', VultrBackup),
        ('baremetal', VultrBareMetal),
        ('blockstorage', VultrBlockStorage),
        ('dns', VultrDNS),
        ('firewall', VultrFirewall),
        ('instances', VultrInstances),
        ('iso', VultrISO),
        ('kubernetes', VultrKubernetes),
        ('loadbalancers', VultrLoadBalancers),
        ('objectstorage', VultrObjectStorage),
        ('operatingsystems', VultrOperatingSystems),
        ('plans', VultrPlans),
        ('privatenetworks', VultrPrivateNetworks),
        ('regions', VultrRegions),
        ('reservedips', VultrReservedIPs),
        ('snapshots', VultrSnapshots),
        ('sshkeys', VultrSSHKeys),
        ('startupscripts', VultrStartupScripts),
        ('users', VultrUsers),
    )

    def __init__(self, api_key):
        VultrBase.__init__(self, api_key)
        # Each sub-client receives the same API key as this facade.
        for attr_name, client_cls in self._SUB_CLIENTS:
            setattr(self, attr_name, client_cls(api_key))
| 39.235294 | 56 | 0.825087 | from .utils import VultrBase
from .v2_account import VultrAcc
from .v2_applications import VultrApp
from .v2_backup import VultrBackup
from .v2_baremetal import VultrBareMetal
from .v2_blockstorage import VultrBlockStorage
from .v2_dns import VultrDNS
from .v2_firewall import VultrFirewall
from .v2_instances import VultrInstances
from .v2_iso import VultrISO
from .v2_kubernetes import VultrKubernetes
from .v2_loadbalancers import VultrLoadBalancers
from .v2_objectstorage import VultrObjectStorage
from .v2_operatingsystems import VultrOperatingSystems
from .v2_plans import VultrPlans
from .v2_privatenetworks import VultrPrivateNetworks
from .v2_regions import VultrRegions
from .v2_reservedips import VultrReservedIPs
from .v2_snapshots import VultrSnapshots
from .v2_sshkeys import VultrSSHKeys
from .v2_startupscripts import VultrStartupScripts
from .v2_users import VultrUsers
class Vultr(VultrBase):
def __init__(self, api_key):
VultrBase.__init__(self, api_key)
self.account = VultrAcc(api_key)
self.applications = VultrApp(api_key)
self.backup = VultrBackup(api_key)
self.baremetal = VultrBareMetal(api_key)
self.blockstorage = VultrBlockStorage(api_key)
self.dns = VultrDNS(api_key)
self.firewall = VultrFirewall(api_key)
self.instances = VultrInstances(api_key)
self.iso = VultrISO(api_key)
self.kubernetes = VultrKubernetes(api_key)
self.loadbalancers = VultrLoadBalancers(api_key)
self.objectstorage = VultrObjectStorage(api_key)
self.operatingsystems = VultrOperatingSystems(api_key)
self.plans = VultrPlans(api_key)
self.privatenetworks = VultrPrivateNetworks(api_key)
self.regions = VultrRegions(api_key)
self.reservedips = VultrReservedIPs(api_key)
self.snapshots = VultrSnapshots(api_key)
self.sshkeys = VultrSSHKeys(api_key)
self.startupscripts = VultrStartupScripts(api_key)
self.users = VultrUsers(api_key)
| true | true |
f7f48b3413d138f5b0a697ac5cb1fd9ca5047463 | 968 | py | Python | search/views.py | taedori81/stylishclothing | 5ef8a978a9f7636ed0f1c840d4926e76d46c4c1a | [
"BSD-3-Clause"
] | null | null | null | search/views.py | taedori81/stylishclothing | 5ef8a978a9f7636ed0f1c840d4926e76d46c4c1a | [
"BSD-3-Clause"
] | null | null | null | search/views.py | taedori81/stylishclothing | 5ef8a978a9f7636ed0f1c840d4926e76d46c4c1a | [
"BSD-3-Clause"
] | null | null | null | from django.shortcuts import render
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from wagtail.wagtailcore.models import Page
from wagtail.wagtailsearch.models import Query
def search(request):
    """Render full-text search results for Wagtail pages, 10 per page.

    Reads ``query`` and ``page`` from the query string.  Each non-empty
    query records a hit against Wagtail's Query model so promoted search
    results can be ranked.
    """
    search_query = request.GET.get('query', None)
    page_number = request.GET.get('page', 1)

    if search_query:
        search_results = Page.objects.live().search(search_query)
        # Record the hit for search-promotion statistics.
        Query.get(search_query).add_hit()
    else:
        # No query: render an empty result set rather than every page.
        search_results = Page.objects.none()

    paginator = Paginator(search_results, 10)
    try:
        search_results = paginator.page(page_number)
    except PageNotAnInteger:
        # Non-numeric page parameter: fall back to the first page.
        search_results = paginator.page(1)
    except EmptyPage:
        # Page number past the end: clamp to the last page.
        search_results = paginator.page(paginator.num_pages)

    context = {
        'search_query': search_query,
        'search_results': search_results,
    }
    return render(request, 'search/search.html', context)
| 26.888889 | 72 | 0.688017 | from django.shortcuts import render
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from wagtail.wagtailcore.models import Page
from wagtail.wagtailsearch.models import Query
def search(request):
search_query = request.GET.get('query', None)
page = request.GET.get('page', 1)
if search_query:
search_results = Page.objects.live().search(search_query)
query = Query.get(search_query)
query.add_hit()
else:
search_results = Page.objects.none()
paginator = Paginator(search_results, 10)
try:
search_results = paginator.page(page)
except PageNotAnInteger:
search_results = paginator.page(1)
except EmptyPage:
search_results = paginator.page(paginator.num_pages)
return render(request, 'search/search.html', {
'search_query': search_query,
'search_results': search_results,
})
| true | true |
f7f48b4141432e98b74e9e645404b960f772bb32 | 998 | py | Python | pol/api/v0/me.py | xwu64/server | d358db21db4a8faf33a3681fc499aeea07e9784b | [
"BSD-3-Clause"
] | null | null | null | pol/api/v0/me.py | xwu64/server | d358db21db4a8faf33a3681fc499aeea07e9784b | [
"BSD-3-Clause"
] | null | null | null | pol/api/v0/me.py | xwu64/server | d358db21db4a8faf33a3681fc499aeea07e9784b | [
"BSD-3-Clause"
] | null | null | null | from fastapi import Depends, APIRouter
from pydantic import Field, BaseModel
from pol import res
from pol.res import ErrorDetail
from pol.models import Avatar
from pol.router import ErrorCatchRoute
from pol.permission import UserGroup
from pol.api.v0.depends.auth import User, get_current_user
__all__ = ["Me", "get_user"]
router = APIRouter(tags=["用户"], route_class=ErrorCatchRoute)
class Me(BaseModel):
    """Response schema for ``GET /me`` (the current authenticated user)."""
    id: int
    # Profile page URL ("https://bgm.tv/user/<username>", built in get_user).
    url: str
    username: str = Field(..., description="唯一用户名,初始与uid相同,可修改")
    nickname: str
    user_group: UserGroup
    avatar: Avatar
    sign: str
@router.get(
    "/me",
    response_model=Me,
    description="返回当前 Access Token 对应的用户信息",
    responses={
        403: res.response(model=ErrorDetail, description="unauthorized"),
    },
)
async def get_user(user: User = Depends(get_current_user)):
    """Return the profile of the user that owns the current access token."""
    base = user.dict(by_alias=False)
    # Fields the response schema derives or renames from the user model.
    extras = {
        "avatar": user.avatar,
        "url": f"https://bgm.tv/user/{user.username}",
        "user_group": user.group_id,
    }
    return {**base, **extras}
| 24.341463 | 73 | 0.700401 | from fastapi import Depends, APIRouter
from pydantic import Field, BaseModel
from pol import res
from pol.res import ErrorDetail
from pol.models import Avatar
from pol.router import ErrorCatchRoute
from pol.permission import UserGroup
from pol.api.v0.depends.auth import User, get_current_user
__all__ = ["Me", "get_user"]
router = APIRouter(tags=["用户"], route_class=ErrorCatchRoute)
class Me(BaseModel):
id: int
url: str
username: str = Field(..., description="唯一用户名,初始与uid相同,可修改")
nickname: str
user_group: UserGroup
avatar: Avatar
sign: str
@router.get(
"/me",
response_model=Me,
description="返回当前 Access Token 对应的用户信息",
responses={
403: res.response(model=ErrorDetail, description="unauthorized"),
},
)
async def get_user(user: User = Depends(get_current_user)):
d = user.dict(by_alias=False)
d["avatar"] = user.avatar
d["url"] = "https://bgm.tv/user/" + user.username
d["user_group"] = user.group_id
return d
| true | true |
f7f48b47c2b71997c4dd6bf685a9fc6aeeae6330 | 390 | py | Python | PBMSupport.py | jakehyvonen/PaintByMotors | 6ec568633e0f8bbddeb5d1731d21144a2b6a5ced | [
"MIT"
] | null | null | null | PBMSupport.py | jakehyvonen/PaintByMotors | 6ec568633e0f8bbddeb5d1731d21144a2b6a5ced | [
"MIT"
] | null | null | null | PBMSupport.py | jakehyvonen/PaintByMotors | 6ec568633e0f8bbddeb5d1731d21144a2b6a5ced | [
"MIT"
] | null | null | null | from decimal import *
def MakeDec(num, places=2):
    """Round *num* to *places* decimal places and return it as a Decimal.

    Parameters:
        num:    value to round; converted via str() so binary-float noise
                (e.g. 0.30000000000000004) does not leak into the result.
        places: number of decimal places to keep.  0 rounds to a whole
                number; values below 1 other than 0 behave like 1, matching
                the original string-building implementation.

    Rounding uses ROUND_HALF_DOWN, so an exact half rounds toward zero:
    MakeDec(1.005) -> Decimal('1.00'), MakeDec(2.5, 0) -> Decimal('2').
    """
    # Build the quantization template directly from the target exponent
    # instead of the original character-by-character string surgery
    # ('0.1' -> '0.01' -> '0.001' ...), which was O(places) and opaque.
    exponent = 0 if places == 0 else -max(places, 1)
    quantum = Decimal(1).scaleb(exponent)
    return Decimal(str(num)).quantize(quantum, rounding=ROUND_HALF_DOWN)


if __name__ == '__main__':
    # Quick manual smoke check.
    print('dec: ' + str(MakeDec(2.34567,4)))
| 22.941176 | 72 | 0.502564 | from decimal import *
def MakeDec(num,places = 2):
p = '0.1'
if(places == 0):
p = '0'
else:
for i in range(1,places) :
l = p.split('.')[1]
p = '0.0' + l
r = Decimal(str(num)).quantize(Decimal(p), rounding=ROUND_HALF_DOWN)
return r
if __name__ == '__main__':
print('dec: ' + str(MakeDec(2.34567,4)))
| true | true |
f7f48bc9c8fbea58839822ea4d61ccfd009ac531 | 6,931 | py | Python | AppServer/lib/django-1.4/tests/regressiontests/conditional_processing/models.py | loftwah/appscale | 586fc1347ebc743d7a632de698f4dbfb09ae38d6 | [
"Apache-2.0"
] | 790 | 2015-01-03T02:13:39.000Z | 2020-05-10T19:53:57.000Z | AppServer/lib/django-1.4/tests/regressiontests/conditional_processing/models.py | nlake44/appscale | 6944af660ca4cb772c9b6c2332ab28e5ef4d849f | [
"Apache-2.0"
] | 1,361 | 2015-01-08T23:09:40.000Z | 2020-04-14T00:03:04.000Z | AppServer/lib/django-1.4/tests/regressiontests/conditional_processing/models.py | nlake44/appscale | 6944af660ca4cb772c9b6c2332ab28e5ef4d849f | [
"Apache-2.0"
] | 155 | 2015-01-08T22:59:31.000Z | 2020-04-08T08:01:53.000Z | # -*- coding:utf-8 -*-
from datetime import datetime
from django.test import TestCase
from django.utils import unittest
from django.utils.http import parse_etags, quote_etag, parse_http_date
FULL_RESPONSE = 'Test conditional get response'
LAST_MODIFIED = datetime(2007, 10, 21, 23, 21, 47)
LAST_MODIFIED_STR = 'Sun, 21 Oct 2007 23:21:47 GMT'
LAST_MODIFIED_NEWER_STR = 'Mon, 18 Oct 2010 16:56:23 GMT'
LAST_MODIFIED_INVALID_STR = 'Mon, 32 Oct 2010 16:56:23 GMT'
EXPIRED_LAST_MODIFIED_STR = 'Sat, 20 Oct 2007 23:21:47 GMT'
ETAG = 'b4246ffc4f62314ca13147c9d4f76974'
EXPIRED_ETAG = '7fae4cd4b0f81e7d2914700043aa8ed6'
class ConditionalGet(TestCase):
    """HTTP conditional-GET tests against the views in this app's urls module.

    Exercises If-Modified-Since / If-None-Match / If-Match handling for
    endpoints that set Last-Modified only, ETag only, or both.
    """
    urls = 'regressiontests.conditional_processing.urls'
    def assertFullResponse(self, response, check_last_modified=True, check_etag=True):
        """Assert a 200 with the full body and (optionally) validator headers."""
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, FULL_RESPONSE)
        if check_last_modified:
            self.assertEqual(response['Last-Modified'], LAST_MODIFIED_STR)
        if check_etag:
            self.assertEqual(response['ETag'], '"%s"' % ETAG)
    def assertNotModified(self, response):
        """Assert a 304 with an empty body."""
        self.assertEqual(response.status_code, 304)
        self.assertEqual(response.content, '')
    def testWithoutConditions(self):
        """No conditional headers: the full response is always returned."""
        response = self.client.get('/condition/')
        self.assertFullResponse(response)
    def testIfModifiedSince(self):
        """304 for current/newer dates; full response for invalid/expired ones."""
        self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR
        response = self.client.get('/condition/')
        self.assertNotModified(response)
        self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_NEWER_STR
        response = self.client.get('/condition/')
        self.assertNotModified(response)
        # An unparsable date must be ignored, yielding the full response.
        self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_INVALID_STR
        response = self.client.get('/condition/')
        self.assertFullResponse(response)
        self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR
        response = self.client.get('/condition/')
        self.assertFullResponse(response)
    def testIfNoneMatch(self):
        """304 when the current ETag matches; full response otherwise."""
        self.client.defaults['HTTP_IF_NONE_MATCH'] = '"%s"' % ETAG
        response = self.client.get('/condition/')
        self.assertNotModified(response)
        self.client.defaults['HTTP_IF_NONE_MATCH'] = '"%s"' % EXPIRED_ETAG
        response = self.client.get('/condition/')
        self.assertFullResponse(response)
        # Several etags in If-None-Match is a bit exotic but why not?
        self.client.defaults['HTTP_IF_NONE_MATCH'] = '"%s", "%s"' % (ETAG, EXPIRED_ETAG)
        response = self.client.get('/condition/')
        self.assertNotModified(response)
    def testIfMatch(self):
        """PUT succeeds with the current ETag; 412 with a stale one."""
        self.client.defaults['HTTP_IF_MATCH'] = '"%s"' % ETAG
        response = self.client.put('/condition/etag/', {'data': ''})
        self.assertEqual(response.status_code, 200)
        self.client.defaults['HTTP_IF_MATCH'] = '"%s"' % EXPIRED_ETAG
        response = self.client.put('/condition/etag/', {'data': ''})
        self.assertEqual(response.status_code, 412)
    def testBothHeaders(self):
        """Both validators must indicate 'not modified' to get a 304."""
        self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR
        self.client.defaults['HTTP_IF_NONE_MATCH'] = '"%s"' % ETAG
        response = self.client.get('/condition/')
        self.assertNotModified(response)
        self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR
        self.client.defaults['HTTP_IF_NONE_MATCH'] = '"%s"' % ETAG
        response = self.client.get('/condition/')
        self.assertFullResponse(response)
        self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR
        self.client.defaults['HTTP_IF_NONE_MATCH'] = '"%s"' % EXPIRED_ETAG
        response = self.client.get('/condition/')
        self.assertFullResponse(response)
    def testSingleCondition1(self):
        """If-Modified-Since only affects the Last-Modified-only endpoint."""
        self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR
        response = self.client.get('/condition/last_modified/')
        self.assertNotModified(response)
        response = self.client.get('/condition/etag/')
        self.assertFullResponse(response, check_last_modified=False)
    def testSingleCondition2(self):
        """If-None-Match only affects the ETag-only endpoint."""
        self.client.defaults['HTTP_IF_NONE_MATCH'] = '"%s"' % ETAG
        response = self.client.get('/condition/etag/')
        self.assertNotModified(response)
        response = self.client.get('/condition/last_modified/')
        self.assertFullResponse(response, check_etag=False)
    def testSingleCondition3(self):
        """An expired If-Modified-Since yields the full response."""
        self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR
        response = self.client.get('/condition/last_modified/')
        self.assertFullResponse(response, check_etag=False)
    def testSingleCondition4(self):
        """A stale If-None-Match yields the full response."""
        self.client.defaults['HTTP_IF_NONE_MATCH'] = '"%s"' % EXPIRED_ETAG
        response = self.client.get('/condition/etag/')
        self.assertFullResponse(response, check_last_modified=False)
    def testSingleCondition5(self):
        """Same as condition 1 against the alternate endpoint variants."""
        self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR
        response = self.client.get('/condition/last_modified2/')
        self.assertNotModified(response)
        response = self.client.get('/condition/etag2/')
        self.assertFullResponse(response, check_last_modified=False)
    def testSingleCondition6(self):
        """Same as condition 2 against the alternate endpoint variants."""
        self.client.defaults['HTTP_IF_NONE_MATCH'] = '"%s"' % ETAG
        response = self.client.get('/condition/etag2/')
        self.assertNotModified(response)
        response = self.client.get('/condition/last_modified2/')
        self.assertFullResponse(response, check_etag=False)
    def testInvalidETag(self):
        """A malformed ETag in If-None-Match must not match anything."""
        self.client.defaults['HTTP_IF_NONE_MATCH'] = r'"\"'
        response = self.client.get('/condition/etag/')
        self.assertFullResponse(response, check_last_modified=False)
class ETagProcessing(unittest.TestCase):
    """Unit tests for the ETag header helpers in django.utils.http."""
    def testParsing(self):
        """parse_etags strips quotes, unescapes, and drops weak (W/) markers."""
        etags = parse_etags(r'"", "etag", "e\"t\"ag", "e\\tag", W/"weak"')
        self.assertEqual(etags, ['', 'etag', 'e"t"ag', r'e\tag', 'weak'])
    def testQuoting(self):
        """quote_etag escapes backslashes and quotes, then wraps in quotes."""
        quoted_etag = quote_etag(r'e\t"ag')
        self.assertEqual(quoted_etag, r'"e\\t\"ag"')
class HttpDateProcessing(unittest.TestCase):
def testParsingRfc1123(self):
parsed = parse_http_date('Sun, 06 Nov 1994 08:49:37 GMT')
self.assertEqual(datetime.utcfromtimestamp(parsed),
datetime(1994, 11, 06, 8, 49, 37))
def testParsingRfc850(self):
parsed = parse_http_date('Sunday, 06-Nov-94 08:49:37 GMT')
self.assertEqual(datetime.utcfromtimestamp(parsed),
datetime(1994, 11, 06, 8, 49, 37))
def testParsingAsctime(self):
parsed = parse_http_date('Sun Nov 6 08:49:37 1994')
self.assertEqual(datetime.utcfromtimestamp(parsed),
datetime(1994, 11, 06, 8, 49, 37))
| 44.146497 | 88 | 0.68172 |
from datetime import datetime
from django.test import TestCase
from django.utils import unittest
from django.utils.http import parse_etags, quote_etag, parse_http_date
FULL_RESPONSE = 'Test conditional get response'
LAST_MODIFIED = datetime(2007, 10, 21, 23, 21, 47)
LAST_MODIFIED_STR = 'Sun, 21 Oct 2007 23:21:47 GMT'
LAST_MODIFIED_NEWER_STR = 'Mon, 18 Oct 2010 16:56:23 GMT'
LAST_MODIFIED_INVALID_STR = 'Mon, 32 Oct 2010 16:56:23 GMT'
EXPIRED_LAST_MODIFIED_STR = 'Sat, 20 Oct 2007 23:21:47 GMT'
ETAG = 'b4246ffc4f62314ca13147c9d4f76974'
EXPIRED_ETAG = '7fae4cd4b0f81e7d2914700043aa8ed6'
class ConditionalGet(TestCase):
urls = 'regressiontests.conditional_processing.urls'
def assertFullResponse(self, response, check_last_modified=True, check_etag=True):
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, FULL_RESPONSE)
if check_last_modified:
self.assertEqual(response['Last-Modified'], LAST_MODIFIED_STR)
if check_etag:
self.assertEqual(response['ETag'], '"%s"' % ETAG)
def assertNotModified(self, response):
self.assertEqual(response.status_code, 304)
self.assertEqual(response.content, '')
def testWithoutConditions(self):
response = self.client.get('/condition/')
self.assertFullResponse(response)
def testIfModifiedSince(self):
self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR
response = self.client.get('/condition/')
self.assertNotModified(response)
self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_NEWER_STR
response = self.client.get('/condition/')
self.assertNotModified(response)
self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_INVALID_STR
response = self.client.get('/condition/')
self.assertFullResponse(response)
self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR
response = self.client.get('/condition/')
self.assertFullResponse(response)
def testIfNoneMatch(self):
self.client.defaults['HTTP_IF_NONE_MATCH'] = '"%s"' % ETAG
response = self.client.get('/condition/')
self.assertNotModified(response)
self.client.defaults['HTTP_IF_NONE_MATCH'] = '"%s"' % EXPIRED_ETAG
response = self.client.get('/condition/')
self.assertFullResponse(response)
self.client.defaults['HTTP_IF_NONE_MATCH'] = '"%s", "%s"' % (ETAG, EXPIRED_ETAG)
response = self.client.get('/condition/')
self.assertNotModified(response)
def testIfMatch(self):
self.client.defaults['HTTP_IF_MATCH'] = '"%s"' % ETAG
response = self.client.put('/condition/etag/', {'data': ''})
self.assertEqual(response.status_code, 200)
self.client.defaults['HTTP_IF_MATCH'] = '"%s"' % EXPIRED_ETAG
response = self.client.put('/condition/etag/', {'data': ''})
self.assertEqual(response.status_code, 412)
def testBothHeaders(self):
    """When both validators are sent, a 304 requires both to match."""
    cases = [
        (LAST_MODIFIED_STR, ETAG, True),
        (EXPIRED_LAST_MODIFIED_STR, ETAG, False),
        (LAST_MODIFIED_STR, EXPIRED_ETAG, False),
    ]
    for last_modified, etag, expect_not_modified in cases:
        self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = last_modified
        self.client.defaults['HTTP_IF_NONE_MATCH'] = '"%s"' % etag
        response = self.client.get('/condition/')
        if expect_not_modified:
            self.assertNotModified(response)
        else:
            self.assertFullResponse(response)
def testSingleCondition1(self):
    """A view decorated only with last_modified honours If-Modified-Since;
    the etag-only view ignores it and replies in full."""
    self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR
    response = self.client.get('/condition/last_modified/')
    self.assertNotModified(response)
    response = self.client.get('/condition/etag/')
    self.assertFullResponse(response, check_last_modified=False)
def testSingleCondition2(self):
    """A view decorated only with etag honours If-None-Match;
    the last_modified-only view ignores it and replies in full."""
    self.client.defaults['HTTP_IF_NONE_MATCH'] = '"%s"' % ETAG
    response = self.client.get('/condition/etag/')
    self.assertNotModified(response)
    response = self.client.get('/condition/last_modified/')
    self.assertFullResponse(response, check_etag=False)
def testSingleCondition3(self):
    """A stale If-Modified-Since on the last_modified view gets a full
    response."""
    self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = EXPIRED_LAST_MODIFIED_STR
    response = self.client.get('/condition/last_modified/')
    self.assertFullResponse(response, check_etag=False)
def testSingleCondition4(self):
    """A stale If-None-Match on the etag view gets a full response."""
    self.client.defaults['HTTP_IF_NONE_MATCH'] = '"%s"' % EXPIRED_ETAG
    response = self.client.get('/condition/etag/')
    self.assertFullResponse(response, check_last_modified=False)
def testSingleCondition5(self):
    """Same as testSingleCondition1, against the alternate URL variants
    (last_modified2/etag2)."""
    self.client.defaults['HTTP_IF_MODIFIED_SINCE'] = LAST_MODIFIED_STR
    response = self.client.get('/condition/last_modified2/')
    self.assertNotModified(response)
    response = self.client.get('/condition/etag2/')
    self.assertFullResponse(response, check_last_modified=False)
def testSingleCondition6(self):
    """Same as testSingleCondition2, against the alternate URL variants
    (etag2/last_modified2)."""
    self.client.defaults['HTTP_IF_NONE_MATCH'] = '"%s"' % ETAG
    response = self.client.get('/condition/etag2/')
    self.assertNotModified(response)
    response = self.client.get('/condition/last_modified2/')
    self.assertFullResponse(response, check_etag=False)
def testInvalidETag(self):
    """A syntactically invalid If-None-Match (bare escaped quote) must not
    match anything, so a full response is returned."""
    self.client.defaults['HTTP_IF_NONE_MATCH'] = r'"\"'
    response = self.client.get('/condition/etag/')
    self.assertFullResponse(response, check_last_modified=False)
class ETagProcessing(unittest.TestCase):
    """Tests for the ETag helpers parse_etags() and quote_etag()."""

    def testParsing(self):
        # Escaped quotes/backslashes are unescaped; a weak validator
        # (W/"...") is reduced to its opaque value.
        etags = parse_etags(r'"", "etag", "e\"t\"ag", "e\\tag", W/"weak"')
        self.assertEqual(etags, ['', 'etag', 'e"t"ag', r'e\tag', 'weak'])

    def testQuoting(self):
        # Backslashes and double quotes must be escaped inside the quotes.
        quoted_etag = quote_etag(r'e\t"ag')
        self.assertEqual(quoted_etag, r'"e\\t\"ag"')
class HttpDateProcessing(unittest.TestCase):
    """parse_http_date() must accept the three date formats HTTP allows
    (RFC 1123, RFC 850 and ANSI C asctime()); every input below encodes
    the same instant, 1994-11-06 08:49:37 UTC.

    Fix: the day argument was written as the literal ``06``; an integer
    literal with a leading zero is a SyntaxError on Python 3 (and reads
    as octal-style on Python 2), so it is now plain ``6``.
    """

    def testParsingRfc1123(self):
        parsed = parse_http_date('Sun, 06 Nov 1994 08:49:37 GMT')
        self.assertEqual(datetime.utcfromtimestamp(parsed),
                         datetime(1994, 11, 6, 8, 49, 37))

    def testParsingRfc850(self):
        parsed = parse_http_date('Sunday, 06-Nov-94 08:49:37 GMT')
        self.assertEqual(datetime.utcfromtimestamp(parsed),
                         datetime(1994, 11, 6, 8, 49, 37))

    def testParsingAsctime(self):
        parsed = parse_http_date('Sun Nov 6 08:49:37 1994')
        self.assertEqual(datetime.utcfromtimestamp(parsed),
                         datetime(1994, 11, 6, 8, 49, 37))
| false | true |
f7f48d4ee06aa1913ab8bad70c5c82603cf93037 | 155 | py | Python | navigation/scripts/tests/context.py | archit2604/Trotbot | 8ff34049b9c81fa50d29493b5669140b0f75d0d5 | [
"MIT"
] | 1 | 2020-08-04T12:00:18.000Z | 2020-08-04T12:00:18.000Z | navigation/scripts/tests/context.py | archit2604/Trotbot | 8ff34049b9c81fa50d29493b5669140b0f75d0d5 | [
"MIT"
] | null | null | null | navigation/scripts/tests/context.py | archit2604/Trotbot | 8ff34049b9c81fa50d29493b5669140b0f75d0d5 | [
"MIT"
] | null | null | null | #! /usr/bin/env python2.7
import sys
import os

# Make the parent directory (the scripts package under test) importable
# without installing it, so the test modules can do `import RRT` etc.
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

import RRT
import utils
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import RRT
import utils | true | true |
f7f48dbf813350bf83b765bc98d6621a3b87e62c | 281 | py | Python | iris_data_set.py | karolinaszafranbelzowska/Fisher-s-Iris-Data-Set-2019 | 5ad7fb98ab65d268dd7a9ebebd13eb00970ddc4b | [
"Apache-2.0"
] | null | null | null | iris_data_set.py | karolinaszafranbelzowska/Fisher-s-Iris-Data-Set-2019 | 5ad7fb98ab65d268dd7a9ebebd13eb00970ddc4b | [
"Apache-2.0"
] | null | null | null | iris_data_set.py | karolinaszafranbelzowska/Fisher-s-Iris-Data-Set-2019 | 5ad7fb98ab65d268dd7a9ebebd13eb00970ddc4b | [
"Apache-2.0"
] | null | null | null | # Karolina Szafran-Belzowska, 2019/04/15
# Iris flower Data analysis
# This code will print the whole Fisher's iris flower data
import csv
with open('irisdata.csv') as data:
readCSV = csv.reader(data, delimiter=',')
for columns in readCSV:
print(columns)
| 18.733333 | 58 | 0.690391 |
import csv
with open('irisdata.csv') as data:
readCSV = csv.reader(data, delimiter=',')
for columns in readCSV:
print(columns)
| true | true |
f7f48ddedfe460350379700a3c32617aea44c469 | 587 | py | Python | contact/tests/test_admin.py | uktrade/help | b9c433e639191768e42a7ef4915100a2485fcba9 | [
"MIT"
] | 1 | 2017-05-09T14:45:41.000Z | 2017-05-09T14:45:41.000Z | contact/tests/test_admin.py | uktrade/help | b9c433e639191768e42a7ef4915100a2485fcba9 | [
"MIT"
] | 29 | 2016-11-10T11:15:42.000Z | 2018-11-14T18:40:45.000Z | contact/tests/test_admin.py | uktrade/help | b9c433e639191768e42a7ef4915100a2485fcba9 | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.urls import reverse
class AdminViewTests(TestCase):
    """Checks that the admin login view honours the RESTRICT_ADMIN setting."""

    def test_admin_restricted(self):
        # With the restriction on, a request from a non-allowed IP
        # (spoofed via X-Forwarded-For) must 404 rather than reveal
        # that the admin exists.
        with self.settings(RESTRICT_ADMIN=True):
            login_page = self.client.get(
                reverse('admin:login'),
                **{'HTTP_X_FORWARDED_FOR': '74.125.224.72'}
            )
            assert login_page.status_code == 404

    def test_admin_unrestricted(self):
        # With the restriction off, the login page is served normally.
        with self.settings(RESTRICT_ADMIN=False):
            login_page = self.client.get(reverse('admin:login'))
            assert login_page.status_code == 200
| 30.894737 | 62 | 0.632027 | from django.test import TestCase
from django.urls import reverse
class AdminViewTests(TestCase):
def test_admin_restricted(self):
with self.settings(RESTRICT_ADMIN=True):
response = self.client.get(
reverse('admin:login'),
**{'HTTP_X_FORWARDED_FOR': '74.125.224.72'}
)
assert response.status_code == 404
def test_admin_unrestricted(self):
with self.settings(RESTRICT_ADMIN=False):
response = self.client.get(reverse('admin:login'))
assert response.status_code == 200
| true | true |
f7f48dfe15e52f4c215a87c215bad982fec434c2 | 2,507 | py | Python | call_map/custom_typing.py | ajylee/call_map | 21e7684b0814eae6f16cd4bc75597dc4e9239ec0 | [
"BSD-2-Clause"
] | 20 | 2017-12-24T00:19:15.000Z | 2021-11-15T07:42:25.000Z | call_map/custom_typing.py | ajylee/call_map | 21e7684b0814eae6f16cd4bc75597dc4e9239ec0 | [
"BSD-2-Clause"
] | 1 | 2017-10-22T21:03:41.000Z | 2017-12-24T04:26:22.000Z | call_map/custom_typing.py | ajylee/call_map | 21e7684b0814eae6f16cd4bc75597dc4e9239ec0 | [
"BSD-2-Clause"
] | 2 | 2017-11-04T10:06:59.000Z | 2019-08-01T22:24:49.000Z | import toolz as tz
import typing
import abc
class TypeSpec(metaclass=abc.ABCMeta):
    '''Used to classify objects, but is not an actual Python type

    Instead of `isinstance`, use `matches_spec` with `TypeSpec`s. Does not implement any
    other interface.

    For example, this is useful for specifying a list of strings. `TypeSpec`s
    differ from type hints, in that they can easily be checked.
    '''

    @abc.abstractmethod
    def __matches_spec__(self, obj):
        # Return truthy iff `obj` conforms to this spec.
        # Called indirectly via the module-level matches_spec().
        pass
def matches_spec(obj: typing.Any, type_spec: typing.Union[type, TypeSpec, typing.Iterable]) -> bool:
    """Check whether `obj` conforms to `type_spec`.

    `type_spec` may be a real type (checked with isinstance), a TypeSpec
    (checked via its __matches_spec__ hook), or an iterable of specs (obj
    matches if it matches any element).

    Fix: the original implicitly returned None when `type_spec` was none
    of the three supported kinds; return an explicit False so callers
    always receive a bool.
    """
    if isinstance(type_spec, type):
        return isinstance(obj, type_spec)
    elif isinstance(type_spec, TypeSpec):
        # Coerce to bool: hooks are only required to return truthy/falsy.
        return bool(type_spec.__matches_spec__(obj))
    elif tz.isiterable(type_spec):
        return any(matches_spec(obj, elt) for elt in type_spec)
    return False
class CheckableOptional(TypeSpec):
    """Spec matching either None or a value conforming to the wrapped spec."""

    def __init__(self, arg):
        self.nontrivial_type = arg

    def __repr__(self):
        return '<CheckableOptional {}>'.format(repr(self.nontrivial_type))

    def __matches_spec__(self, obj):
        # None always matches; otherwise defer to the wrapped spec.
        return obj is None or bool(matches_spec(obj, self.nontrivial_type))
class CheckableDict(TypeSpec):
    """Spec matching a dict with exactly the given keys, each value
    conforming to its per-key spec."""

    def __init__(self, types: dict):
        self.value_types = types

    def __repr__(self):
        return '<CheckableDict {}>'.format(repr(self.value_types))

    def __matches_spec__(self, obj):
        if not isinstance(obj, dict):
            return False
        # Key sets must agree exactly -- no missing and no extra keys.
        if set(obj.keys()) != set(self.value_types.keys()):
            return False
        return all(matches_spec(obj[key], spec)
                   for key, spec in self.value_types.items())

    def new_empty_instance(self):
        # Same key set, every value None.
        return dict.fromkeys(self.value_types)
class CheckableList(TypeSpec):
    """Spec matching a list whose every element conforms to `value_type`."""

    def __init__(self, value_type):
        self.value_type = value_type

    def __repr__(self):
        return '<CheckableList [{}]>'.format(repr(self.value_type))

    def __matches_spec__(self, obj):
        if not isinstance(obj, list):
            return False
        # An empty list vacuously matches.
        return all(matches_spec(item, self.value_type) for item in obj)

    def new_empty_instance(self):
        return []
class CheckableTuple(TypeSpec):
    """Spec matching a tuple whose elements conform pairwise to
    `value_types`.

    Fixes: __matches_spec__ referenced the nonexistent attribute
    ``self.value_type`` and passed the whole ``(element, spec)`` pair from
    zip() as the object to check, so any call raised AttributeError.  It
    now unpacks the pairs correctly and also requires equal lengths, since
    zip() would otherwise silently truncate the comparison.
    """

    def __init__(self, *value_types):
        self.value_types = value_types

    def __repr__(self):
        return '<CheckableTuple ({})>'.format(repr(self.value_types))

    def __matches_spec__(self, obj):
        return (isinstance(obj, tuple)
                and len(obj) == len(self.value_types)
                and all(matches_spec(elt, spec)
                        for elt, spec in zip(obj, self.value_types)))

    def new_empty_instance(self):
        # Added for consistency with CheckableDict / CheckableList.
        return tuple()
| 29.494118 | 98 | 0.668528 | import toolz as tz
import typing
import abc
class TypeSpec(metaclass=abc.ABCMeta):
@abc.abstractmethod
def __matches_spec__(self, obj):
pass
def matches_spec(obj: typing.Any, type_spec: typing.Union[type, TypeSpec, typing.Iterable]):
if isinstance(type_spec, type):
return isinstance(obj, type_spec)
elif isinstance(type_spec, TypeSpec):
return type_spec.__matches_spec__(obj)
elif tz.isiterable(type_spec):
return any(matches_spec(obj, elt) for elt in type_spec)
class CheckableOptional(TypeSpec):
def __init__(self, arg):
self.nontrivial_type = arg
def __repr__(self):
return '<CheckableOptional {}>'.format(repr(self.nontrivial_type))
def __matches_spec__(self, obj):
return matches_spec(obj, (type(None), self.nontrivial_type))
class CheckableDict(TypeSpec):
def __init__(self, types: dict):
self.value_types = types
def __repr__(self):
return '<CheckableDict {}>'.format(repr(self.value_types))
def __matches_spec__(self, obj):
return (
isinstance(obj, dict)
and set(self.value_types.keys()) == set(obj.keys())
and all(matches_spec(obj[key], val_type)
for key, val_type in self.value_types.items()))
def new_empty_instance(self):
return {key: None for key in self.value_types}
class CheckableList(TypeSpec):
def __init__(self, value_type):
self.value_type = value_type
def __repr__(self):
return '<CheckableList [{}]>'.format(repr(self.value_type))
def __matches_spec__(self, obj):
return (isinstance(obj, list)
and all(matches_spec(elt, self.value_type) for elt in obj))
def new_empty_instance(self):
return list()
class CheckableTuple(TypeSpec):
def __init__(self, *value_types):
self.value_types = value_types
def __repr__(self):
return '<CheckableTuple ({})>'.format(repr(self.value_types))
def __matches_spec__(self, obj):
return (isinstance(obj, tuple)
and all(matches_spec(elt, self.value_type) for elt in zip(obj, self.value_types)))
| true | true |
f7f48e9ef1ec08ddb43ac6924e1001278169e594 | 29,599 | py | Python | pyvips/tests/test_foreign.py | kleisauke/pyvips | ae3b0c09669cfb662e773e8ae69cf589ac15e320 | [
"MIT"
] | null | null | null | pyvips/tests/test_foreign.py | kleisauke/pyvips | ae3b0c09669cfb662e773e8ae69cf589ac15e320 | [
"MIT"
] | null | null | null | pyvips/tests/test_foreign.py | kleisauke/pyvips | ae3b0c09669cfb662e773e8ae69cf589ac15e320 | [
"MIT"
] | null | null | null | # vim: set fileencoding=utf-8 :
import gc
import os
import shutil
import tempfile
import unittest
import pyvips
from .helpers import PyvipsTester, JPEG_FILE, SRGB_FILE, \
MATLAB_FILE, PNG_FILE, TIF_FILE, OME_FILE, ANALYZE_FILE, \
GIF_FILE, WEBP_FILE, EXR_FILE, FITS_FILE, OPENSLIDE_FILE, \
PDF_FILE, SVG_FILE, SVGZ_FILE, SVG_GZ_FILE, GIF_ANIM_FILE, \
DICOM_FILE, temp_filename
class TestForeign(PyvipsTester):
tempdir = None
@classmethod
def setUpClass(cls):
    # One scratch directory shared by every test; removed in tearDownClass().
    cls.tempdir = tempfile.mkdtemp()
def setUp(self):
    """Build the fixture images shared by the tests: colour (RGB JPEG),
    mono (one band), rad (Radiance float), cmyk (4-band) and onebit."""
    self.colour = pyvips.Image.jpegload(JPEG_FILE)
    self.mono = self.colour.extract_band(1)
    # we remove the ICC profile: the RGB one will no longer be appropriate
    self.mono.remove("icc-profile-data")
    self.rad = self.colour.float2rad()
    self.rad.remove("icc-profile-data")
    self.cmyk = self.colour.bandjoin(self.mono)
    self.cmyk = self.cmyk.copy(interpretation=pyvips.Interpretation.CMYK)
    self.cmyk.remove("icc-profile-data")
    im = pyvips.Image.new_from_file(GIF_FILE)
    # Thresholding gives a 0/255 one-bit-style image.
    self.onebit = im > 128
@classmethod
def tearDownClass(cls):
    # Best-effort cleanup of the shared scratch directory.
    shutil.rmtree(cls.tempdir, ignore_errors=True)
# we have test files for formats which have a clear standard
def file_loader(self, loader, test_file, validate):
    """Load `test_file` twice -- via the named loader operation and via
    the generic new_from_file sniffer -- validating each result."""
    direct = pyvips.Operation.call(loader, test_file)
    validate(self, direct)
    sniffed = pyvips.Image.new_from_file(test_file)
    validate(self, sniffed)
def buffer_loader(self, loader, test_file, validate):
    """Load `test_file` from memory, both via the named buffer loader and
    via the generic new_from_buffer sniffer, validating each result."""
    with open(test_file, 'rb') as f:
        data = f.read()
    direct = pyvips.Operation.call(loader, data)
    validate(self, direct)
    sniffed = pyvips.Image.new_from_buffer(data, "")
    validate(self, sniffed)
def save_load(self, format, im):
    """Round-trip `im` through a temp file in `format` and require an
    exact match on geometry and pixel values."""
    reloaded = pyvips.Image.new_temp_file(format)
    im.write(reloaded)
    self.assertEqual(im.width, reloaded.width)
    self.assertEqual(im.height, reloaded.height)
    self.assertEqual(im.bands, reloaded.bands)
    self.assertEqual((im - reloaded).abs().max(), 0)
def save_load_file(self, format, options, im, thresh):
    """Round-trip `im` through a real file so save `options` can be
    appended, allowing a per-format error threshold `thresh`."""
    # yuk!
    # but we can't set format parameters for pyvips.Image.new_temp_file()
    path = temp_filename(self.tempdir, format)
    im.write_to_file(path + options)

    reloaded = pyvips.Image.new_from_file(path)
    self.assertEqual(im.width, reloaded.width)
    self.assertEqual(im.height, reloaded.height)
    self.assertEqual(im.bands, reloaded.bands)
    self.assertTrue((im - reloaded).abs().max() <= thresh)
    # Drop the reference so the temp file can be deleted.
    reloaded = None
def save_load_buffer(self, saver, loader, im, max_diff=0):
    """Round-trip `im` through an in-memory buffer, allowing up to
    `max_diff` of pixel error (for lossy formats)."""
    data = pyvips.Operation.call(saver, im)
    reloaded = pyvips.Operation.call(loader, data)
    self.assertEqual(im.width, reloaded.width)
    self.assertEqual(im.height, reloaded.height)
    self.assertEqual(im.bands, reloaded.bands)
    self.assertLessEqual((im - reloaded).abs().max(), max_diff)
def save_buffer_tempfile(self, saver, suf, im, max_diff=0):
    """Save `im` to a buffer, write the buffer to a temp file with suffix
    `suf`, load the file back and compare against the original.

    Fix: the file is now written inside a ``with`` block so the handle is
    closed even if write() raises -- the original open/write/close
    sequence leaked the handle on error.
    """
    filename = temp_filename(self.tempdir, suf)
    buf = pyvips.Operation.call(saver, im)
    with open(filename, 'wb') as f:
        f.write(buf)

    x = pyvips.Image.new_from_file(filename)
    self.assertEqual(im.width, x.width)
    self.assertEqual(im.height, x.height)
    self.assertEqual(im.bands, x.bands)
    self.assertLessEqual((im - x).abs().max(), max_diff)
def test_vips(self):
    """Native .v format: lossless round-trip and metadata preservation."""
    self.save_load_file(".v", "", self.colour, 0)

    # check we can save and restore metadata
    filename = temp_filename(self.tempdir, ".v")
    self.colour.write_to_file(filename)
    x = pyvips.Image.new_from_file(filename)
    before_exif = self.colour.get_value("exif-data")
    after_exif = x.get_value("exif-data")

    self.assertEqual(len(before_exif), len(after_exif))
    for i in range(len(before_exif)):
        self.assertEqual(before_exif[i], after_exif[i])

    # Drop the reference so the temp file can be deleted.
    x = None
def test_jpeg(self):
    """JPEG load/save: file and buffer round-trips, plus EXIF orientation
    handling (set, persist, remove, autorotate)."""
    if pyvips.type_find("VipsForeign", "jpegload") == 0:
        print("no jpeg support in this vips, skipping test")
        return

    def jpeg_valid(self, im):
        # Spot-check a pixel, the embedded ICC profile and the geometry.
        a = im(10, 10)
        self.assertAlmostEqualObjects(a, [6, 5, 3])
        profile = im.get_value("icc-profile-data")
        self.assertEqual(len(profile), 1352)
        self.assertEqual(im.width, 1024)
        self.assertEqual(im.height, 768)
        self.assertEqual(im.bands, 3)

    self.file_loader("jpegload", JPEG_FILE, jpeg_valid)
    self.save_load("%s.jpg", self.mono)
    self.save_load("%s.jpg", self.colour)

    self.buffer_loader("jpegload_buffer", JPEG_FILE, jpeg_valid)
    self.save_load_buffer("jpegsave_buffer", "jpegload_buffer",
                          self.colour, 80)

    # see if we have exif parsing: our test image has this field
    x = pyvips.Image.new_from_file(JPEG_FILE)
    if x.get_typeof("exif-ifd0-Orientation") != 0:
        # we need a copy of the image to set the new metadata on
        # otherwise we get caching problems
        x = pyvips.Image.new_from_file(JPEG_FILE)
        x = x.copy()
        x.set_value("orientation", 2)

        filename = temp_filename(self.tempdir, '.jpg')
        x.write_to_file(filename)

        # The orientation tag must survive a save/load cycle.
        x = pyvips.Image.new_from_file(filename)
        y = x.get_value("orientation")
        self.assertEqual(y, 2)

        filename = temp_filename(self.tempdir, '.jpg')

        x = pyvips.Image.new_from_file(JPEG_FILE)
        x = x.copy()
        x.set_value("orientation", 2)
        x.write_to_file(filename)

        x = pyvips.Image.new_from_file(filename)
        y = x.get_value("orientation")
        self.assertEqual(y, 2)
        x.remove("orientation")

        # After removal, a re-save must come back as the default (1).
        filename = temp_filename(self.tempdir, '.jpg')
        x.write_to_file(filename)
        x = pyvips.Image.new_from_file(filename)
        y = x.get_value("orientation")
        self.assertEqual(y, 1)

        # Orientation 6 is a 90-degree rotation, so autorotate must swap
        # width and height.
        filename = temp_filename(self.tempdir, '.jpg')
        x = pyvips.Image.new_from_file(JPEG_FILE)
        x = x.copy()
        x.set_value("orientation", 6)
        x.write_to_file(filename)

        x1 = pyvips.Image.new_from_file(filename)
        x2 = pyvips.Image.new_from_file(filename, autorotate=True)
        self.assertEqual(x1.width, x2.height)
        self.assertEqual(x1.height, x2.width)
def test_png(self):
    """PNG load/save: file, buffer and round-trip checks.

    Fix: the capability check printed the skip message but fell through
    and ran the test body anyway -- every sibling test returns here, so
    the missing ``return`` is added.
    """
    if pyvips.type_find("VipsForeign", "pngload") == 0 or \
            not os.path.isfile(PNG_FILE):
        print("no png support, skipping test")
        return

    def png_valid(self, im):
        # Spot-check one pixel and the geometry of the 16-bit test image.
        a = im(10, 10)
        self.assertAlmostEqualObjects(a, [38671.0, 33914.0, 26762.0])
        self.assertEqual(im.width, 290)
        self.assertEqual(im.height, 442)
        self.assertEqual(im.bands, 3)

    self.file_loader("pngload", PNG_FILE, png_valid)
    self.buffer_loader("pngload_buffer", PNG_FILE, png_valid)
    self.save_load_buffer("pngsave_buffer", "pngload_buffer", self.colour)
    self.save_load("%s.png", self.mono)
    self.save_load("%s.png", self.colour)
def test_tiff(self):
    """TIFF load/save: round-trips for several band layouts, a range of
    save options, EXIF orientation handling and multi-page (OME) files."""
    if pyvips.type_find("VipsForeign", "tiffload") == 0 or \
            not os.path.isfile(TIF_FILE):
        print("no tiff support, skipping test")
        return

    def tiff_valid(self, im):
        # Spot-check one pixel and the geometry of the 16-bit test image.
        a = im(10, 10)
        self.assertAlmostEqualObjects(a, [38671.0, 33914.0, 26762.0])
        self.assertEqual(im.width, 290)
        self.assertEqual(im.height, 442)
        self.assertEqual(im.bands, 3)

    self.file_loader("tiffload", TIF_FILE, tiff_valid)
    self.buffer_loader("tiffload_buffer", TIF_FILE, tiff_valid)
    self.save_load_buffer("tiffsave_buffer",
                          "tiffload_buffer",
                          self.colour)
    self.save_load("%s.tif", self.mono)
    self.save_load("%s.tif", self.colour)
    self.save_load("%s.tif", self.cmyk)

    # 1-bit images and the squash/miniswhite save options.
    self.save_load("%s.tif", self.onebit)
    self.save_load_file(".tif", "[squash]", self.onebit, 0)
    self.save_load_file(".tif", "[miniswhite]", self.onebit, 0)
    self.save_load_file(".tif", "[squash,miniswhite]", self.onebit, 0)

    # Profile attachment, tiling, pyramids, compression and bigtiff; the
    # final argument is the allowed pixel error for lossy settings.
    self.save_load_file(".tif",
                        "[profile={0}]".format(SRGB_FILE),
                        self.colour, 0)
    self.save_load_file(".tif", "[tile]", self.colour, 0)
    self.save_load_file(".tif", "[tile,pyramid]", self.colour, 0)
    self.save_load_file(".tif",
                        "[tile,pyramid,compression=jpeg]", self.colour, 80)
    self.save_load_file(".tif", "[bigtiff]", self.colour, 0)
    self.save_load_file(".tif", "[compression=jpeg]", self.colour, 80)
    self.save_load_file(".tif",
                        "[tile,tile-width=256]", self.colour, 10)

    # we need a copy of the image to set the new metadata on
    # otherwise we get caching problems
    filename = temp_filename(self.tempdir, '.tif')
    x = pyvips.Image.new_from_file(TIF_FILE)
    x = x.copy()
    x.set_value("orientation", 2)
    x.write_to_file(filename)
    x = pyvips.Image.new_from_file(filename)
    y = x.get_value("orientation")
    self.assertEqual(y, 2)

    # we need a copy of the image to set the new metadata on
    # otherwise we get caching problems
    filename = temp_filename(self.tempdir, '.tif')
    x = pyvips.Image.new_from_file(TIF_FILE)
    x = x.copy()
    x.set_value("orientation", 2)
    x.write_to_file(filename)
    x = pyvips.Image.new_from_file(filename)
    y = x.get_value("orientation")
    self.assertEqual(y, 2)
    x.remove("orientation")

    # After removal, a re-save must come back as the default (1).
    filename = temp_filename(self.tempdir, '.tif')
    x.write_to_file(filename)
    x = pyvips.Image.new_from_file(filename)
    y = x.get_value("orientation")
    self.assertEqual(y, 1)

    # Orientation 6 is a 90-degree rotation, so autorotate must swap
    # width and height.
    filename = temp_filename(self.tempdir, '.tif')
    x = pyvips.Image.new_from_file(TIF_FILE)
    x = x.copy()
    x.set_value("orientation", 6)
    x.write_to_file(filename)
    x1 = pyvips.Image.new_from_file(filename)
    x2 = pyvips.Image.new_from_file(filename, autorotate=True)
    self.assertEqual(x1.width, x2.height)
    self.assertEqual(x1.height, x2.width)

    # Multi-page (OME) TIFF: default load gives one page ...
    x = pyvips.Image.new_from_file(OME_FILE)
    self.assertEqual(x.width, 439)
    self.assertEqual(x.height, 167)
    page_height = x.height

    # ... n=-1 loads all 15 pages as a tall strip.
    x = pyvips.Image.new_from_file(OME_FILE, n=-1)
    self.assertEqual(x.width, 439)
    self.assertEqual(x.height, page_height * 15)

    x = pyvips.Image.new_from_file(OME_FILE, page=1, n=-1)
    self.assertEqual(x.width, 439)
    self.assertEqual(x.height, page_height * 14)

    x = pyvips.Image.new_from_file(OME_FILE, page=1, n=2)
    self.assertEqual(x.width, 439)
    self.assertEqual(x.height, page_height * 2)

    # Pixel values at the page boundary mark the page transition.
    x = pyvips.Image.new_from_file(OME_FILE, n=-1)
    self.assertEqual(x(0, 166)[0], 96)
    self.assertEqual(x(0, 167)[0], 0)
    self.assertEqual(x(0, 168)[0], 1)

    # Saving a multi-page image and reloading must preserve the pages.
    filename = temp_filename(self.tempdir, '.tif')
    x.write_to_file(filename)

    x = pyvips.Image.new_from_file(filename, n=-1)
    self.assertEqual(x.width, 439)
    self.assertEqual(x.height, page_height * 15)
    self.assertEqual(x(0, 166)[0], 96)
    self.assertEqual(x(0, 167)[0], 0)
    self.assertEqual(x(0, 168)[0], 1)
def test_magickload(self):
    """ImageMagick loader: GIF, SVG (density), animated GIF paging and
    DICOM support."""
    if pyvips.type_find("VipsForeign", "magickload") == 0 or \
            not os.path.isfile(GIF_FILE):
        print("no magick support, skipping test")
        return

    def gif_valid(self, im):
        # some libMagick produce an RGB for this image, some a mono, some
        # rgba, some have a valid alpha, some don't :-(
        # therefore ... just test channel 0
        a = im(10, 10)[0]
        self.assertAlmostEqual(a, 33)
        self.assertEqual(im.width, 159)
        self.assertEqual(im.height, 203)

    self.file_loader("magickload", GIF_FILE, gif_valid)
    self.buffer_loader("magickload_buffer", GIF_FILE, gif_valid)

    # we should have rgba for svg files
    im = pyvips.Image.magickload(SVG_FILE)
    self.assertEqual(im.bands, 4)

    # density should change size of generated svg
    im = pyvips.Image.magickload(SVG_FILE, density='100')
    width = im.width
    height = im.height
    im = pyvips.Image.magickload(SVG_FILE, density='200')
    # This seems to fail on travis, no idea why, some problem in their IM
    # perhaps
    # self.assertEqual(im.width, width * 2)
    # self.assertEqual(im.height, height * 2)

    # all-frames should load every frame of the animation
    # (though all-frames is deprecated)
    im = pyvips.Image.magickload(GIF_ANIM_FILE)
    width = im.width
    height = im.height
    im = pyvips.Image.magickload(GIF_ANIM_FILE, all_frames=True)
    self.assertEqual(im.width, width)
    self.assertEqual(im.height, height * 5)

    # page/n let you pick a range of pages
    im = pyvips.Image.magickload(GIF_ANIM_FILE)
    width = im.width
    height = im.height
    im = pyvips.Image.magickload(GIF_ANIM_FILE, page=1, n=2)
    self.assertEqual(im.width, width)
    self.assertEqual(im.height, height * 2)
    page_height = im.get_value("page-height")
    self.assertEqual(page_height, height)

    # should work for dicom
    im = pyvips.Image.magickload(DICOM_FILE)
    self.assertEqual(im.width, 128)
    self.assertEqual(im.height, 128)
    # some IMs are 3 bands, some are 1, can't really test
    # self.assertEqual(im.bands, 1)
def test_webp(self):
    """WebP load/save: round-trips, lossless mode, Q scaling and ICC/EXIF
    metadata preservation (when the build supports metadata)."""
    if pyvips.type_find("VipsForeign", "webpload") == 0 or \
            not os.path.isfile(WEBP_FILE):
        print("no webp support, skipping test")
        return

    def webp_valid(self, im):
        # Spot-check one pixel and the geometry.
        a = im(10, 10)
        self.assertAlmostEqualObjects(a, [71, 166, 236])
        self.assertEqual(im.width, 550)
        self.assertEqual(im.height, 368)
        self.assertEqual(im.bands, 3)

    self.file_loader("webpload", WEBP_FILE, webp_valid)
    self.buffer_loader("webpload_buffer", WEBP_FILE, webp_valid)
    self.save_load_buffer("webpsave_buffer", "webpload_buffer",
                          self.colour, 60)
    self.save_load("%s.webp", self.colour)

    # test lossless mode
    im = pyvips.Image.new_from_file(WEBP_FILE)
    buf = im.webpsave_buffer(lossless=True)
    im2 = pyvips.Image.new_from_buffer(buf, "")
    self.assertEqual(im.avg(), im2.avg())

    # higher Q should mean a bigger buffer
    b1 = im.webpsave_buffer(Q=10)
    b2 = im.webpsave_buffer(Q=90)
    self.assertGreater(len(b2), len(b1))

    # try saving an image with an ICC profile and reading it back ... if we
    # can do it, our webp supports metadata load/save
    buf = self.colour.webpsave_buffer()
    im = pyvips.Image.new_from_buffer(buf, "")
    if im.get_typeof("icc-profile-data") != 0:
        # verify that the profile comes back unharmed
        p1 = self.colour.get_value("icc-profile-data")
        p2 = im.get_value("icc-profile-data")
        self.assertEqual(p1, p2)

        # add tests for exif, xmp, exif
        # the exif test will need us to be able to walk the header,
        # we can't just check exif-data

        # we can test that exif changes change the output of webpsave
        x = self.colour.copy()
        x.set_value("orientation", 6)
        buf = x.webpsave_buffer()
        y = pyvips.Image.new_from_buffer(buf, "")
        self.assertEqual(y.get_value("orientation"), 6)
def test_analyzeload(self):
    """Analyze 7.5 loader: spot-check one pixel and the image geometry."""
    if pyvips.type_find("VipsForeign", "analyzeload") == 0 or \
            not os.path.isfile(ANALYZE_FILE):
        print("no analyze support, skipping test")
        return

    def analyze_valid(self, im):
        a = im(10, 10)
        self.assertAlmostEqual(a[0], 3335)
        self.assertEqual(im.width, 128)
        self.assertEqual(im.height, 8064)
        self.assertEqual(im.bands, 1)

    self.file_loader("analyzeload", ANALYZE_FILE, analyze_valid)
def test_matload(self):
    """Matlab .mat loader: spot-check one pixel and the image geometry."""
    if pyvips.type_find("VipsForeign", "matload") == 0 or \
            not os.path.isfile(MATLAB_FILE):
        print("no matlab support, skipping test")
        return

    def matlab_valid(self, im):
        a = im(10, 10)
        self.assertAlmostEqualObjects(a, [38671.0, 33914.0, 26762.0])
        self.assertEqual(im.width, 290)
        self.assertEqual(im.height, 442)
        self.assertEqual(im.bands, 3)

    self.file_loader("matload", MATLAB_FILE, matlab_valid)
def test_openexrload(self):
    """OpenEXR loader: spot-check a float pixel and the image geometry."""
    if pyvips.type_find("VipsForeign", "openexrload") == 0 or \
            not os.path.isfile(EXR_FILE):
        print("no openexr support, skipping test")
        return

    def exr_valid(self, im):
        a = im(10, 10)
        self.assertAlmostEqualObjects(a, [0.124512, 0.159668,
                                          0.040375, 1.0],
                                      places=5)
        self.assertEqual(im.width, 610)
        self.assertEqual(im.height, 406)
        self.assertEqual(im.bands, 4)

    self.file_loader("openexrload", EXR_FILE, exr_valid)
def test_fitsload(self):
    """FITS loader and saver: spot-check a pixel, then round-trip mono."""
    if pyvips.type_find("VipsForeign", "fitsload") == 0 or \
            not os.path.isfile(FITS_FILE):
        print("no fits support, skipping test")
        return

    def fits_valid(self, im):
        a = im(10, 10)
        self.assertAlmostEqualObjects(a, [-0.165013, -0.148553, 1.09122,
                                          -0.942242],
                                      places=5)
        self.assertEqual(im.width, 200)
        self.assertEqual(im.height, 200)
        self.assertEqual(im.bands, 4)

    self.file_loader("fitsload", FITS_FILE, fits_valid)
    self.save_load("%s.fits", self.mono)
def test_openslideload(self):
    """OpenSlide loader: spot-check one pixel and the slide geometry."""
    if pyvips.type_find("VipsForeign", "openslideload") == 0 or \
            not os.path.isfile(OPENSLIDE_FILE):
        print("no openslide support, skipping test")
        return

    def openslide_valid(self, im):
        a = im(10, 10)
        self.assertAlmostEqualObjects(a, [244, 250, 243, 255])
        self.assertEqual(im.width, 2220)
        self.assertEqual(im.height, 2967)
        self.assertEqual(im.bands, 4)

    self.file_loader("openslideload", OPENSLIDE_FILE, openslide_valid)
def test_pdfload(self):
    """PDF loader: pixel/geometry checks plus the scale and dpi options,
    which should both double the rendered size (within rounding)."""
    if pyvips.type_find("VipsForeign", "pdfload") == 0 or \
            not os.path.isfile(PDF_FILE):
        print("no pdf support, skipping test")
        return

    def pdf_valid(self, im):
        a = im(10, 10)
        self.assertAlmostEqualObjects(a, [35, 31, 32, 255])
        self.assertEqual(im.width, 1133)
        self.assertEqual(im.height, 680)
        self.assertEqual(im.bands, 4)

    self.file_loader("pdfload", PDF_FILE, pdf_valid)
    self.buffer_loader("pdfload_buffer", PDF_FILE, pdf_valid)

    # scale=2 should double the rendered size.
    im = pyvips.Image.new_from_file(PDF_FILE)
    x = pyvips.Image.new_from_file(PDF_FILE, scale=2)
    self.assertLess(abs(im.width * 2 - x.width), 2)
    self.assertLess(abs(im.height * 2 - x.height), 2)

    # dpi=144 (vs the 72 dpi default) should also double it.
    im = pyvips.Image.new_from_file(PDF_FILE)
    x = pyvips.Image.new_from_file(PDF_FILE, dpi=144)
    self.assertLess(abs(im.width * 2 - x.width), 2)
    self.assertLess(abs(im.height * 2 - x.height), 2)
def test_gifload(self):
    """GIF loader: single-frame checks plus page/n selection for
    animations (pages are stacked vertically, page-height per frame)."""
    if pyvips.type_find("VipsForeign", "gifload") == 0 or \
            not os.path.isfile(GIF_FILE):
        print("no gif support, skipping test")
        return

    def gif_valid(self, im):
        a = im(10, 10)
        self.assertAlmostEqualObjects(a, [33])
        self.assertEqual(im.width, 159)
        self.assertEqual(im.height, 203)
        self.assertEqual(im.bands, 1)

    self.file_loader("gifload", GIF_FILE, gif_valid)
    self.buffer_loader("gifload_buffer", GIF_FILE, gif_valid)

    # n=2 loads two frames stacked vertically.
    x1 = pyvips.Image.new_from_file(GIF_ANIM_FILE)
    x2 = pyvips.Image.new_from_file(GIF_ANIM_FILE, n=2)
    self.assertEqual(x2.height, 2 * x1.height)
    page_height = x2.get_value("page-height")
    self.assertEqual(page_height, x1.height)

    # n=-1 loads every frame; the animation has 5 frames.
    x2 = pyvips.Image.new_from_file(GIF_ANIM_FILE, n=-1)
    self.assertEqual(x2.height, 5 * x1.height)

    # page=1, n=-1 loads from the second frame to the end.
    x2 = pyvips.Image.new_from_file(GIF_ANIM_FILE, page=1, n=-1)
    self.assertEqual(x2.height, 4 * x1.height)
def test_svgload(self):
    """SVG loader: plain, svgz and .svg.gz variants, plus the scale and
    dpi options, which should both double the rendered size."""
    if pyvips.type_find("VipsForeign", "svgload") == 0 or \
            not os.path.isfile(SVG_FILE):
        print("no svg support, skipping test")
        return

    def svg_valid(self, im):
        a = im(10, 10)
        # some old rsvg versions are way, way off
        self.assertLess(abs(a[0] - 79), 2)
        self.assertLess(abs(a[1] - 79), 2)
        self.assertLess(abs(a[2] - 132), 2)
        self.assertLess(abs(a[3] - 255), 2)
        self.assertEqual(im.width, 288)
        self.assertEqual(im.height, 470)
        self.assertEqual(im.bands, 4)

    self.file_loader("svgload", SVG_FILE, svg_valid)
    self.buffer_loader("svgload_buffer", SVG_FILE, svg_valid)

    # The compressed variants must load identically.
    self.file_loader("svgload", SVGZ_FILE, svg_valid)
    self.buffer_loader("svgload_buffer", SVGZ_FILE, svg_valid)

    self.file_loader("svgload", SVG_GZ_FILE, svg_valid)

    # scale=2 should double the rendered size.
    im = pyvips.Image.new_from_file(SVG_FILE)
    x = pyvips.Image.new_from_file(SVG_FILE, scale=2)
    self.assertLess(abs(im.width * 2 - x.width), 2)
    self.assertLess(abs(im.height * 2 - x.height), 2)

    # dpi=144 (vs the 72 dpi default) should also double it.
    im = pyvips.Image.new_from_file(SVG_FILE)
    x = pyvips.Image.new_from_file(SVG_FILE, dpi=144)
    self.assertLess(abs(im.width * 2 - x.width), 2)
    self.assertLess(abs(im.height * 2 - x.height), 2)
def test_csv(self):
    """CSV format: lossless round-trip of the mono image."""
    self.save_load("%s.csv", self.mono)
def test_matrix(self):
    """Matrix format: lossless round-trip of the mono image."""
    self.save_load("%s.mat", self.mono)
def test_ppm(self):
    """PPM format: lossless round-trips of mono and colour images."""
    if pyvips.type_find("VipsForeign", "ppmload") == 0:
        print("no PPM support, skipping test")
        return

    self.save_load("%s.ppm", self.mono)
    self.save_load("%s.ppm", self.colour)
def test_rad(self):
    """Radiance (.hdr) format: round-trip colour, and save the rad
    fixture through a buffer with an exact match required."""
    if pyvips.type_find("VipsForeign", "radload") == 0:
        print("no Radiance support, skipping test")
        return

    self.save_load("%s.hdr", self.colour)
    self.save_buffer_tempfile("radsave_buffer", ".hdr",
                              self.rad, max_diff=0)
    def test_dzsave(self):
        """Exercise dzsave's main options one at a time: deepzoom, google
        and zoomify layouts, overlap clipping, suffix, tile size, and zip
        output to file and to a memory buffer."""
        if pyvips.type_find("VipsForeign", "dzsave") == 0:
            print("no dzsave support, skipping test")
            return

        # dzsave is hard to test, there are so many options
        # test each option separately and hope they all function together
        # correctly

        # default deepzoom layout ... we must use png here, since we want to
        # test the overlap for equality
        filename = temp_filename(self.tempdir, '')
        self.colour.dzsave(filename, suffix=".png")

        # test horizontal overlap ... expect 256 step, overlap 1
        x = pyvips.Image.new_from_file(filename + "_files/10/0_0.png")
        self.assertEqual(x.width, 255)
        y = pyvips.Image.new_from_file(filename + "_files/10/1_0.png")
        self.assertEqual(y.width, 256)

        # the right two columns of x should equal the left two columns of y
        left = x.crop(x.width - 2, 0, 2, x.height)
        right = y.crop(0, 0, 2, y.height)
        self.assertEqual((left - right).abs().max(), 0)

        # test vertical overlap
        self.assertEqual(x.height, 255)
        y = pyvips.Image.new_from_file(filename + "_files/10/0_1.png")
        self.assertEqual(y.height, 256)

        # the bottom two rows of x should equal the top two rows of y
        top = x.crop(0, x.height - 2, x.width, 2)
        bottom = y.crop(0, 0, y.width, 2)
        self.assertEqual((top - bottom).abs().max(), 0)

        # there should be a bottom layer (a single 1x1 pixel)
        x = pyvips.Image.new_from_file(filename + "_files/0/0_0.png")
        self.assertEqual(x.width, 1)
        self.assertEqual(x.height, 1)

        # 10 should be the final layer
        self.assertFalse(os.path.isdir(filename + "_files/11"))

        # default google layout
        filename = temp_filename(self.tempdir, '')
        self.colour.dzsave(filename, layout="google")

        # test bottom-right tile ... default is 256x256 tiles, overlap 0
        x = pyvips.Image.new_from_file(filename + "/2/2/3.jpg")
        self.assertEqual(x.width, 256)
        self.assertEqual(x.height, 256)
        self.assertFalse(os.path.exists(filename + "/2/2/4.jpg"))
        self.assertFalse(os.path.exists(filename + "/3"))
        # google layout writes a blank.png filler tile
        x = pyvips.Image.new_from_file(filename + "/blank.png")
        self.assertEqual(x.width, 256)
        self.assertEqual(x.height, 256)

        # google layout with overlap ... verify that we clip correctly
        # with overlap 192 tile size 256, we should step by 64 pixels each time
        # so 3x3 tiles exactly
        filename = temp_filename(self.tempdir, '')
        self.colour.crop(0, 0, 384, 384).dzsave(filename, layout="google",
                                                overlap=192, depth="one")

        # test bottom-right tile ... default is 256x256 tiles, overlap 0
        x = pyvips.Image.new_from_file(filename + "/0/2/2.jpg")
        self.assertEqual(x.width, 256)
        self.assertEqual(x.height, 256)
        self.assertFalse(os.path.exists(filename + "/0/3/3.jpg"))

        # one pixel larger input should spill into a 4x4 grid
        filename = temp_filename(self.tempdir, '')
        self.colour.crop(0, 0, 385, 385).dzsave(filename, layout="google",
                                                overlap=192, depth="one")

        # test bottom-right tile ... default is 256x256 tiles, overlap 0
        x = pyvips.Image.new_from_file(filename + "/0/3/3.jpg")
        self.assertEqual(x.width, 256)
        self.assertEqual(x.height, 256)
        self.assertFalse(os.path.exists(filename + "/0/4/4.jpg"))

        # default zoomify layout
        filename = temp_filename(self.tempdir, '')
        self.colour.dzsave(filename, layout="zoomify")

        # 256x256 tiles, no overlap
        self.assertTrue(os.path.exists(filename + "/ImageProperties.xml"))
        x = pyvips.Image.new_from_file(filename + "/TileGroup0/2-3-2.jpg")
        self.assertEqual(x.width, 256)
        self.assertEqual(x.height, 256)

        # test zip output
        filename = temp_filename(self.tempdir, '.zip')
        self.colour.dzsave(filename)
        # before 8.5.8, you needed a gc on pypy to flush small zip output to
        # disc
        gc.collect()
        self.assertTrue(os.path.exists(filename))
        self.assertFalse(os.path.exists(filename + "_files"))
        self.assertFalse(os.path.exists(filename + ".dzi"))

        # test compressed zip output
        filename2 = temp_filename(self.tempdir, '.zip')
        self.colour.dzsave(filename2, compression=-1)
        # before 8.5.8, you needed a gc on pypy to flush small zip output to
        # disc
        gc.collect()
        self.assertTrue(os.path.exists(filename2))
        # the deflated zip should be smaller than the stored one
        self.assertLess(os.path.getsize(filename2),
                        os.path.getsize(filename))

        # test suffix
        filename = temp_filename(self.tempdir, '')
        self.colour.dzsave(filename, suffix=".png")

        x = pyvips.Image.new_from_file(filename + "_files/10/0_0.png")
        self.assertEqual(x.width, 255)

        # test overlap
        filename = temp_filename(self.tempdir, '')
        self.colour.dzsave(filename, overlap=200)

        y = pyvips.Image.new_from_file(filename + "_files/10/1_1.jpeg")
        self.assertEqual(y.width, 654)

        # test tile-size
        filename = temp_filename(self.tempdir, '')
        self.colour.dzsave(filename, tile_size=512)

        y = pyvips.Image.new_from_file(filename + "_files/10/0_0.jpeg")
        self.assertEqual(y.width, 513)
        self.assertEqual(y.height, 513)

        # test save to memory buffer
        filename = temp_filename(self.tempdir, '.zip')
        base = os.path.basename(filename)
        root, ext = os.path.splitext(base)

        self.colour.dzsave(filename)
        # before 8.5.8, you needed a gc on pypy to flush small zip output to
        # disc
        gc.collect()
        with open(filename, 'rb') as f:
            buf1 = f.read()
        buf2 = self.colour.dzsave_buffer(basename=root)
        self.assertEqual(len(buf1), len(buf2))

        # we can't test the bytes are exactly equal, the timestamps will be
        # different
# allow running this test module directly with `python <file>`
if __name__ == '__main__':
    unittest.main()
| 38.340674 | 79 | 0.597453 |
import gc
import os
import shutil
import tempfile
import unittest
import pyvips
from .helpers import PyvipsTester, JPEG_FILE, SRGB_FILE, \
MATLAB_FILE, PNG_FILE, TIF_FILE, OME_FILE, ANALYZE_FILE, \
GIF_FILE, WEBP_FILE, EXR_FILE, FITS_FILE, OPENSLIDE_FILE, \
PDF_FILE, SVG_FILE, SVGZ_FILE, SVG_GZ_FILE, GIF_ANIM_FILE, \
DICOM_FILE, temp_filename
class TestForeign(PyvipsTester):
tempdir = None
@classmethod
def setUpClass(cls):
cls.tempdir = tempfile.mkdtemp()
def setUp(self):
self.colour = pyvips.Image.jpegload(JPEG_FILE)
self.mono = self.colour.extract_band(1)
self.mono.remove("icc-profile-data")
self.rad = self.colour.float2rad()
self.rad.remove("icc-profile-data")
self.cmyk = self.colour.bandjoin(self.mono)
self.cmyk = self.cmyk.copy(interpretation=pyvips.Interpretation.CMYK)
self.cmyk.remove("icc-profile-data")
im = pyvips.Image.new_from_file(GIF_FILE)
self.onebit = im > 128
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tempdir, ignore_errors=True)
def file_loader(self, loader, test_file, validate):
im = pyvips.Operation.call(loader, test_file)
validate(self, im)
im = pyvips.Image.new_from_file(test_file)
validate(self, im)
def buffer_loader(self, loader, test_file, validate):
with open(test_file, 'rb') as f:
buf = f.read()
im = pyvips.Operation.call(loader, buf)
validate(self, im)
im = pyvips.Image.new_from_buffer(buf, "")
validate(self, im)
def save_load(self, format, im):
x = pyvips.Image.new_temp_file(format)
im.write(x)
self.assertEqual(im.width, x.width)
self.assertEqual(im.height, x.height)
self.assertEqual(im.bands, x.bands)
max_diff = (im - x).abs().max()
self.assertEqual(max_diff, 0)
def save_load_file(self, format, options, im, thresh):
filename = temp_filename(self.tempdir, format)
im.write_to_file(filename + options)
x = pyvips.Image.new_from_file(filename)
self.assertEqual(im.width, x.width)
self.assertEqual(im.height, x.height)
self.assertEqual(im.bands, x.bands)
max_diff = (im - x).abs().max()
self.assertTrue(max_diff <= thresh)
x = None
def save_load_buffer(self, saver, loader, im, max_diff=0):
buf = pyvips.Operation.call(saver, im)
x = pyvips.Operation.call(loader, buf)
self.assertEqual(im.width, x.width)
self.assertEqual(im.height, x.height)
self.assertEqual(im.bands, x.bands)
self.assertLessEqual((im - x).abs().max(), max_diff)
def save_buffer_tempfile(self, saver, suf, im, max_diff=0):
filename = temp_filename(self.tempdir, suf)
buf = pyvips.Operation.call(saver, im)
f = open(filename, 'wb')
f.write(buf)
f.close()
x = pyvips.Image.new_from_file(filename)
self.assertEqual(im.width, x.width)
self.assertEqual(im.height, x.height)
self.assertEqual(im.bands, x.bands)
self.assertLessEqual((im - x).abs().max(), max_diff)
def test_vips(self):
self.save_load_file(".v", "", self.colour, 0)
# check we can save and restore metadata
filename = temp_filename(self.tempdir, ".v")
self.colour.write_to_file(filename)
x = pyvips.Image.new_from_file(filename)
before_exif = self.colour.get_value("exif-data")
after_exif = x.get_value("exif-data")
self.assertEqual(len(before_exif), len(after_exif))
for i in range(len(before_exif)):
self.assertEqual(before_exif[i], after_exif[i])
x = None
def test_jpeg(self):
if pyvips.type_find("VipsForeign", "jpegload") == 0:
print("no jpeg support in this vips, skipping test")
return
def jpeg_valid(self, im):
a = im(10, 10)
self.assertAlmostEqualObjects(a, [6, 5, 3])
profile = im.get_value("icc-profile-data")
self.assertEqual(len(profile), 1352)
self.assertEqual(im.width, 1024)
self.assertEqual(im.height, 768)
self.assertEqual(im.bands, 3)
self.file_loader("jpegload", JPEG_FILE, jpeg_valid)
self.save_load("%s.jpg", self.mono)
self.save_load("%s.jpg", self.colour)
self.buffer_loader("jpegload_buffer", JPEG_FILE, jpeg_valid)
self.save_load_buffer("jpegsave_buffer", "jpegload_buffer",
self.colour, 80)
# see if we have exif parsing: our test image has this field
x = pyvips.Image.new_from_file(JPEG_FILE)
if x.get_typeof("exif-ifd0-Orientation") != 0:
# we need a copy of the image to set the new metadata on
# otherwise we get caching problems
x = pyvips.Image.new_from_file(JPEG_FILE)
x = x.copy()
x.set_value("orientation", 2)
filename = temp_filename(self.tempdir, '.jpg')
x.write_to_file(filename)
x = pyvips.Image.new_from_file(filename)
y = x.get_value("orientation")
self.assertEqual(y, 2)
filename = temp_filename(self.tempdir, '.jpg')
x = pyvips.Image.new_from_file(JPEG_FILE)
x = x.copy()
x.set_value("orientation", 2)
x.write_to_file(filename)
x = pyvips.Image.new_from_file(filename)
y = x.get_value("orientation")
self.assertEqual(y, 2)
x.remove("orientation")
filename = temp_filename(self.tempdir, '.jpg')
x.write_to_file(filename)
x = pyvips.Image.new_from_file(filename)
y = x.get_value("orientation")
self.assertEqual(y, 1)
filename = temp_filename(self.tempdir, '.jpg')
x = pyvips.Image.new_from_file(JPEG_FILE)
x = x.copy()
x.set_value("orientation", 6)
x.write_to_file(filename)
x1 = pyvips.Image.new_from_file(filename)
x2 = pyvips.Image.new_from_file(filename, autorotate=True)
self.assertEqual(x1.width, x2.height)
self.assertEqual(x1.height, x2.width)
def test_png(self):
if pyvips.type_find("VipsForeign", "pngload") == 0 or \
not os.path.isfile(PNG_FILE):
print("no png support, skipping test")
def png_valid(self, im):
a = im(10, 10)
self.assertAlmostEqualObjects(a, [38671.0, 33914.0, 26762.0])
self.assertEqual(im.width, 290)
self.assertEqual(im.height, 442)
self.assertEqual(im.bands, 3)
self.file_loader("pngload", PNG_FILE, png_valid)
self.buffer_loader("pngload_buffer", PNG_FILE, png_valid)
self.save_load_buffer("pngsave_buffer", "pngload_buffer", self.colour)
self.save_load("%s.png", self.mono)
self.save_load("%s.png", self.colour)
def test_tiff(self):
if pyvips.type_find("VipsForeign", "tiffload") == 0 or \
not os.path.isfile(TIF_FILE):
print("no tiff support, skipping test")
return
def tiff_valid(self, im):
a = im(10, 10)
self.assertAlmostEqualObjects(a, [38671.0, 33914.0, 26762.0])
self.assertEqual(im.width, 290)
self.assertEqual(im.height, 442)
self.assertEqual(im.bands, 3)
self.file_loader("tiffload", TIF_FILE, tiff_valid)
self.buffer_loader("tiffload_buffer", TIF_FILE, tiff_valid)
self.save_load_buffer("tiffsave_buffer",
"tiffload_buffer",
self.colour)
self.save_load("%s.tif", self.mono)
self.save_load("%s.tif", self.colour)
self.save_load("%s.tif", self.cmyk)
self.save_load("%s.tif", self.onebit)
self.save_load_file(".tif", "[squash]", self.onebit, 0)
self.save_load_file(".tif", "[miniswhite]", self.onebit, 0)
self.save_load_file(".tif", "[squash,miniswhite]", self.onebit, 0)
self.save_load_file(".tif",
"[profile={0}]".format(SRGB_FILE),
self.colour, 0)
self.save_load_file(".tif", "[tile]", self.colour, 0)
self.save_load_file(".tif", "[tile,pyramid]", self.colour, 0)
self.save_load_file(".tif",
"[tile,pyramid,compression=jpeg]", self.colour, 80)
self.save_load_file(".tif", "[bigtiff]", self.colour, 0)
self.save_load_file(".tif", "[compression=jpeg]", self.colour, 80)
self.save_load_file(".tif",
"[tile,tile-width=256]", self.colour, 10)
# we need a copy of the image to set the new metadata on
# otherwise we get caching problems
filename = temp_filename(self.tempdir, '.tif')
x = pyvips.Image.new_from_file(TIF_FILE)
x = x.copy()
x.set_value("orientation", 2)
x.write_to_file(filename)
x = pyvips.Image.new_from_file(filename)
y = x.get_value("orientation")
self.assertEqual(y, 2)
# we need a copy of the image to set the new metadata on
# otherwise we get caching problems
filename = temp_filename(self.tempdir, '.tif')
x = pyvips.Image.new_from_file(TIF_FILE)
x = x.copy()
x.set_value("orientation", 2)
x.write_to_file(filename)
x = pyvips.Image.new_from_file(filename)
y = x.get_value("orientation")
self.assertEqual(y, 2)
x.remove("orientation")
filename = temp_filename(self.tempdir, '.tif')
x.write_to_file(filename)
x = pyvips.Image.new_from_file(filename)
y = x.get_value("orientation")
self.assertEqual(y, 1)
filename = temp_filename(self.tempdir, '.tif')
x = pyvips.Image.new_from_file(TIF_FILE)
x = x.copy()
x.set_value("orientation", 6)
x.write_to_file(filename)
x1 = pyvips.Image.new_from_file(filename)
x2 = pyvips.Image.new_from_file(filename, autorotate=True)
self.assertEqual(x1.width, x2.height)
self.assertEqual(x1.height, x2.width)
x = pyvips.Image.new_from_file(OME_FILE)
self.assertEqual(x.width, 439)
self.assertEqual(x.height, 167)
page_height = x.height
x = pyvips.Image.new_from_file(OME_FILE, n=-1)
self.assertEqual(x.width, 439)
self.assertEqual(x.height, page_height * 15)
x = pyvips.Image.new_from_file(OME_FILE, page=1, n=-1)
self.assertEqual(x.width, 439)
self.assertEqual(x.height, page_height * 14)
x = pyvips.Image.new_from_file(OME_FILE, page=1, n=2)
self.assertEqual(x.width, 439)
self.assertEqual(x.height, page_height * 2)
x = pyvips.Image.new_from_file(OME_FILE, n=-1)
self.assertEqual(x(0, 166)[0], 96)
self.assertEqual(x(0, 167)[0], 0)
self.assertEqual(x(0, 168)[0], 1)
filename = temp_filename(self.tempdir, '.tif')
x.write_to_file(filename)
x = pyvips.Image.new_from_file(filename, n=-1)
self.assertEqual(x.width, 439)
self.assertEqual(x.height, page_height * 15)
self.assertEqual(x(0, 166)[0], 96)
self.assertEqual(x(0, 167)[0], 0)
self.assertEqual(x(0, 168)[0], 1)
def test_magickload(self):
if pyvips.type_find("VipsForeign", "magickload") == 0 or \
not os.path.isfile(GIF_FILE):
print("no magick support, skipping test")
return
def gif_valid(self, im):
# some libMagick produce an RGB for this image, some a mono, some
# rgba, some have a valid alpha, some don't :-(
a = im(10, 10)[0]
self.assertAlmostEqual(a, 33)
self.assertEqual(im.width, 159)
self.assertEqual(im.height, 203)
self.file_loader("magickload", GIF_FILE, gif_valid)
self.buffer_loader("magickload_buffer", GIF_FILE, gif_valid)
im = pyvips.Image.magickload(SVG_FILE)
self.assertEqual(im.bands, 4)
im = pyvips.Image.magickload(SVG_FILE, density='100')
width = im.width
height = im.height
im = pyvips.Image.magickload(SVG_FILE, density='200')
im = pyvips.Image.magickload(GIF_ANIM_FILE)
width = im.width
height = im.height
im = pyvips.Image.magickload(GIF_ANIM_FILE, all_frames=True)
self.assertEqual(im.width, width)
self.assertEqual(im.height, height * 5)
im = pyvips.Image.magickload(GIF_ANIM_FILE)
width = im.width
height = im.height
im = pyvips.Image.magickload(GIF_ANIM_FILE, page=1, n=2)
self.assertEqual(im.width, width)
self.assertEqual(im.height, height * 2)
page_height = im.get_value("page-height")
self.assertEqual(page_height, height)
im = pyvips.Image.magickload(DICOM_FILE)
self.assertEqual(im.width, 128)
self.assertEqual(im.height, 128)
# self.assertEqual(im.bands, 1)
def test_webp(self):
if pyvips.type_find("VipsForeign", "webpload") == 0 or \
not os.path.isfile(WEBP_FILE):
print("no webp support, skipping test")
return
def webp_valid(self, im):
a = im(10, 10)
self.assertAlmostEqualObjects(a, [71, 166, 236])
self.assertEqual(im.width, 550)
self.assertEqual(im.height, 368)
self.assertEqual(im.bands, 3)
self.file_loader("webpload", WEBP_FILE, webp_valid)
self.buffer_loader("webpload_buffer", WEBP_FILE, webp_valid)
self.save_load_buffer("webpsave_buffer", "webpload_buffer",
self.colour, 60)
self.save_load("%s.webp", self.colour)
# test lossless mode
im = pyvips.Image.new_from_file(WEBP_FILE)
buf = im.webpsave_buffer(lossless=True)
im2 = pyvips.Image.new_from_buffer(buf, "")
self.assertEqual(im.avg(), im2.avg())
# higher Q should mean a bigger buffer
b1 = im.webpsave_buffer(Q=10)
b2 = im.webpsave_buffer(Q=90)
self.assertGreater(len(b2), len(b1))
# try saving an image with an ICC profile and reading it back ... if we
# can do it, our webp supports metadata load/save
buf = self.colour.webpsave_buffer()
im = pyvips.Image.new_from_buffer(buf, "")
if im.get_typeof("icc-profile-data") != 0:
# verify that the profile comes back unharmed
p1 = self.colour.get_value("icc-profile-data")
p2 = im.get_value("icc-profile-data")
self.assertEqual(p1, p2)
# add tests for exif, xmp, exif
# the exif test will need us to be able to walk the header,
# we can't just check exif-data
x = self.colour.copy()
x.set_value("orientation", 6)
buf = x.webpsave_buffer()
y = pyvips.Image.new_from_buffer(buf, "")
self.assertEqual(y.get_value("orientation"), 6)
def test_analyzeload(self):
if pyvips.type_find("VipsForeign", "analyzeload") == 0 or \
not os.path.isfile(ANALYZE_FILE):
print("no analyze support, skipping test")
return
def analyze_valid(self, im):
a = im(10, 10)
self.assertAlmostEqual(a[0], 3335)
self.assertEqual(im.width, 128)
self.assertEqual(im.height, 8064)
self.assertEqual(im.bands, 1)
self.file_loader("analyzeload", ANALYZE_FILE, analyze_valid)
def test_matload(self):
if pyvips.type_find("VipsForeign", "matload") == 0 or \
not os.path.isfile(MATLAB_FILE):
print("no matlab support, skipping test")
return
def matlab_valid(self, im):
a = im(10, 10)
self.assertAlmostEqualObjects(a, [38671.0, 33914.0, 26762.0])
self.assertEqual(im.width, 290)
self.assertEqual(im.height, 442)
self.assertEqual(im.bands, 3)
self.file_loader("matload", MATLAB_FILE, matlab_valid)
def test_openexrload(self):
if pyvips.type_find("VipsForeign", "openexrload") == 0 or \
not os.path.isfile(EXR_FILE):
print("no openexr support, skipping test")
return
def exr_valid(self, im):
a = im(10, 10)
self.assertAlmostEqualObjects(a, [0.124512, 0.159668,
0.040375, 1.0],
places=5)
self.assertEqual(im.width, 610)
self.assertEqual(im.height, 406)
self.assertEqual(im.bands, 4)
self.file_loader("openexrload", EXR_FILE, exr_valid)
def test_fitsload(self):
if pyvips.type_find("VipsForeign", "fitsload") == 0 or \
not os.path.isfile(FITS_FILE):
print("no fits support, skipping test")
return
def fits_valid(self, im):
a = im(10, 10)
self.assertAlmostEqualObjects(a, [-0.165013, -0.148553, 1.09122,
-0.942242],
places=5)
self.assertEqual(im.width, 200)
self.assertEqual(im.height, 200)
self.assertEqual(im.bands, 4)
self.file_loader("fitsload", FITS_FILE, fits_valid)
self.save_load("%s.fits", self.mono)
def test_openslideload(self):
if pyvips.type_find("VipsForeign", "openslideload") == 0 or \
not os.path.isfile(OPENSLIDE_FILE):
print("no openslide support, skipping test")
return
def openslide_valid(self, im):
a = im(10, 10)
self.assertAlmostEqualObjects(a, [244, 250, 243, 255])
self.assertEqual(im.width, 2220)
self.assertEqual(im.height, 2967)
self.assertEqual(im.bands, 4)
self.file_loader("openslideload", OPENSLIDE_FILE, openslide_valid)
def test_pdfload(self):
if pyvips.type_find("VipsForeign", "pdfload") == 0 or \
not os.path.isfile(PDF_FILE):
print("no pdf support, skipping test")
return
def pdf_valid(self, im):
a = im(10, 10)
self.assertAlmostEqualObjects(a, [35, 31, 32, 255])
self.assertEqual(im.width, 1133)
self.assertEqual(im.height, 680)
self.assertEqual(im.bands, 4)
self.file_loader("pdfload", PDF_FILE, pdf_valid)
self.buffer_loader("pdfload_buffer", PDF_FILE, pdf_valid)
im = pyvips.Image.new_from_file(PDF_FILE)
x = pyvips.Image.new_from_file(PDF_FILE, scale=2)
self.assertLess(abs(im.width * 2 - x.width), 2)
self.assertLess(abs(im.height * 2 - x.height), 2)
im = pyvips.Image.new_from_file(PDF_FILE)
x = pyvips.Image.new_from_file(PDF_FILE, dpi=144)
self.assertLess(abs(im.width * 2 - x.width), 2)
self.assertLess(abs(im.height * 2 - x.height), 2)
def test_gifload(self):
if pyvips.type_find("VipsForeign", "gifload") == 0 or \
not os.path.isfile(GIF_FILE):
print("no gif support, skipping test")
return
def gif_valid(self, im):
a = im(10, 10)
self.assertAlmostEqualObjects(a, [33])
self.assertEqual(im.width, 159)
self.assertEqual(im.height, 203)
self.assertEqual(im.bands, 1)
self.file_loader("gifload", GIF_FILE, gif_valid)
self.buffer_loader("gifload_buffer", GIF_FILE, gif_valid)
x1 = pyvips.Image.new_from_file(GIF_ANIM_FILE)
x2 = pyvips.Image.new_from_file(GIF_ANIM_FILE, n=2)
self.assertEqual(x2.height, 2 * x1.height)
page_height = x2.get_value("page-height")
self.assertEqual(page_height, x1.height)
x2 = pyvips.Image.new_from_file(GIF_ANIM_FILE, n=-1)
self.assertEqual(x2.height, 5 * x1.height)
x2 = pyvips.Image.new_from_file(GIF_ANIM_FILE, page=1, n=-1)
self.assertEqual(x2.height, 4 * x1.height)
def test_svgload(self):
if pyvips.type_find("VipsForeign", "svgload") == 0 or \
not os.path.isfile(SVG_FILE):
print("no svg support, skipping test")
return
def svg_valid(self, im):
a = im(10, 10)
self.assertLess(abs(a[0] - 79), 2)
self.assertLess(abs(a[1] - 79), 2)
self.assertLess(abs(a[2] - 132), 2)
self.assertLess(abs(a[3] - 255), 2)
self.assertEqual(im.width, 288)
self.assertEqual(im.height, 470)
self.assertEqual(im.bands, 4)
self.file_loader("svgload", SVG_FILE, svg_valid)
self.buffer_loader("svgload_buffer", SVG_FILE, svg_valid)
self.file_loader("svgload", SVGZ_FILE, svg_valid)
self.buffer_loader("svgload_buffer", SVGZ_FILE, svg_valid)
self.file_loader("svgload", SVG_GZ_FILE, svg_valid)
im = pyvips.Image.new_from_file(SVG_FILE)
x = pyvips.Image.new_from_file(SVG_FILE, scale=2)
self.assertLess(abs(im.width * 2 - x.width), 2)
self.assertLess(abs(im.height * 2 - x.height), 2)
im = pyvips.Image.new_from_file(SVG_FILE)
x = pyvips.Image.new_from_file(SVG_FILE, dpi=144)
self.assertLess(abs(im.width * 2 - x.width), 2)
self.assertLess(abs(im.height * 2 - x.height), 2)
def test_csv(self):
self.save_load("%s.csv", self.mono)
def test_matrix(self):
self.save_load("%s.mat", self.mono)
def test_ppm(self):
if pyvips.type_find("VipsForeign", "ppmload") == 0:
print("no PPM support, skipping test")
return
self.save_load("%s.ppm", self.mono)
self.save_load("%s.ppm", self.colour)
def test_rad(self):
if pyvips.type_find("VipsForeign", "radload") == 0:
print("no Radiance support, skipping test")
return
self.save_load("%s.hdr", self.colour)
self.save_buffer_tempfile("radsave_buffer", ".hdr",
self.rad, max_diff=0)
def test_dzsave(self):
if pyvips.type_find("VipsForeign", "dzsave") == 0:
print("no dzsave support, skipping test")
return
filename = temp_filename(self.tempdir, '')
self.colour.dzsave(filename, suffix=".png")
x = pyvips.Image.new_from_file(filename + "_files/10/0_0.png")
self.assertEqual(x.width, 255)
y = pyvips.Image.new_from_file(filename + "_files/10/1_0.png")
self.assertEqual(y.width, 256)
left = x.crop(x.width - 2, 0, 2, x.height)
right = y.crop(0, 0, 2, y.height)
self.assertEqual((left - right).abs().max(), 0)
self.assertEqual(x.height, 255)
y = pyvips.Image.new_from_file(filename + "_files/10/0_1.png")
self.assertEqual(y.height, 256)
top = x.crop(0, x.height - 2, x.width, 2)
bottom = y.crop(0, 0, y.width, 2)
self.assertEqual((top - bottom).abs().max(), 0)
x = pyvips.Image.new_from_file(filename + "_files/0/0_0.png")
self.assertEqual(x.width, 1)
self.assertEqual(x.height, 1)
self.assertFalse(os.path.isdir(filename + "_files/11"))
filename = temp_filename(self.tempdir, '')
self.colour.dzsave(filename, layout="google")
x = pyvips.Image.new_from_file(filename + "/2/2/3.jpg")
self.assertEqual(x.width, 256)
self.assertEqual(x.height, 256)
self.assertFalse(os.path.exists(filename + "/2/2/4.jpg"))
self.assertFalse(os.path.exists(filename + "/3"))
x = pyvips.Image.new_from_file(filename + "/blank.png")
self.assertEqual(x.width, 256)
self.assertEqual(x.height, 256)
filename = temp_filename(self.tempdir, '')
self.colour.crop(0, 0, 384, 384).dzsave(filename, layout="google",
overlap=192, depth="one")
x = pyvips.Image.new_from_file(filename + "/0/2/2.jpg")
self.assertEqual(x.width, 256)
self.assertEqual(x.height, 256)
self.assertFalse(os.path.exists(filename + "/0/3/3.jpg"))
filename = temp_filename(self.tempdir, '')
self.colour.crop(0, 0, 385, 385).dzsave(filename, layout="google",
overlap=192, depth="one")
x = pyvips.Image.new_from_file(filename + "/0/3/3.jpg")
self.assertEqual(x.width, 256)
self.assertEqual(x.height, 256)
self.assertFalse(os.path.exists(filename + "/0/4/4.jpg"))
filename = temp_filename(self.tempdir, '')
self.colour.dzsave(filename, layout="zoomify")
self.assertTrue(os.path.exists(filename + "/ImageProperties.xml"))
x = pyvips.Image.new_from_file(filename + "/TileGroup0/2-3-2.jpg")
self.assertEqual(x.width, 256)
self.assertEqual(x.height, 256)
filename = temp_filename(self.tempdir, '.zip')
self.colour.dzsave(filename)
gc.collect()
self.assertTrue(os.path.exists(filename))
self.assertFalse(os.path.exists(filename + "_files"))
self.assertFalse(os.path.exists(filename + ".dzi"))
filename2 = temp_filename(self.tempdir, '.zip')
self.colour.dzsave(filename2, compression=-1)
gc.collect()
self.assertTrue(os.path.exists(filename2))
self.assertLess(os.path.getsize(filename2),
os.path.getsize(filename))
filename = temp_filename(self.tempdir, '')
self.colour.dzsave(filename, suffix=".png")
x = pyvips.Image.new_from_file(filename + "_files/10/0_0.png")
self.assertEqual(x.width, 255)
filename = temp_filename(self.tempdir, '')
self.colour.dzsave(filename, overlap=200)
y = pyvips.Image.new_from_file(filename + "_files/10/1_1.jpeg")
self.assertEqual(y.width, 654)
filename = temp_filename(self.tempdir, '')
self.colour.dzsave(filename, tile_size=512)
y = pyvips.Image.new_from_file(filename + "_files/10/0_0.jpeg")
self.assertEqual(y.width, 513)
self.assertEqual(y.height, 513)
filename = temp_filename(self.tempdir, '.zip')
base = os.path.basename(filename)
root, ext = os.path.splitext(base)
self.colour.dzsave(filename)
gc.collect()
with open(filename, 'rb') as f:
buf1 = f.read()
buf2 = self.colour.dzsave_buffer(basename=root)
self.assertEqual(len(buf1), len(buf2))
# different
if __name__ == '__main__':
unittest.main()
| true | true |
f7f4903170bc28980b022117038c403c77986aa6 | 5,079 | py | Python | src/sima/metocean/longtermstatisticswindcalculation.py | SINTEF/simapy | 650b8c2f15503dad98e2bfc0d0788509593822c7 | [
"MIT"
] | null | null | null | src/sima/metocean/longtermstatisticswindcalculation.py | SINTEF/simapy | 650b8c2f15503dad98e2bfc0d0788509593822c7 | [
"MIT"
] | null | null | null | src/sima/metocean/longtermstatisticswindcalculation.py | SINTEF/simapy | 650b8c2f15503dad98e2bfc0d0788509593822c7 | [
"MIT"
] | null | null | null | # This is an autogenerated file
#
# Generated with LongTermStatisticsWindCalculation
from __future__ import annotations
from typing import Dict,Sequence,List
from dmt.entity import Entity
from dmt.blueprint import Blueprint
from .blueprints.longtermstatisticswindcalculation import LongTermStatisticsWindCalculationBlueprint
from typing import Dict
from sima.metocean.calculationlevel import CalculationLevel
from sima.metocean.levelstatisticsmethod import LevelStatisticsMethod
from sima.sima.moao import MOAO
from sima.sima.scriptablevalue import ScriptableValue
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from sima.metocean.windlongtermstatistics import WindLongTermStatistics
class LongTermStatisticsWindCalculation(MOAO):
    """
    Settings for a long-term wind statistics calculation.

    Keyword arguments
    -----------------
    name : str
        (default "")
    description : str
        (default "")
    _id : str
        (default "")
    scriptableValues : List[ScriptableValue]
    returnPeriod : float
        (default 0.0)
    levels : List[CalculationLevel]
    method : LevelStatisticsMethod
    omniMethod : LevelStatisticsMethod
    directionRelativeToWave : float
        (default 0.0)
    statistics : WindLongTermStatistics
    omni : WindLongTermStatistics
    """

    def __init__(self, name="", description="", _id="", returnPeriod=0.0,
                 method=LevelStatisticsMethod.FROM_DISTRIBUTION,
                 omniMethod=LevelStatisticsMethod.FROM_DISTRIBUTION,
                 directionRelativeToWave=0.0, **kwargs):
        super().__init__(**kwargs)
        self.name = name
        self.description = description
        self._id = _id
        self.scriptableValues = list()
        self.returnPeriod = returnPeriod
        self.levels = list()
        self.method = method
        self.omniMethod = omniMethod
        self.directionRelativeToWave = directionRelativeToWave
        self.statistics = None
        self.omni = None
        # Apply remaining keyword arguments as plain attributes; Dict
        # values are skipped (complex values are handled elsewhere by
        # the DMT entity machinery).
        for key, value in kwargs.items():
            if not isinstance(value, Dict):
                setattr(self, key, value)

    @property
    def blueprint(self) -> Blueprint:
        """Return blueprint that this entity represents"""
        return LongTermStatisticsWindCalculationBlueprint()

    @property
    def name(self) -> str:
        """Entity name"""
        return self.__name

    @name.setter
    def name(self, value: str):
        """Set name (coerced to str)"""
        self.__name = str(value)

    @property
    def description(self) -> str:
        """Entity description"""
        return self.__description

    @description.setter
    def description(self, value: str):
        """Set description (coerced to str)"""
        self.__description = str(value)

    @property
    def _id(self) -> str:
        """Entity id"""
        return self.___id

    @_id.setter
    def _id(self, value: str):
        """Set _id (coerced to str)"""
        self.___id = str(value)

    @property
    def scriptableValues(self) -> List[ScriptableValue]:
        """Scriptable values attached to this entity"""
        return self.__scriptableValues

    @scriptableValues.setter
    def scriptableValues(self, value: List[ScriptableValue]):
        """Set scriptableValues; must be a sequence"""
        if not isinstance(value, Sequence):
            # fixed: single formatted message (was a misspelled
            # two-argument Exception that rendered as a tuple)
            raise Exception("Expected sequence, but was " + str(type(value)))
        self.__scriptableValues = value

    @property
    def returnPeriod(self) -> float:
        """Return period for the calculation"""
        return self.__returnPeriod

    @returnPeriod.setter
    def returnPeriod(self, value: float):
        """Set returnPeriod (coerced to float)"""
        self.__returnPeriod = float(value)

    @property
    def levels(self) -> List[CalculationLevel]:
        """Calculation levels"""
        return self.__levels

    @levels.setter
    def levels(self, value: List[CalculationLevel]):
        """Set levels; must be a sequence"""
        if not isinstance(value, Sequence):
            # fixed: single formatted message (was a misspelled
            # two-argument Exception that rendered as a tuple)
            raise Exception("Expected sequence, but was " + str(type(value)))
        self.__levels = value

    @property
    def method(self) -> LevelStatisticsMethod:
        """Level statistics method"""
        return self.__method

    @method.setter
    def method(self, value: LevelStatisticsMethod):
        """Set method"""
        self.__method = value

    @property
    def omniMethod(self) -> LevelStatisticsMethod:
        """Level statistics method for the omni-directional case"""
        return self.__omniMethod

    @omniMethod.setter
    def omniMethod(self, value: LevelStatisticsMethod):
        """Set omniMethod"""
        self.__omniMethod = value

    @property
    def directionRelativeToWave(self) -> float:
        """Wind direction relative to the wave direction"""
        return self.__directionRelativeToWave

    @directionRelativeToWave.setter
    def directionRelativeToWave(self, value: float):
        """Set directionRelativeToWave (coerced to float)"""
        self.__directionRelativeToWave = float(value)

    @property
    def statistics(self) -> WindLongTermStatistics:
        """Per-direction long-term statistics result"""
        return self.__statistics

    @statistics.setter
    def statistics(self, value: WindLongTermStatistics):
        """Set statistics"""
        self.__statistics = value

    @property
    def omni(self) -> WindLongTermStatistics:
        """Omni-directional long-term statistics result"""
        return self.__omni

    @omni.setter
    def omni(self, value: WindLongTermStatistics):
        """Set omni"""
        self.__omni = value
| 28.694915 | 214 | 0.648356 |
from __future__ import annotations
from typing import Dict,Sequence,List
from dmt.entity import Entity
from dmt.blueprint import Blueprint
from .blueprints.longtermstatisticswindcalculation import LongTermStatisticsWindCalculationBlueprint
from typing import Dict
from sima.metocean.calculationlevel import CalculationLevel
from sima.metocean.levelstatisticsmethod import LevelStatisticsMethod
from sima.sima.moao import MOAO
from sima.sima.scriptablevalue import ScriptableValue
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from sima.metocean.windlongtermstatistics import WindLongTermStatistics
class LongTermStatisticsWindCalculation(MOAO):
def __init__(self , name="", description="", _id="", returnPeriod=0.0, method=LevelStatisticsMethod.FROM_DISTRIBUTION, omniMethod=LevelStatisticsMethod.FROM_DISTRIBUTION, directionRelativeToWave=0.0, **kwargs):
super().__init__(**kwargs)
self.name = name
self.description = description
self._id = _id
self.scriptableValues = list()
self.returnPeriod = returnPeriod
self.levels = list()
self.method = method
self.omniMethod = omniMethod
self.directionRelativeToWave = directionRelativeToWave
self.statistics = None
self.omni = None
for key, value in kwargs.items():
if not isinstance(value, Dict):
setattr(self, key, value)
@property
def blueprint(self) -> Blueprint:
return LongTermStatisticsWindCalculationBlueprint()
@property
def name(self) -> str:
return self.__name
@name.setter
def name(self, value: str):
self.__name = str(value)
@property
def description(self) -> str:
return self.__description
@description.setter
def description(self, value: str):
self.__description = str(value)
@property
def _id(self) -> str:
return self.___id
@_id.setter
def _id(self, value: str):
self.___id = str(value)
@property
def scriptableValues(self) -> List[ScriptableValue]:
return self.__scriptableValues
@scriptableValues.setter
def scriptableValues(self, value: List[ScriptableValue]):
if not isinstance(value, Sequence):
raise Exception("Expected sequense, but was " , type(value))
self.__scriptableValues = value
@property
def returnPeriod(self) -> float:
return self.__returnPeriod
@returnPeriod.setter
def returnPeriod(self, value: float):
self.__returnPeriod = float(value)
@property
def levels(self) -> List[CalculationLevel]:
return self.__levels
@levels.setter
def levels(self, value: List[CalculationLevel]):
if not isinstance(value, Sequence):
raise Exception("Expected sequense, but was " , type(value))
self.__levels = value
@property
def method(self) -> LevelStatisticsMethod:
return self.__method
@method.setter
def method(self, value: LevelStatisticsMethod):
self.__method = value
@property
def omniMethod(self) -> LevelStatisticsMethod:
return self.__omniMethod
@omniMethod.setter
def omniMethod(self, value: LevelStatisticsMethod):
self.__omniMethod = value
@property
def directionRelativeToWave(self) -> float:
return self.__directionRelativeToWave
@directionRelativeToWave.setter
def directionRelativeToWave(self, value: float):
self.__directionRelativeToWave = float(value)
@property
def statistics(self) -> WindLongTermStatistics:
return self.__statistics
@statistics.setter
def statistics(self, value: WindLongTermStatistics):
self.__statistics = value
@property
def omni(self) -> WindLongTermStatistics:
return self.__omni
@omni.setter
def omni(self, value: WindLongTermStatistics):
self.__omni = value
| true | true |
f7f49076c724598ea89e1dcd89f7e4ac8848ae2c | 3,075 | py | Python | mido/backends/amidi.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | mido/backends/amidi.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | mido/backends/amidi.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | """Mido amidi backend
Very experimental backend using amidi to access the ALSA rawmidi
interface.
Todo:
* use parser instead of from_hex()?
* default port name
* do sysex messages work?
* starting amidi for every message sent is costly
"""
import os
import select
import threading
import subprocess
from ..messages import Message
from ._common import PortMethods, InputMethods, OutputMethods
"""
Dir Device Name
IO hw:1,0,0 UM-1 MIDI 1
IO hw:2,0,0 nanoKONTROL2 MIDI 1
IO hw:2,0,0 MPK mini MIDI 1
"""
def get_devices():
devices = []
lines = os.popen('amidi -l').read().splitlines()
for line in lines[1:]:
mode, device, name = line.strip().split(None, 2)
devices.append({
'name': name.strip(),
'device': device,
'is_input': 'I' in mode,
'is_output': 'O' in mode,
})
return devices
def _get_device(name, mode):
for dev in get_devices():
if name == dev['name'] and dev[mode]:
return dev
else:
raise IOError('unknown port {!r}'.format(name))
class Input(PortMethods, InputMethods):
def __init__(self, name=None, **kwargs):
self.name = name
self.closed = False
self._proc = None
self._poller = select.poll()
self._lock = threading.RLock()
dev = _get_device(self.name, 'is_input')
self._proc = subprocess.Popen(['amidi', '-d',
'-p', dev['device']],
stdout=subprocess.PIPE)
self._poller.register(self._proc.stdout, select.POLLIN)
def _read_message(self):
line = self._proc.stdout.readline().strip().decode('ascii')
if line:
return Message.from_hex(line)
else:
# The first line is sometimes blank.
return None
def receive(self, block=True):
if not block:
return self.poll()
while True:
msg = self.poll()
if msg:
return msg
# Wait for message.
self._poller.poll()
def poll(self):
with self._lock:
while self._poller.poll(0):
msg = self._read_message()
if msg is not None:
return msg
def close(self):
if not self.closed:
if self._proc:
self._proc.kill()
self._proc = None
self.closed = True
class Output(PortMethods, OutputMethods):
def __init__(self, name=None, autoreset=False, **kwargs):
self.name = name
self.autoreset = autoreset
self.closed = False
self._dev = _get_device(self.name, 'is_output')
def send(self, msg):
proc = subprocess.Popen(['amidi', '--send-hex', msg.hex(),
'-p', self._dev['device']])
proc.wait()
def close(self):
if not self.closed:
if self.autoreset:
self.reset()
self.closed = True
| 25.204918 | 67 | 0.54439 | import os
import select
import threading
import subprocess
from ..messages import Message
from ._common import PortMethods, InputMethods, OutputMethods
def get_devices():
devices = []
lines = os.popen('amidi -l').read().splitlines()
for line in lines[1:]:
mode, device, name = line.strip().split(None, 2)
devices.append({
'name': name.strip(),
'device': device,
'is_input': 'I' in mode,
'is_output': 'O' in mode,
})
return devices
def _get_device(name, mode):
for dev in get_devices():
if name == dev['name'] and dev[mode]:
return dev
else:
raise IOError('unknown port {!r}'.format(name))
class Input(PortMethods, InputMethods):
def __init__(self, name=None, **kwargs):
self.name = name
self.closed = False
self._proc = None
self._poller = select.poll()
self._lock = threading.RLock()
dev = _get_device(self.name, 'is_input')
self._proc = subprocess.Popen(['amidi', '-d',
'-p', dev['device']],
stdout=subprocess.PIPE)
self._poller.register(self._proc.stdout, select.POLLIN)
def _read_message(self):
line = self._proc.stdout.readline().strip().decode('ascii')
if line:
return Message.from_hex(line)
else:
return None
def receive(self, block=True):
if not block:
return self.poll()
while True:
msg = self.poll()
if msg:
return msg
self._poller.poll()
def poll(self):
with self._lock:
while self._poller.poll(0):
msg = self._read_message()
if msg is not None:
return msg
def close(self):
if not self.closed:
if self._proc:
self._proc.kill()
self._proc = None
self.closed = True
class Output(PortMethods, OutputMethods):
def __init__(self, name=None, autoreset=False, **kwargs):
self.name = name
self.autoreset = autoreset
self.closed = False
self._dev = _get_device(self.name, 'is_output')
def send(self, msg):
proc = subprocess.Popen(['amidi', '--send-hex', msg.hex(),
'-p', self._dev['device']])
proc.wait()
def close(self):
if not self.closed:
if self.autoreset:
self.reset()
self.closed = True
| true | true |
f7f4909fb6c9c23377a7a9b32ab9888dd4b1da6c | 2,308 | py | Python | etc/config.py | madcat1991/imagester | 5383f2a307bd848b0fe8fa9aab750ca170eb4189 | [
"MIT"
] | null | null | null | etc/config.py | madcat1991/imagester | 5383f2a307bd848b0fe8fa9aab750ca170eb4189 | [
"MIT"
] | 4 | 2021-06-08T22:34:24.000Z | 2022-03-12T00:25:38.000Z | etc/config.py | madcat1991/imagester | 5383f2a307bd848b0fe8fa9aab750ca170eb4189 | [
"MIT"
] | null | null | null | # coding: utf-8
import sys
# logging
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'message_only': {
'format': '%(asctime)s: %(message)s',
'datefmt': '%d-%m-%Y %H:%M:%S',
},
'basic': {
'format': '%(asctime)s:%(levelname)s: %(message)s',
},
'verbose': {
'format': '%(asctime)s:%(levelname)s:%(name)s.%(funcName)s: %(message)s',
},
'verbose_with_pid': {
'format': '%(asctime)s:%(levelname)s:%(name)s.%(funcName)s:%(process)s: %(message)s',
},
},
'handlers': {
'basic': {
'class': 'logging.StreamHandler',
'formatter': 'verbose',
'stream': sys.stdout,
},
'debug_stdout': {
'class': 'logging.StreamHandler',
'level': 'DEBUG',
'formatter': 'verbose',
'stream': sys.stdout,
}
},
'loggers': {
'root': {
'handlers': ['basic'],
},
'server': {
'handlers': ['debug_stdout'],
'level': 'INFO',
'propagate': True,
}
}
}
# connection
PSQL_CONFIG = {
"host": None,
"port": None,
"database": "imagester",
"user": "imagester",
"password": None,
"minconn": 1,
"maxconn": 10
}
REDIS_CONFIG = {
'host': 'localhost',
'port': 6379,
'db': 0
}
CLARIFAI_KEY = "clarifai"
INIT_CLARIFAI_KEYS = []
REGAIND_KEY = "regaind"
INIT_REGAIND_KEYS = []
# app level configs
MAX_REQUESTS_PER_USER = 10
MAX_IMAGES_PER_REQUEST = 3
DEFAULT_MAX_IMG_BYTES = 10 * 1024 * 1024 # 10MB
IMG_UPLOAD_DIR = "/tmp"
ALLOWED_IMG_EXTENSIONS = {'png', 'jpg', 'jpeg'}
MAX_IMG_SHAPE_FOR_PROCESSING = (1200, 1200) # pixels
# tags
REL_TAG_URL = None
LOC_TAG_URL = None
MAX_TAGS_TO_MINE = 15
MAX_KWS_TAGS = 20
MAX_LOC_TAGS = 7
KW_TAGS_TTL = 24 * 60 * 60 # 1 day
LOC_TAGS_TTL = 7 * 24 * 60 * 60 # 7 days
LNG_STEP = 0.5
LAT_STEP = 0.5
# quotes
QUOTES_URL = None
QUOTES_TTL = 7 * 24 * 60 * 60 # 7 days
QUOTES_PER_KW = 5
QUOTES_KWS_NUM = 3
MAX_QUOTES = 5
# post time
ENG_DATA_URL = "http://cf.datawrapper.de/Ndpz7/2/data.csv"
ENG_DATA_DIR = "/tmp/eng_data/"
# flask
MAX_CONTENT_LENGTH = MAX_IMAGES_PER_REQUEST * DEFAULT_MAX_IMG_BYTES
| 21.773585 | 97 | 0.54766 |
import sys
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'message_only': {
'format': '%(asctime)s: %(message)s',
'datefmt': '%d-%m-%Y %H:%M:%S',
},
'basic': {
'format': '%(asctime)s:%(levelname)s: %(message)s',
},
'verbose': {
'format': '%(asctime)s:%(levelname)s:%(name)s.%(funcName)s: %(message)s',
},
'verbose_with_pid': {
'format': '%(asctime)s:%(levelname)s:%(name)s.%(funcName)s:%(process)s: %(message)s',
},
},
'handlers': {
'basic': {
'class': 'logging.StreamHandler',
'formatter': 'verbose',
'stream': sys.stdout,
},
'debug_stdout': {
'class': 'logging.StreamHandler',
'level': 'DEBUG',
'formatter': 'verbose',
'stream': sys.stdout,
}
},
'loggers': {
'root': {
'handlers': ['basic'],
},
'server': {
'handlers': ['debug_stdout'],
'level': 'INFO',
'propagate': True,
}
}
}
PSQL_CONFIG = {
"host": None,
"port": None,
"database": "imagester",
"user": "imagester",
"password": None,
"minconn": 1,
"maxconn": 10
}
REDIS_CONFIG = {
'host': 'localhost',
'port': 6379,
'db': 0
}
CLARIFAI_KEY = "clarifai"
INIT_CLARIFAI_KEYS = []
REGAIND_KEY = "regaind"
INIT_REGAIND_KEYS = []
MAX_REQUESTS_PER_USER = 10
MAX_IMAGES_PER_REQUEST = 3
DEFAULT_MAX_IMG_BYTES = 10 * 1024 * 1024
IMG_UPLOAD_DIR = "/tmp"
ALLOWED_IMG_EXTENSIONS = {'png', 'jpg', 'jpeg'}
MAX_IMG_SHAPE_FOR_PROCESSING = (1200, 1200)
REL_TAG_URL = None
LOC_TAG_URL = None
MAX_TAGS_TO_MINE = 15
MAX_KWS_TAGS = 20
MAX_LOC_TAGS = 7
KW_TAGS_TTL = 24 * 60 * 60
LOC_TAGS_TTL = 7 * 24 * 60 * 60
LNG_STEP = 0.5
LAT_STEP = 0.5
QUOTES_URL = None
QUOTES_TTL = 7 * 24 * 60 * 60
QUOTES_PER_KW = 5
QUOTES_KWS_NUM = 3
MAX_QUOTES = 5
ENG_DATA_URL = "http://cf.datawrapper.de/Ndpz7/2/data.csv"
ENG_DATA_DIR = "/tmp/eng_data/"
MAX_CONTENT_LENGTH = MAX_IMAGES_PER_REQUEST * DEFAULT_MAX_IMG_BYTES
| true | true |
f7f493fa99a53ff4aca67b8dcc658ead713cf656 | 289 | py | Python | solutions/1556_thousand_separator.py | YiqunPeng/leetcode_pro | 7e6376984f9baec49a5e827d98330fe3d1b656f0 | [
"MIT"
] | null | null | null | solutions/1556_thousand_separator.py | YiqunPeng/leetcode_pro | 7e6376984f9baec49a5e827d98330fe3d1b656f0 | [
"MIT"
] | null | null | null | solutions/1556_thousand_separator.py | YiqunPeng/leetcode_pro | 7e6376984f9baec49a5e827d98330fe3d1b656f0 | [
"MIT"
] | null | null | null | class Solution:
def thousandSeparator(self, n: int) -> str:
"""String.
"""
res = ''
n = str(n)
for i in range(len(n)):
res += n[i]
if i != len(n) - 1 and (len(n) - i - 1) % 3 == 0:
res += '.'
return res
| 24.083333 | 61 | 0.377163 | class Solution:
def thousandSeparator(self, n: int) -> str:
"""String.
"""
res = ''
n = str(n)
for i in range(len(n)):
res += n[i]
if i != len(n) - 1 and (len(n) - i - 1) % 3 == 0:
res += '.'
return res
| false | true |
f7f494c1677525090b9e250e933973c395f7f18c | 10,267 | py | Python | storage/emulated/0/qpython/lib/python3.2/site-packages/requests/packages/urllib3/util/retry.py | wangkaibiao/SettlersFinancialData3 | 498249e14f24bfa3186f07e8f66ee624d08c6ff1 | [
"MIT"
] | null | null | null | storage/emulated/0/qpython/lib/python3.2/site-packages/requests/packages/urllib3/util/retry.py | wangkaibiao/SettlersFinancialData3 | 498249e14f24bfa3186f07e8f66ee624d08c6ff1 | [
"MIT"
] | null | null | null | storage/emulated/0/qpython/lib/python3.2/site-packages/requests/packages/urllib3/util/retry.py | wangkaibiao/SettlersFinancialData3 | 498249e14f24bfa3186f07e8f66ee624d08c6ff1 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
import time
import logging
from ..exceptions import (
ConnectTimeoutError,
MaxRetryError,
ProtocolError,
ReadTimeoutError,
ResponseError,
)
from ..packages import six
log = logging.getLogger(__name__)
class Retry(object):
""" Retry configuration.
Each retry attempt will create a new Retry object with updated values, so
they can be safely reused.
Retries can be defined as a default for a pool::
retries = Retry(connect=5, read=2, redirect=5)
http = PoolManager(retries=retries)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', retries=Retry(10))
Retries can be disabled by passing ``False``::
response = http.request('GET', 'http://example.com/', retries=False)
Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
retries are disabled, in which case the causing exception will be raised.
:param int total:
Total number of retries to allow. Takes precedence over other counts.
Set to ``None`` to remove this constraint and fall back on other
counts. It's a good idea to set this to some sensibly-high value to
account for unexpected edge cases and avoid infinite retry loops.
Set to ``0`` to fail on the first retry.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int connect:
How many connection-related errors to retry on.
These are errors raised before the request is sent to the remote server,
which we assume has not triggered the server to process the request.
Set to ``0`` to fail on the first retry of this type.
:param int read:
How many times to retry on read errors.
These errors are raised after the request was sent to the server, so the
request may have side-effects.
Set to ``0`` to fail on the first retry of this type.
:param int redirect:
How many redirects to perform. Limit this to avoid infinite redirect
loops.
A redirect is a HTTP response with a status code 301, 302, 303, 307 or
308.
Set to ``0`` to fail on the first retry of this type.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param iterable method_whitelist:
Set of uppercased HTTP method verbs that we should retry on.
By default, we only retry on methods which are considered to be
indempotent (multiple requests with the same parameters end with the
same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.
:param iterable status_forcelist:
A set of HTTP status codes that we should force a retry on.
By default, this is disabled with ``None``.
:param float backoff_factor:
A backoff factor to apply between attempts. urllib3 will sleep for::
{backoff factor} * (2 ^ ({number of total retries} - 1))
seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
for [0.1s, 0.2s, 0.4s, ...] between retries. It will never be longer
than :attr:`Retry.BACKOFF_MAX`.
By default, backoff is disabled (set to 0).
:param bool raise_on_redirect: Whether, if the number of redirects is
exhausted, to raise a MaxRetryError, or to return a response with a
response code in the 3xx range.
"""
DEFAULT_METHOD_WHITELIST = frozenset([
'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])
#: Maximum backoff time.
BACKOFF_MAX = 120
def __init__(self, total=10, connect=None, read=None, redirect=None,
method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
backoff_factor=0, raise_on_redirect=True, _observed_errors=0):
self.total = total
self.connect = connect
self.read = read
if redirect is False or total is False:
redirect = 0
raise_on_redirect = False
self.redirect = redirect
self.status_forcelist = status_forcelist or set()
self.method_whitelist = method_whitelist
self.backoff_factor = backoff_factor
self.raise_on_redirect = raise_on_redirect
self._observed_errors = _observed_errors # TODO: use .history instead?
def new(self, **kw):
params = dict(
total=self.total,
connect=self.connect, read=self.read, redirect=self.redirect,
method_whitelist=self.method_whitelist,
status_forcelist=self.status_forcelist,
backoff_factor=self.backoff_factor,
raise_on_redirect=self.raise_on_redirect,
_observed_errors=self._observed_errors,
)
params.update(kw)
return type(self)(**params)
@classmethod
def from_int(cls, retries, redirect=True, default=None):
""" Backwards-compatibility for the old retries format."""
if retries is None:
retries = default if default is not None else cls.DEFAULT
if isinstance(retries, Retry):
return retries
redirect = bool(redirect) and None
new_retries = cls(retries, redirect=redirect)
log.debug("Converted retries value: %r -> %r" % (retries, new_retries))
return new_retries
def get_backoff_time(self):
""" Formula for computing the current backoff
:rtype: float
"""
if self._observed_errors <= 1:
return 0
backoff_value = self.backoff_factor * (2 ** (self._observed_errors - 1))
return min(self.BACKOFF_MAX, backoff_value)
def sleep(self):
""" Sleep between retry attempts using an exponential backoff.
By default, the backoff factor is 0 and this method will return
immediately.
"""
backoff = self.get_backoff_time()
if backoff <= 0:
return
time.sleep(backoff)
def _is_connection_error(self, err):
""" Errors when we're fairly sure that the server did not receive the
request, so it should be safe to retry.
"""
return isinstance(err, ConnectTimeoutError)
def _is_read_error(self, err):
""" Errors that occur after the request has been started, so we should
assume that the server began processing it.
"""
return isinstance(err, (ReadTimeoutError, ProtocolError))
def is_forced_retry(self, method, status_code):
""" Is this method/status code retryable? (Based on method/codes whitelists)
"""
if self.method_whitelist and method.upper() not in self.method_whitelist:
return False
return self.status_forcelist and status_code in self.status_forcelist
def is_exhausted(self):
""" Are we out of retries? """
retry_counts = (self.total, self.connect, self.read, self.redirect)
retry_counts = list(filter(None, retry_counts))
if not retry_counts:
return False
return min(retry_counts) < 0
def increment(self, method=None, url=None, response=None, error=None,
_pool=None, _stacktrace=None):
""" Return a new Retry object with incremented retry counters.
:param response: A response object, or None, if the server did not
return a response.
:type response: :class:`~urllib3.response.HTTPResponse`
:param Exception error: An error encountered during the request, or
None if the response was received successfully.
:return: A new ``Retry`` object.
"""
if self.total is False and error:
# Disabled, indicate to re-raise the error.
raise six.reraise(type(error), error, _stacktrace)
total = self.total
if total is not None:
total -= 1
_observed_errors = self._observed_errors
connect = self.connect
read = self.read
redirect = self.redirect
cause = 'unknown'
if error and self._is_connection_error(error):
# Connect retry?
if connect is False:
raise six.reraise(type(error), error, _stacktrace)
elif connect is not None:
connect -= 1
_observed_errors += 1
elif error and self._is_read_error(error):
# Read retry?
if read is False:
raise six.reraise(type(error), error, _stacktrace)
elif read is not None:
read -= 1
_observed_errors += 1
elif response and response.get_redirect_location():
# Redirect retry?
if redirect is not None:
redirect -= 1
cause = 'too many redirects'
else:
# Incrementing because of a server error like a 500 in
# status_forcelist and a the given method is in the whitelist
_observed_errors += 1
cause = ResponseError.GENERIC_ERROR
if response and response.status:
cause = ResponseError.SPECIFIC_ERROR.format(
status_code=response.status)
new_retry = self.new(
total=total,
connect=connect, read=read, redirect=redirect,
_observed_errors=_observed_errors)
if new_retry.is_exhausted():
raise MaxRetryError(_pool, url, error or ResponseError(cause))
log.debug("Incremented Retry for (url='%s'): %r" % (url, new_retry))
return new_retry
def __repr__(self):
return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
'read={self.read}, redirect={self.redirect})').format(
cls=type(self), self=self)
# For backwards compatibility (equivalent to pre-v1.9):
Retry.DEFAULT = Retry(3)
| 35.773519 | 85 | 0.615175 | from __future__ import absolute_import
import time
import logging
from ..exceptions import (
ConnectTimeoutError,
MaxRetryError,
ProtocolError,
ReadTimeoutError,
ResponseError,
)
from ..packages import six
log = logging.getLogger(__name__)
class Retry(object):
DEFAULT_METHOD_WHITELIST = frozenset([
'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])
BACKOFF_MAX = 120
def __init__(self, total=10, connect=None, read=None, redirect=None,
method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
backoff_factor=0, raise_on_redirect=True, _observed_errors=0):
self.total = total
self.connect = connect
self.read = read
if redirect is False or total is False:
redirect = 0
raise_on_redirect = False
self.redirect = redirect
self.status_forcelist = status_forcelist or set()
self.method_whitelist = method_whitelist
self.backoff_factor = backoff_factor
self.raise_on_redirect = raise_on_redirect
self._observed_errors = _observed_errors
def new(self, **kw):
params = dict(
total=self.total,
connect=self.connect, read=self.read, redirect=self.redirect,
method_whitelist=self.method_whitelist,
status_forcelist=self.status_forcelist,
backoff_factor=self.backoff_factor,
raise_on_redirect=self.raise_on_redirect,
_observed_errors=self._observed_errors,
)
params.update(kw)
return type(self)(**params)
@classmethod
def from_int(cls, retries, redirect=True, default=None):
if retries is None:
retries = default if default is not None else cls.DEFAULT
if isinstance(retries, Retry):
return retries
redirect = bool(redirect) and None
new_retries = cls(retries, redirect=redirect)
log.debug("Converted retries value: %r -> %r" % (retries, new_retries))
return new_retries
def get_backoff_time(self):
if self._observed_errors <= 1:
return 0
backoff_value = self.backoff_factor * (2 ** (self._observed_errors - 1))
return min(self.BACKOFF_MAX, backoff_value)
def sleep(self):
backoff = self.get_backoff_time()
if backoff <= 0:
return
time.sleep(backoff)
def _is_connection_error(self, err):
return isinstance(err, ConnectTimeoutError)
def _is_read_error(self, err):
return isinstance(err, (ReadTimeoutError, ProtocolError))
def is_forced_retry(self, method, status_code):
if self.method_whitelist and method.upper() not in self.method_whitelist:
return False
return self.status_forcelist and status_code in self.status_forcelist
def is_exhausted(self):
retry_counts = (self.total, self.connect, self.read, self.redirect)
retry_counts = list(filter(None, retry_counts))
if not retry_counts:
return False
return min(retry_counts) < 0
def increment(self, method=None, url=None, response=None, error=None,
_pool=None, _stacktrace=None):
if self.total is False and error:
raise six.reraise(type(error), error, _stacktrace)
total = self.total
if total is not None:
total -= 1
_observed_errors = self._observed_errors
connect = self.connect
read = self.read
redirect = self.redirect
cause = 'unknown'
if error and self._is_connection_error(error):
if connect is False:
raise six.reraise(type(error), error, _stacktrace)
elif connect is not None:
connect -= 1
_observed_errors += 1
elif error and self._is_read_error(error):
if read is False:
raise six.reraise(type(error), error, _stacktrace)
elif read is not None:
read -= 1
_observed_errors += 1
elif response and response.get_redirect_location():
if redirect is not None:
redirect -= 1
cause = 'too many redirects'
else:
_observed_errors += 1
cause = ResponseError.GENERIC_ERROR
if response and response.status:
cause = ResponseError.SPECIFIC_ERROR.format(
status_code=response.status)
new_retry = self.new(
total=total,
connect=connect, read=read, redirect=redirect,
_observed_errors=_observed_errors)
if new_retry.is_exhausted():
raise MaxRetryError(_pool, url, error or ResponseError(cause))
log.debug("Incremented Retry for (url='%s'): %r" % (url, new_retry))
return new_retry
def __repr__(self):
return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
'read={self.read}, redirect={self.redirect})').format(
cls=type(self), self=self)
Retry.DEFAULT = Retry(3)
| true | true |
f7f49512c92e6a844967653ccbe4a65c43cf4d4e | 790 | py | Python | meta/generate-docs-index.py | ukpds/ontologies | 46ca9c5e2c476dec9e8fddd37daebbef2719a2c0 | [
"OML"
] | 1 | 2016-11-09T00:38:06.000Z | 2016-11-09T00:38:06.000Z | meta/generate-docs-index.py | ukpds/ontologies | 46ca9c5e2c476dec9e8fddd37daebbef2719a2c0 | [
"OML"
] | null | null | null | meta/generate-docs-index.py | ukpds/ontologies | 46ca9c5e2c476dec9e8fddd37daebbef2719a2c0 | [
"OML"
] | null | null | null | import os
from datetime import datetime
from pathlib import Path
from jinja2 import Environment, FileSystemLoader, select_autoescape
from markupsafe import Markup
from urllib.parse import urlparse
def stemonly(pathvalue):
return Markup(pathvalue.stem)
env = Environment(
loader=FileSystemLoader("./meta/templates"),
autoescape=select_autoescape(),
cache_size=0,
trim_blocks=True,
lstrip_blocks=True,
)
env.filters["stemonly"] = stemonly
template = env.get_template("ontologies-index.html")
htmlfiles = list(Path(".").glob("./meta/html/*/*.html"))
indexpath = "./meta/html/index.html"
with open(indexpath, "w") as indexfile:
print(" Writing " + indexpath)
indexfile.write(
template.render(
htmlfiles=htmlfiles,
)
)
| 20.789474 | 67 | 0.705063 | import os
from datetime import datetime
from pathlib import Path
from jinja2 import Environment, FileSystemLoader, select_autoescape
from markupsafe import Markup
from urllib.parse import urlparse
def stemonly(pathvalue):
return Markup(pathvalue.stem)
env = Environment(
loader=FileSystemLoader("./meta/templates"),
autoescape=select_autoescape(),
cache_size=0,
trim_blocks=True,
lstrip_blocks=True,
)
env.filters["stemonly"] = stemonly
template = env.get_template("ontologies-index.html")
htmlfiles = list(Path(".").glob("./meta/html/*/*.html"))
indexpath = "./meta/html/index.html"
with open(indexpath, "w") as indexfile:
print(" Writing " + indexpath)
indexfile.write(
template.render(
htmlfiles=htmlfiles,
)
)
| true | true |
f7f4952d8d2d6c3b53ecda8af91d722c5251464a | 1,188 | py | Python | students/migrations/0001_initial.py | SoftwareSecureGroup/students_website | 79403878a0158f56e168ec8d5f43bfa6af1ec86a | [
"MIT"
] | null | null | null | students/migrations/0001_initial.py | SoftwareSecureGroup/students_website | 79403878a0158f56e168ec8d5f43bfa6af1ec86a | [
"MIT"
] | 1 | 2016-09-17T15:45:35.000Z | 2016-09-17T15:45:35.000Z | students/migrations/0001_initial.py | SoftwareSecureGroup/students_website | 79403878a0158f56e168ec8d5f43bfa6af1ec86a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-15 14:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Admin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('admin_id', models.CharField(max_length=10)),
('password', models.CharField(max_length=64, null=True)),
],
),
migrations.CreateModel(
name='Student',
fields=[
('student_id', models.CharField(max_length=20, primary_key=True, serialize=False)),
('name', models.CharField(max_length=20)),
('gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], max_length=1)),
('grade', models.CharField(max_length=10)),
('photo', models.CharField(max_length=20, null=True)),
('password', models.CharField(max_length=64)),
],
),
]
| 33.942857 | 114 | 0.558923 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Admin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('admin_id', models.CharField(max_length=10)),
('password', models.CharField(max_length=64, null=True)),
],
),
migrations.CreateModel(
name='Student',
fields=[
('student_id', models.CharField(max_length=20, primary_key=True, serialize=False)),
('name', models.CharField(max_length=20)),
('gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], max_length=1)),
('grade', models.CharField(max_length=10)),
('photo', models.CharField(max_length=20, null=True)),
('password', models.CharField(max_length=64)),
],
),
]
| true | true |
f7f4957c7ff53f1ff1469f8d139d4270f1828c07 | 14,623 | py | Python | tests/filecheck/math.filecheck.py | konstin/jax | c3581a221842c09dc1b2f301012c3a01734f6b43 | [
"Apache-2.0"
] | 1 | 2022-03-18T17:32:13.000Z | 2022-03-18T17:32:13.000Z | tests/filecheck/math.filecheck.py | konstin/jax | c3581a221842c09dc1b2f301012c3a01734f6b43 | [
"Apache-2.0"
] | 2 | 2022-02-14T05:19:04.000Z | 2022-03-28T04:40:54.000Z | tests/filecheck/math.filecheck.py | yotarok/jax | f7df3ee9c4221a202959e67816d485c35eb98102 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Tests for lowerings of elementwise ops to MHLO.
# RUN: %PYTHON %s | FileCheck %s
from absl import app
from functools import partial
import jax
from jax import numpy as jnp
from jax import lax
import numpy as np
from jax.tests.filecheck.jax_filecheck_helpers import print_ir
jax.config.update("jax_enable_mlir", True)
jax.config.update("jax_enable_x64", True)
def main(_):
  """Lower each elementwise lax op to MHLO and print the IR.

  The `# CHECK*` comments are LLVM FileCheck directives matched against the
  printed IR (see the RUN line at the top of the file); they are part of the
  test and must stay in sync with the `print_ir` calls they annotate.
  """
  # CHECK-LABEL: TEST: abs int32[]
  # CHECK: mhlo.abs
  # CHECK-SAME: tensor<i32>
  print_ir(np.int32(0))(lax.abs)
  # CHECK-LABEL: TEST: add float32[] float32[]
  # CHECK: mhlo.add
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(1), np.float32(2))(lax.add)
  # CHECK-LABEL: TEST: acos float32[]
  # CHECK: mhlo.atan2
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(1))(lax.acos)
  # CHECK-LABEL: TEST: acosh float32[]
  # CHECK: xla_fallback_acosh
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(0))(lax.acosh)
  # CHECK-LABEL: TEST: asin float32[]
  # CHECK: mhlo.atan2
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(1))(lax.asin)
  # CHECK-LABEL: TEST: asinh float32[]
  # CHECK: xla_fallback_asinh
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(0))(lax.asinh)
  # CHECK-LABEL: TEST: atan float32[]
  # CHECK: mhlo.atan2
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(1))(lax.atan)
  # CHECK-LABEL: TEST: atanh float32[]
  # CHECK: xla_fallback_atanh
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(0))(lax.atanh)
  # CHECK-LABEL: TEST: atan2 float64[] float64[]
  # CHECK: mhlo.atan2
  # CHECK-SAME: tensor<f64>
  print_ir(np.float64(1), np.float64(2))(lax.atan2)
  # CHECK-LABEL: TEST: bessel_i0e float32[]
  # CHECK: xla_fallback_bessel_i0e
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(0))(lax.bessel_i0e)
  # CHECK-LABEL: TEST: bessel_i1e float32[]
  # CHECK: xla_fallback_bessel_i1e
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(0))(lax.bessel_i1e)
  # CHECK-LABEL: TEST: betainc float32[] float32[] float32[]
  # CHECK: xla_fallback_regularized_incomplete_beta
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(0), np.float32(0), np.float32(0))(lax.betainc)
  # CHECK-LABEL: TEST: bitcast_convert_type uint32[7]
  # CHECK: mhlo.bitcast_convert
  # CHECK-SAME: tensor<7xui32>
  # CHECK-SAME: tensor<7xf32>
  print_ir(np.empty((7,), np.uint32))(
      partial(lax.bitcast_convert_type, new_dtype=np.float32))
  # CHECK-LABEL: TEST: bitwise_and int32[] int32[]
  # CHECK: mhlo.and
  # CHECK-SAME: tensor<i32>
  print_ir(np.int32(1), np.int32(2))(lax.bitwise_and)
  # CHECK-LABEL: TEST: bitwise_and bool[] bool[]
  # CHECK: mhlo.and
  # CHECK-SAME: tensor<i1>
  print_ir(np.bool_(0), np.bool_(0))(lax.bitwise_and)
  # CHECK-LABEL: TEST: bitwise_or int32[] int32[]
  # CHECK: mhlo.or
  # CHECK-SAME: tensor<i32>
  print_ir(np.int32(1), np.int32(2))(lax.bitwise_or)
  # CHECK-LABEL: TEST: bitwise_or bool[] bool[]
  # CHECK: mhlo.or
  # CHECK-SAME: tensor<i1>
  print_ir(np.bool_(0), np.bool_(0))(lax.bitwise_or)
  # CHECK-LABEL: TEST: bitwise_xor int32[] int32[]
  # CHECK: mhlo.xor
  # CHECK-SAME: tensor<i32>
  print_ir(np.int32(1), np.int32(2))(lax.bitwise_xor)
  # CHECK-LABEL: TEST: bitwise_xor bool[] bool[]
  # CHECK: mhlo.xor
  # CHECK-SAME: tensor<i1>
  print_ir(np.bool_(0), np.bool_(0))(lax.bitwise_xor)
  # CHECK-LABEL: TEST: cbrt bfloat16[]
  # CHECK: mhlo.cbrt
  # CHECK-SAME: tensor<bf16>
  print_ir(jnp.bfloat16(0))(lax.cbrt)
  # CHECK-LABEL: TEST: clamp bfloat16[] bfloat16[] bfloat16[]
  # CHECK: mhlo.clamp
  # CHECK-SAME: tensor<bf16>
  print_ir(jnp.bfloat16(0), jnp.bfloat16(0), jnp.bfloat16(0))(lax.clamp)
  # CHECK-LABEL: TEST: ceil float16[7]
  # CHECK: mhlo.ceil
  # CHECK-SAME: tensor<7xf16>
  print_ir(np.empty((7,), np.float16))(lax.ceil)
  # CHECK-LABEL: TEST: convert_element_type float16[7]
  # CHECK: mhlo.convert
  # CHECK-SAME: tensor<7xf16>
  # CHECK-SAME: tensor<7xf32>
  print_ir(np.empty((7,), np.float16))(
      partial(lax.convert_element_type, new_dtype=np.float32))
  # CHECK-LABEL: TEST: convert_element_type complex64[7]
  # CHECK: mhlo.real
  # CHECK-SAME: tensor<7xcomplex<f32>>
  # CHECK-SAME: tensor<7xf32>
  print_ir(np.empty((7,), np.complex64))(
      partial(lax.convert_element_type, new_dtype=np.float32))
  # CHECK-LABEL: TEST: convert_element_type float32[7]
  # CHECK: mhlo.compare
  # CHECK-SAME: tensor<7xf32>
  # CHECK-SAME: tensor<7xi1>
  print_ir(np.empty((7,), np.float32))(
      partial(lax.convert_element_type, new_dtype=np.bool_))
  # CHECK-LABEL: TEST: clz uint32[]
  # CHECK: mhlo.count_leading_zeros
  # CHECK-SAME: tensor<ui32>
  print_ir(np.uint32(0))(lax.clz)
  # CHECK-LABEL: TEST: conj complex64[]
  # CHECK-DAG: mhlo.real
  # CHECK-DAG: mhlo.imag
  # CHECK-DAG: mhlo.neg
  # CHECK-DAG: mhlo.complex
  # CHECK-SAME: tensor<complex<f32>>
  print_ir(np.complex64(0))(lax.conj)
  # CHECK-LABEL: TEST: cos float32[]
  # CHECK: mhlo.cos
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(0))(lax.cos)
  # CHECK-LABEL: TEST: cosh float32[]
  # CHECK: xla_fallback_cosh
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(0))(lax.cosh)
  # CHECK-LABEL: TEST: digamma float32[]
  # CHECK: chlo.digamma
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(0))(lax.digamma)
  # CHECK-LABEL: TEST: div float32[] float32[]
  # CHECK: mhlo.div
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(1), np.float32(2))(lax.div)
  # CHECK-LABEL: TEST: eq float32[] float32[]
  # CHECK: mhlo.compare
  # CHECK-SAME: compare_type = #mhlo<"comparison_type FLOAT">
  # CHECK-SAME: comparison_direction = #mhlo<"comparison_direction EQ">
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(1), np.float32(2))(lax.eq)
  # CHECK-LABEL: TEST: eq complex128[] complex128[]
  # CHECK: mhlo.compare
  # CHECK-SAME: compare_type = #mhlo<"comparison_type FLOAT">
  # CHECK-SAME: comparison_direction = #mhlo<"comparison_direction EQ">
  # CHECK-SAME: tensor<complex<f64>>
  print_ir(np.complex128(1), np.complex128(2))(lax.eq)
  # CHECK-LABEL: TEST: eq int64[] int64[]
  # CHECK: mhlo.compare
  # CHECK-SAME: compare_type = #mhlo<"comparison_type SIGNED">
  # CHECK-SAME: comparison_direction = #mhlo<"comparison_direction EQ">
  # CHECK-SAME: tensor<i64>
  print_ir(np.int64(1), np.int64(2))(lax.eq)
  # CHECK-LABEL: TEST: eq uint16[] uint16[]
  # CHECK: mhlo.compare
  # CHECK-SAME: compare_type = #mhlo<"comparison_type UNSIGNED">
  # CHECK-SAME: comparison_direction = #mhlo<"comparison_direction EQ">
  # CHECK-SAME: tensor<ui16>
  print_ir(np.uint16(1), np.uint16(2))(lax.eq)
  # CHECK-LABEL: TEST: erf float32[]
  # CHECK: xla_fallback_erf
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(0))(lax.erf)
  # CHECK-LABEL: TEST: erfc float32[]
  # CHECK: xla_fallback_erfc
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(0))(lax.erfc)
  # CHECK-LABEL: TEST: erf_inv float32[]
  # CHECK: xla_fallback_erf_inv
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(0))(lax.erf_inv)
  # CHECK-LABEL: TEST: exp float16[]
  # CHECK: mhlo.exp
  # CHECK-SAME: tensor<f16>
  print_ir(np.float16(0))(lax.exp)
  # CHECK-LABEL: TEST: expm1 bfloat16[]
  # CHECK: mhlo.exponential_minus_one
  # CHECK-SAME: tensor<bf16>
  print_ir(jnp.bfloat16(0))(lax.expm1)
  # CHECK-LABEL: TEST: floor bfloat16[2,3]
  # CHECK: mhlo.floor
  # CHECK-SAME: tensor<2x3xbf16>
  print_ir(np.empty((2, 3), jnp.bfloat16))(lax.floor)
  # CHECK-LABEL: TEST: ge float32[] float32[]
  # CHECK: mhlo.compare
  # CHECK-SAME: compare_type = #mhlo<"comparison_type FLOAT">
  # CHECK-SAME: comparison_direction = #mhlo<"comparison_direction GE">
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(1), np.float32(2))(lax.ge)
  # CHECK-LABEL: TEST: gt float32[] float32[]
  # CHECK: mhlo.compare
  # CHECK-SAME: compare_type = #mhlo<"comparison_type FLOAT">
  # CHECK-SAME: comparison_direction = #mhlo<"comparison_direction GT">
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(1), np.float32(2))(lax.gt)
  # CHECK-LABEL: TEST: igamma float32[] float32[]
  # CHECK: xla_fallback_igamma
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(0), np.float32(0))(lax.igamma)
  # CHECK-LABEL: TEST: igammac float32[] float32[]
  # CHECK: xla_fallback_igammac
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(0), np.float32(0))(lax.igammac)
  # CHECK-LABEL: TEST: igamma_grad_a float32[] float32[]
  # CHECK: xla_fallback_igamma_grad_a
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(0), np.float32(0))(lax.igamma_grad_a)
  # CHECK-LABEL: TEST: imag complex64[]
  # CHECK: mhlo.imag
  # CHECK-SAME: tensor<complex<f32>>
  print_ir(np.complex64(0))(lax.imag)
  # CHECK-LABEL: TEST: integer_pow float32[]
  # CHECK-DAG: mhlo.mul
  # CHECK-SAME: tensor<f32>
  @print_ir(np.float32(1))
  def integer_pow(x): return lax.integer_pow(x, 3)
  # CHECK-LABEL: TEST: is_finite float64[]
  # CHECK: mhlo.is_finite
  # CHECK-SAME: tensor<f64>
  print_ir(np.float64(0))(lax.is_finite)
  # CHECK-LABEL: TEST: le float32[] float32[]
  # CHECK: mhlo.compare
  # CHECK-SAME: compare_type = #mhlo<"comparison_type FLOAT">
  # CHECK-SAME: comparison_direction = #mhlo<"comparison_direction LE">
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(1), np.float32(2))(lax.le)
  # CHECK-LABEL: TEST: lgamma float32[]
  # CHECK: chlo.lgamma
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(0))(lax.lgamma)
  # CHECK-LABEL: TEST: log float32[]
  # CHECK: mhlo.log
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(0))(lax.log)
  # CHECK-LABEL: TEST: log1p float32[]
  # CHECK: mhlo.log_plus_one
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(0))(lax.log1p)
  # CHECK-LABEL: TEST: lt float32[] float32[]
  # CHECK: mhlo.compare
  # CHECK-SAME: compare_type = #mhlo<"comparison_type FLOAT">
  # CHECK-SAME: comparison_direction = #mhlo<"comparison_direction LT">
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(1), np.float32(2))(lax.lt)
  # CHECK-LABEL: TEST: max float32[] float32[]
  # CHECK: mhlo.max
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(1), np.float32(2))(lax.max)
  # CHECK-LABEL: TEST: min float32[] float32[]
  # CHECK: mhlo.min
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(1), np.float32(2))(lax.min)
  # CHECK-LABEL: TEST: mul float32[] float32[]
  # CHECK: mhlo.mul
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(1), np.float32(2))(lax.mul)
  # CHECK-LABEL: TEST: ne float32[] float32[]
  # CHECK: mhlo.compare
  # CHECK-SAME: compare_type = #mhlo<"comparison_type FLOAT">
  # CHECK-SAME: comparison_direction = #mhlo<"comparison_direction NE">
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(1), np.float32(2))(lax.ne)
  # CHECK-LABEL: TEST: neg int64[]
  # CHECK: mhlo.negate
  # CHECK-SAME: tensor<i64>
  print_ir(np.int64(0))(lax.neg)
  # CHECK-LABEL: TEST: nextafter float32[] float32[]
  # CHECK: chlo.next_after
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(0), np.float32(0))(lax.nextafter)
  # CHECK-LABEL: TEST: bitwise_not int64[]
  # CHECK: mhlo.not
  # CHECK-SAME: tensor<i64>
  print_ir(np.int64(0))(lax.bitwise_not)
  # CHECK-LABEL: TEST: bitwise_not bool[]
  # CHECK: mhlo.not
  # CHECK-SAME: tensor<i1>
  print_ir(np.bool_(0))(lax.bitwise_not)
  # CHECK-LABEL: TEST: population_count uint32[]
  # CHECK: mhlo.popcnt
  # CHECK-SAME: tensor<ui32>
  print_ir(np.uint32(0))(lax.population_count)
  # CHECK-LABEL: TEST: pow float32[] float32[]
  # CHECK: mhlo.power
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(1), np.float32(2))(lax.pow)
  # CHECK-LABEL: TEST: random_gamma_grad float32[] float32[]
  # CHECK: xla_fallback_random_gamma_grad
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(0), np.float32(0))(lax.random_gamma_grad)
  # CHECK-LABEL: TEST: real complex128[]
  # CHECK: mhlo.real
  # CHECK-SAME: tensor<complex<f64>>
  print_ir(np.complex128(0))(lax.real)
  # CHECK-LABEL: TEST: reduce_precision bfloat16[]
  # CHECK: mhlo.reduce_precision
  # CHECK-SAME: tensor<bf16>
  print_ir(jnp.bfloat16(0))(
      partial(lax.reduce_precision, exponent_bits=2, mantissa_bits=2))
  # CHECK-LABEL: TEST: rem float32[] float32[]
  # CHECK: mhlo.rem
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(1), np.float32(2))(lax.rem)
  # CHECK-LABEL: TEST: round float64[7,1]
  # CHECK: mhlo.round
  # CHECK-SAME: tensor<7x1xf64>
  print_ir(np.empty((7,1), np.float64))(
      partial(lax.round, rounding_method=lax.RoundingMethod.AWAY_FROM_ZERO))
  # CHECK-LABEL: TEST: rsqrt complex64[]
  # CHECK: mhlo.rsqrt
  # CHECK-SAME: tensor<complex<f32>>
  print_ir(jnp.complex64(0))(lax.rsqrt)
  # CHECK-LABEL: TEST: shift_left uint32[] uint32[]
  # CHECK: mhlo.shift_left
  # CHECK-SAME: tensor<ui32>
  print_ir(np.uint32(0), np.uint32(0))(lax.shift_left)
  # CHECK-LABEL: TEST: shift_right_arithmetic uint8[] uint8[]
  # CHECK: mhlo.shift_right_arithmetic
  # CHECK-SAME: tensor<ui8>
  print_ir(np.uint8(0), np.uint8(0))(lax.shift_right_arithmetic)
  # CHECK-LABEL: TEST: shift_right_logical uint16[] uint16[]
  # CHECK: mhlo.shift_right_logical
  # CHECK-SAME: tensor<ui16>
  print_ir(np.uint16(0), np.uint16(0))(lax.shift_right_logical)
  # CHECK-LABEL: TEST: sign int64[]
  # CHECK: mhlo.sign
  # CHECK-SAME: tensor<i64>
  print_ir(np.int64(0))(lax.sign)
  # CHECK-LABEL: TEST: sign uint32[]
  # CHECK: mhlo.compare
  # CHECK-SAME: tensor<ui32>
  print_ir(np.uint32(0))(lax.sign)
  # CHECK-LABEL: TEST: sin float32[]
  # CHECK: mhlo.sin
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(0))(lax.sin)
  # CHECK-LABEL: TEST: sinh float32[]
  # CHECK: xla_fallback_sinh
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(0))(lax.sinh)
  # CHECK-LABEL: TEST: sub float32[] float32[]
  # CHECK: mhlo.sub
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(1), np.float32(2))(lax.sub)
  # CHECK-LABEL: TEST: sqrt bfloat16[]
  # CHECK: mhlo.sqrt
  # CHECK-SAME: tensor<bf16>
  print_ir(jnp.bfloat16(0))(lax.sqrt)
  # CHECK-LABEL: TEST: tan float16[]
  # CHECK: mhlo.sine
  # CHECK-SAME: tensor<f32>
  # CHECK: mhlo.cosine
  # CHECK-SAME: tensor<f32>
  print_ir(np.float16(0))(lax.tan)
  # CHECK-LABEL: TEST: tanh float32[]
  # CHECK: mhlo.tanh
  # CHECK-SAME: tensor<f32>
  print_ir(np.float32(0))(lax.tanh)
# Entry point: absl parses flags and then invokes main.
if __name__ == "__main__":
  app.run(main)
| 30.720588 | 76 | 0.689735 |
from absl import app
from functools import partial
import jax
from jax import numpy as jnp
from jax import lax
import numpy as np
from jax.tests.filecheck.jax_filecheck_helpers import print_ir
# Enable the MLIR lowering path and 64-bit types for these IR-printing runs.
jax.config.update("jax_enable_mlir", True)
jax.config.update("jax_enable_x64", True)
def main(_):
  """Print the lowered MHLO IR for a battery of elementwise lax ops."""
  print_ir(np.int32(0))(lax.abs)
  print_ir(np.float32(1), np.float32(2))(lax.add)
  print_ir(np.float32(1))(lax.acos)
  print_ir(np.float32(0))(lax.acosh)
  print_ir(np.float32(1))(lax.asin)
  print_ir(np.float32(0))(lax.asinh)
  print_ir(np.float32(1))(lax.atan)
  print_ir(np.float32(0))(lax.atanh)
  print_ir(np.float64(1), np.float64(2))(lax.atan2)
  print_ir(np.float32(0))(lax.bessel_i0e)
  print_ir(np.float32(0))(lax.bessel_i1e)
  print_ir(np.float32(0), np.float32(0), np.float32(0))(lax.betainc)
  print_ir(np.empty((7,), np.uint32))(
      partial(lax.bitcast_convert_type, new_dtype=np.float32))
  print_ir(np.int32(1), np.int32(2))(lax.bitwise_and)
  print_ir(np.bool_(0), np.bool_(0))(lax.bitwise_and)
  print_ir(np.int32(1), np.int32(2))(lax.bitwise_or)
  print_ir(np.bool_(0), np.bool_(0))(lax.bitwise_or)
  print_ir(np.int32(1), np.int32(2))(lax.bitwise_xor)
  print_ir(np.bool_(0), np.bool_(0))(lax.bitwise_xor)
  print_ir(jnp.bfloat16(0))(lax.cbrt)
  print_ir(jnp.bfloat16(0), jnp.bfloat16(0), jnp.bfloat16(0))(lax.clamp)
  print_ir(np.empty((7,), np.float16))(lax.ceil)
  print_ir(np.empty((7,), np.float16))(
      partial(lax.convert_element_type, new_dtype=np.float32))
  print_ir(np.empty((7,), np.complex64))(
      partial(lax.convert_element_type, new_dtype=np.float32))
  print_ir(np.empty((7,), np.float32))(
      partial(lax.convert_element_type, new_dtype=np.bool_))
  print_ir(np.uint32(0))(lax.clz)
  print_ir(np.complex64(0))(lax.conj)
  print_ir(np.float32(0))(lax.cos)
  print_ir(np.float32(0))(lax.cosh)
  print_ir(np.float32(0))(lax.digamma)
  print_ir(np.float32(1), np.float32(2))(lax.div)
  # Fixed: was `rint_ir(...)`, a NameError-producing typo (`print_ir` with
  # its leading `p` lost).
  print_ir(np.float32(0))(lax.erf)
  print_ir(np.float32(0))(lax.erfc)
  print_ir(np.float32(0))(lax.erf_inv)
  print_ir(np.float16(0))(lax.exp)
  print_ir(jnp.bfloat16(0))(lax.expm1)
  print_ir(np.empty((2, 3), jnp.bfloat16))(lax.floor)
  print_ir(np.float32(0), np.float32(0))(lax.igamma)
  print_ir(np.float32(0), np.float32(0))(lax.igammac)
  print_ir(np.float32(0), np.float32(0))(lax.igamma_grad_a)
  print_ir(np.complex64(0))(lax.imag)
  @print_ir(np.float32(1))
  def integer_pow(x): return lax.integer_pow(x, 3)
  print_ir(np.float64(0))(lax.is_finite)
  print_ir(np.float32(0))(lax.lgamma)
  print_ir(np.float32(0))(lax.log)
  print_ir(np.float32(0))(lax.log1p)
  print_ir(np.float32(1), np.float32(2))(lax.max)
  print_ir(np.float32(1), np.float32(2))(lax.min)
  print_ir(np.float32(1), np.float32(2))(lax.mul)
  print_ir(np.int64(0))(lax.neg)
  print_ir(np.float32(0), np.float32(0))(lax.nextafter)
  print_ir(np.int64(0))(lax.bitwise_not)
  print_ir(np.bool_(0))(lax.bitwise_not)
  print_ir(np.uint32(0))(lax.population_count)
  print_ir(np.float32(1), np.float32(2))(lax.pow)
  print_ir(np.float32(0), np.float32(0))(lax.random_gamma_grad)
  print_ir(np.complex128(0))(lax.real)
  print_ir(jnp.bfloat16(0))(
      partial(lax.reduce_precision, exponent_bits=2, mantissa_bits=2))
  print_ir(np.float32(1), np.float32(2))(lax.rem)
  print_ir(np.empty((7,1), np.float64))(
      partial(lax.round, rounding_method=lax.RoundingMethod.AWAY_FROM_ZERO))
  print_ir(jnp.complex64(0))(lax.rsqrt)
  print_ir(np.uint32(0), np.uint32(0))(lax.shift_left)
  print_ir(np.uint8(0), np.uint8(0))(lax.shift_right_arithmetic)
  print_ir(np.uint16(0), np.uint16(0))(lax.shift_right_logical)
  print_ir(np.int64(0))(lax.sign)
  print_ir(np.uint32(0))(lax.sign)
  print_ir(np.float32(0))(lax.sin)
  print_ir(np.float32(0))(lax.sinh)
  print_ir(np.float32(1), np.float32(2))(lax.sub)
  print_ir(jnp.bfloat16(0))(lax.sqrt)
  print_ir(np.float16(0))(lax.tan)
  print_ir(np.float32(0))(lax.tanh)
# Entry point: absl parses flags and then invokes main.
if __name__ == "__main__":
  app.run(main)
| true | true |
f7f495fbab1aed24d7a26f51a90487121dd47172 | 6,331 | py | Python | optuna/storages/base.py | shikiponn/optuna | a151fafc4d816d9ba7d6740adf8892a7832f83a9 | [
"MIT"
] | 1 | 2019-01-16T23:59:31.000Z | 2019-01-16T23:59:31.000Z | optuna/storages/base.py | shikiponn/optuna | a151fafc4d816d9ba7d6740adf8892a7832f83a9 | [
"MIT"
] | null | null | null | optuna/storages/base.py | shikiponn/optuna | a151fafc4d816d9ba7d6740adf8892a7832f83a9 | [
"MIT"
] | 1 | 2022-01-24T11:42:24.000Z | 2022-01-24T11:42:24.000Z | import abc
import numpy as np
import six
from typing import Any # NOQA
from typing import Dict # NOQA
from typing import List # NOQA
from typing import Optional # NOQA
from typing import Tuple # NOQA
from optuna import distributions # NOQA
from optuna import structs # NOQA
# Prefix used when auto-generating a name for a study created without one.
DEFAULT_STUDY_NAME_PREFIX = 'no-name-'
@six.add_metaclass(abc.ABCMeta)
class BaseStorage(object):
    """Base class for storages.

    This class is not supposed to be directly accessed by library users.

    Storage classes abstract a backend database and provide library internal interfaces to
    read/write history of studies and trials.
    """

    # Basic study manipulation

    @abc.abstractmethod
    def create_new_study_id(self, study_name=None):
        # type: (Optional[str]) -> int

        raise NotImplementedError

    @abc.abstractmethod
    def set_study_user_attr(self, study_id, key, value):
        # type: (int, str, Any) -> None

        raise NotImplementedError

    @abc.abstractmethod
    def set_study_direction(self, study_id, direction):
        # type: (int, structs.StudyDirection) -> None

        raise NotImplementedError

    @abc.abstractmethod
    def set_study_system_attr(self, study_id, key, value):
        # type: (int, str, Any) -> None

        raise NotImplementedError

    # Basic study access

    @abc.abstractmethod
    def get_study_id_from_name(self, study_name):
        # type: (str) -> int

        raise NotImplementedError

    @abc.abstractmethod
    def get_study_name_from_id(self, study_id):
        # type: (int) -> str

        raise NotImplementedError

    @abc.abstractmethod
    def get_study_direction(self, study_id):
        # type: (int) -> structs.StudyDirection

        raise NotImplementedError

    @abc.abstractmethod
    def get_study_user_attrs(self, study_id):
        # type: (int) -> Dict[str, Any]

        raise NotImplementedError

    @abc.abstractmethod
    def get_study_system_attrs(self, study_id):
        # type: (int) -> Dict[str, Any]

        raise NotImplementedError

    @abc.abstractmethod
    def get_all_study_summaries(self):
        # type: () -> List[structs.StudySummary]

        raise NotImplementedError

    # Basic trial manipulation

    @abc.abstractmethod
    def create_new_trial_id(self, study_id):
        # type: (int) -> int

        raise NotImplementedError

    @abc.abstractmethod
    def set_trial_state(self, trial_id, state):
        # type: (int, structs.TrialState) -> None

        raise NotImplementedError

    @abc.abstractmethod
    def set_trial_param(self, trial_id, param_name, param_value_internal, distribution):
        # type: (int, str, float, distributions.BaseDistribution) -> bool

        raise NotImplementedError

    @abc.abstractmethod
    def get_trial_param(self, trial_id, param_name):
        # type: (int, str) -> float

        raise NotImplementedError

    @abc.abstractmethod
    def set_trial_value(self, trial_id, value):
        # type: (int, float) -> None

        raise NotImplementedError

    @abc.abstractmethod
    def set_trial_intermediate_value(self, trial_id, step, intermediate_value):
        # type: (int, int, float) -> bool

        raise NotImplementedError

    @abc.abstractmethod
    def set_trial_user_attr(self, trial_id, key, value):
        # type: (int, str, Any) -> None

        raise NotImplementedError

    @abc.abstractmethod
    def set_trial_system_attr(self, trial_id, key, value):
        # type: (int, str, Any) -> None

        raise NotImplementedError

    # Basic trial access

    @abc.abstractmethod
    def get_trial(self, trial_id):
        # type: (int) -> structs.FrozenTrial

        raise NotImplementedError

    @abc.abstractmethod
    def get_all_trials(self, study_id):
        # type: (int) -> List[structs.FrozenTrial]

        raise NotImplementedError

    @abc.abstractmethod
    def get_n_trials(self, study_id, state=None):
        # type: (int, Optional[structs.TrialState]) -> int

        raise NotImplementedError

    def get_best_trial(self, study_id):
        # type: (int) -> structs.FrozenTrial
        """Return the completed trial with the smallest objective value."""

        all_trials = self.get_all_trials(study_id)
        all_trials = [t for t in all_trials if t.state is structs.TrialState.COMPLETE]

        if len(all_trials) == 0:
            raise ValueError('No trials are completed yet.')

        # TODO(sano): Deal with maximize direction.
        return min(all_trials, key=lambda t: t.value)

    def get_trial_params(self, trial_id):
        # type: (int) -> Dict[str, Any]

        return self.get_trial(trial_id).params

    def get_trial_user_attrs(self, trial_id):
        # type: (int) -> Dict[str, Any]

        return self.get_trial(trial_id).user_attrs

    def get_trial_system_attrs(self, trial_id):
        # type: (int) -> Dict[str, Any]

        return self.get_trial(trial_id).system_attrs

    # Methods for the TPE sampler

    def get_trial_param_result_pairs(self, study_id, param_name):
        # type: (int, str) -> List[Tuple[float, float]]

        # Be careful: this method returns param values in internal representation
        all_trials = self.get_all_trials(study_id)

        return [
            (t.params_in_internal_repr[param_name], t.value)
            for t in all_trials
            if (t.value is not None and
                param_name in t.params and
                t.state is structs.TrialState.COMPLETE)
            # TODO(Akiba): We also want to use pruned results
        ]

    # Methods for the median pruner

    def get_best_intermediate_result_over_steps(self, trial_id):
        # type: (int) -> float

        # ``np.float`` was only an alias of the builtin ``float`` and was
        # removed in NumPy 1.24; using ``float`` keeps the dtype identical.
        return np.nanmin(np.array(
            list(self.get_trial(trial_id).intermediate_values.values()),
            float))

    def get_median_intermediate_result_over_trials(self, study_id, step):
        # type: (int, int) -> float

        all_trials = [t for t in self.get_all_trials(study_id)
                      if t.state == structs.TrialState.COMPLETE]

        if len(all_trials) == 0:
            raise ValueError("No trials have been completed.")

        return float(np.nanmedian(np.array([
            t.intermediate_values[step] for t in all_trials
            if step in t.intermediate_values
        ], float)))

    def remove_session(self):
        # type: () -> None

        pass
| 27.406926 | 90 | 0.652977 | import abc
import numpy as np
import six
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from optuna import distributions
from optuna import structs
# Prefix used when auto-generating a name for a study created without one.
DEFAULT_STUDY_NAME_PREFIX = 'no-name-'
@six.add_metaclass(abc.ABCMeta)
class BaseStorage(object):
    """Abstract interface to the backend database holding studies and trials."""

    @abc.abstractmethod
    def create_new_study_id(self, study_name=None):
        raise NotImplementedError

    @abc.abstractmethod
    def set_study_user_attr(self, study_id, key, value):
        raise NotImplementedError

    @abc.abstractmethod
    def set_study_direction(self, study_id, direction):
        raise NotImplementedError

    @abc.abstractmethod
    def set_study_system_attr(self, study_id, key, value):
        raise NotImplementedError

    @abc.abstractmethod
    def get_study_id_from_name(self, study_name):
        raise NotImplementedError

    @abc.abstractmethod
    def get_study_name_from_id(self, study_id):
        raise NotImplementedError

    @abc.abstractmethod
    def get_study_direction(self, study_id):
        raise NotImplementedError

    @abc.abstractmethod
    def get_study_user_attrs(self, study_id):
        raise NotImplementedError

    @abc.abstractmethod
    def get_study_system_attrs(self, study_id):
        raise NotImplementedError

    @abc.abstractmethod
    def get_all_study_summaries(self):
        raise NotImplementedError

    @abc.abstractmethod
    def create_new_trial_id(self, study_id):
        raise NotImplementedError

    @abc.abstractmethod
    def set_trial_state(self, trial_id, state):
        raise NotImplementedError

    @abc.abstractmethod
    def set_trial_param(self, trial_id, param_name, param_value_internal, distribution):
        raise NotImplementedError

    @abc.abstractmethod
    def get_trial_param(self, trial_id, param_name):
        raise NotImplementedError

    @abc.abstractmethod
    def set_trial_value(self, trial_id, value):
        raise NotImplementedError

    @abc.abstractmethod
    def set_trial_intermediate_value(self, trial_id, step, intermediate_value):
        raise NotImplementedError

    @abc.abstractmethod
    def set_trial_user_attr(self, trial_id, key, value):
        raise NotImplementedError

    @abc.abstractmethod
    def set_trial_system_attr(self, trial_id, key, value):
        raise NotImplementedError

    @abc.abstractmethod
    def get_trial(self, trial_id):
        raise NotImplementedError

    @abc.abstractmethod
    def get_all_trials(self, study_id):
        raise NotImplementedError

    @abc.abstractmethod
    def get_n_trials(self, study_id, state=None):
        raise NotImplementedError

    def get_best_trial(self, study_id):
        """Return the completed trial with the smallest objective value."""
        all_trials = self.get_all_trials(study_id)
        all_trials = [t for t in all_trials if t.state is structs.TrialState.COMPLETE]
        if len(all_trials) == 0:
            raise ValueError('No trials are completed yet.')
        return min(all_trials, key=lambda t: t.value)

    def get_trial_params(self, trial_id):
        return self.get_trial(trial_id).params

    def get_trial_user_attrs(self, trial_id):
        return self.get_trial(trial_id).user_attrs

    def get_trial_system_attrs(self, trial_id):
        return self.get_trial(trial_id).system_attrs

    def get_trial_param_result_pairs(self, study_id, param_name):
        # Param values are returned in internal representation.
        all_trials = self.get_all_trials(study_id)
        return [
            (t.params_in_internal_repr[param_name], t.value)
            for t in all_trials
            if (t.value is not None and
                param_name in t.params and
                t.state is structs.TrialState.COMPLETE)
        ]

    def get_best_intermediate_result_over_steps(self, trial_id):
        # ``np.float`` was only an alias of the builtin ``float`` and was
        # removed in NumPy 1.24; using ``float`` keeps the dtype identical.
        return np.nanmin(np.array(
            list(self.get_trial(trial_id).intermediate_values.values()),
            float))

    def get_median_intermediate_result_over_trials(self, study_id, step):
        all_trials = [t for t in self.get_all_trials(study_id)
                      if t.state == structs.TrialState.COMPLETE]
        if len(all_trials) == 0:
            raise ValueError("No trials have been completed.")
        return float(np.nanmedian(np.array([
            t.intermediate_values[step] for t in all_trials
            if step in t.intermediate_values
        ], float)))

    def remove_session(self):
        pass
| true | true |
f7f4968c66715212e8397add0cd94790f88ab497 | 5,992 | py | Python | fjord/feedback/south_migrations/0043_fix_android_browser.py | DESHRAJ/fjord | 8899b6286b23347c9b024334e61c33fe133e836d | [
"BSD-3-Clause"
] | null | null | null | fjord/feedback/south_migrations/0043_fix_android_browser.py | DESHRAJ/fjord | 8899b6286b23347c9b024334e61c33fe133e836d | [
"BSD-3-Clause"
] | null | null | null | fjord/feedback/south_migrations/0043_fix_android_browser.py | DESHRAJ/fjord | 8899b6286b23347c9b024334e61c33fe133e836d | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import os
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.conf import settings
class Migration(DataMigration):
    """Relabel Android 'Firefox' responses as 'Firefox for Android' (and back)."""

    def forwards(self, orm):
        """Rename browser 'Firefox' to 'Firefox for Android' on Android rows."""
        rows = (
            orm.Response.objects
            .filter(browser='Firefox', browser_platform='Android')
            .update(browser='Firefox for Android')
        )
        # getattr needs a default: settings.TEST is only defined in the test
        # settings module, so the bare two-argument form raised AttributeError
        # in any other environment. A single formatted string also prints
        # identically on Python 2 and 3 (print(a, b) printed a tuple on py2).
        if not getattr(settings, 'TEST', False):
            print('{0} Updated {1} rows'.format(
                os.path.basename(__file__), rows))

    def backwards(self, orm):
        """Revert: rename 'Firefox for Android' back to 'Firefox' on Android rows."""
        rows = (
            orm.Response.objects
            .filter(browser='Firefox for Android', browser_platform='Android')
            .update(browser='Firefox')
        )
        if not getattr(settings, 'TEST', False):
            print('{0} Updated {1} rows'.format(
                os.path.basename(__file__), rows))

    models = {
        u'feedback.product': {
            'Meta': {'object_name': 'Product'},
            'browser_data_browser': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'blank': 'True'}),
            'db_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'display_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image_file': ('django.db.models.fields.CharField', [], {'default': "u'noimage.png'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'notes': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
            'on_dashboard': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'on_picker': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'translation_system': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'})
        },
        u'feedback.response': {
            'Meta': {'ordering': "['-created']", 'object_name': 'Response'},
            'api': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'browser': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'browser_platform': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'browser_version': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'campaign': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'category': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'channel': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'country': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '4', 'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'device': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'happy': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'locale': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
            'manufacturer': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'platform': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'product': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'rating': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
            'source': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'translated_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'url': ('fjord.base.models.EnhancedURLField', [], {'max_length': '200', 'blank': 'True'}),
            'user_agent': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'version': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'})
        },
        u'feedback.responsecontext': {
            'Meta': {'object_name': 'ResponseContext'},
            'data': ('fjord.base.models.JSONObjectField', [], {'default': '{}'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'opinion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feedback.Response']"})
        },
        u'feedback.responseemail': {
            'Meta': {'object_name': 'ResponseEmail'},
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'opinion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feedback.Response']"})
        },
        u'feedback.responsepi': {
            'Meta': {'object_name': 'ResponsePI'},
            'data': ('fjord.base.models.JSONObjectField', [], {'default': '{}'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'opinion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feedback.Response']"})
        }
    }

    complete_apps = ['feedback']
    symmetrical = True
| 63.744681 | 153 | 0.554907 |
import os
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.conf import settings
class Migration(DataMigration):
def forwards(self, orm):
rows = (
orm.Response.objects
.filter(browser='Firefox', browser_platform='Android')
.update(browser='Firefox for Android')
)
if not getattr(settings, 'TEST'):
print (os.path.basename(__file__), 'Updated {0} rows'.format(rows))
def backwards(self, orm):
rows = (
orm.Response.objects
.filter(browser='Firefox for Android', browser_platform='Android')
.update(browser='Firefox')
)
if not getattr(settings, 'TEST'):
print (os.path.basename(__file__), 'Updated {0} rows'.format(rows))
models = {
u'feedback.product': {
'Meta': {'object_name': 'Product'},
'browser_data_browser': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'blank': 'True'}),
'db_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_file': ('django.db.models.fields.CharField', [], {'default': "u'noimage.png'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'on_dashboard': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'on_picker': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'translation_system': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'})
},
u'feedback.response': {
'Meta': {'ordering': "['-created']", 'object_name': 'Response'},
'api': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'browser': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'browser_platform': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'browser_version': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'campaign': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'channel': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '4', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'device': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'happy': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locale': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'manufacturer': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'product': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'rating': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'translated_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'url': ('fjord.base.models.EnhancedURLField', [], {'max_length': '200', 'blank': 'True'}),
'user_agent': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'})
},
u'feedback.responsecontext': {
'Meta': {'object_name': 'ResponseContext'},
'data': ('fjord.base.models.JSONObjectField', [], {'default': '{}'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'opinion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feedback.Response']"})
},
u'feedback.responseemail': {
'Meta': {'object_name': 'ResponseEmail'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'opinion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feedback.Response']"})
},
u'feedback.responsepi': {
'Meta': {'object_name': 'ResponsePI'},
'data': ('fjord.base.models.JSONObjectField', [], {'default': '{}'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'opinion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feedback.Response']"})
}
}
complete_apps = ['feedback']
symmetrical = True
| true | true |
f7f497c39223e7705f5781c0b5fe0ceb076b8ff5 | 3,590 | py | Python | isso/tests/test_html.py | Konzertheld/isso | 2ba721790056b252f0752d535e174bd3ab607df2 | [
"MIT"
] | 4 | 2019-12-27T01:54:04.000Z | 2021-02-03T17:04:58.000Z | isso/tests/test_html.py | Konzertheld/isso | 2ba721790056b252f0752d535e174bd3ab607df2 | [
"MIT"
] | null | null | null | isso/tests/test_html.py | Konzertheld/isso | 2ba721790056b252f0752d535e174bd3ab607df2 | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
import unittest
import textwrap
from isso import config
from isso.utils import html
class TestHTML(unittest.TestCase):
def test_markdown(self):
convert = html.Markdown(extensions=())
examples = [
("*Ohai!*", "<p><em>Ohai!</em></p>"),
("<em>Hi</em>", "<p><em>Hi</em></p>"),
("http://example.org/", '<p>http://example.org/</p>')]
for (input, expected) in examples:
self.assertEqual(convert(input), expected)
def test_markdown_extensions(self):
convert = html.Markdown(extensions=("strikethrough", "superscript"))
examples = [
("~~strike~~ through", "<p><del>strike</del> through</p>"),
("sup^(script)", "<p>sup<sup>script</sup></p>")]
for (input, expected) in examples:
self.assertEqual(convert(input), expected)
def test_github_flavoured_markdown(self):
convert = html.Markdown(extensions=("fenced-code", ))
# without lang
_in = textwrap.dedent("""\
Hello, World
```
#!/usr/bin/env python
print("Hello, World")""")
_out = textwrap.dedent("""\
<p>Hello, World</p>
<pre><code>#!/usr/bin/env python
print("Hello, World")
</code></pre>""")
self.assertEqual(convert(_in), _out)
# w/ lang
_in = textwrap.dedent("""\
Hello, World
```python
#!/usr/bin/env python
print("Hello, World")""")
_out = textwrap.dedent("""\
<p>Hello, World</p>
<pre><code class="python">#!/usr/bin/env python
print("Hello, World")
</code></pre>""")
def test_sanitizer(self):
sanitizer = html.Sanitizer(elements=[], attributes=[])
examples = [
('Look: <img src="..." />', 'Look: '),
('<a href="http://example.org/">Ha</a>',
['<a href="http://example.org/" rel="nofollow noopener">Ha</a>',
'<a rel="nofollow noopener" href="http://example.org/">Ha</a>']),
('<a href="sms:+1234567890">Ha</a>', '<a>Ha</a>'),
('<p style="visibility: hidden;">Test</p>', '<p>Test</p>'),
('<script>alert("Onoe")</script>', 'alert("Onoe")')]
for (input, expected) in examples:
if isinstance(expected, list):
self.assertIn(sanitizer.sanitize(input), expected)
else:
self.assertEqual(sanitizer.sanitize(input), expected)
def test_sanitizer_extensions(self):
sanitizer = html.Sanitizer(elements=["img"], attributes=["src"])
examples = [
('<img src="cat.gif" />', '<img src="cat.gif">'),
('<script src="doge.js"></script>', '')]
for (input, expected) in examples:
self.assertEqual(sanitizer.sanitize(input), expected)
def test_render(self):
conf = config.new({
"markup": {
"options": "autolink",
"flags": "",
"allowed-elements": "",
"allowed-attributes": ""
}
})
renderer = html.Markup(conf.section("markup")).render
self.assertIn(renderer("http://example.org/ and sms:+1234567890"),
['<p><a href="http://example.org/" rel="nofollow noopener">http://example.org/</a> and sms:+1234567890</p>',
'<p><a rel="nofollow noopener" href="http://example.org/">http://example.org/</a> and sms:+1234567890</p>'])
| 35.544554 | 131 | 0.512813 |
import unittest
import textwrap
from isso import config
from isso.utils import html
class TestHTML(unittest.TestCase):
def test_markdown(self):
convert = html.Markdown(extensions=())
examples = [
("*Ohai!*", "<p><em>Ohai!</em></p>"),
("<em>Hi</em>", "<p><em>Hi</em></p>"),
("http://example.org/", '<p>http://example.org/</p>')]
for (input, expected) in examples:
self.assertEqual(convert(input), expected)
def test_markdown_extensions(self):
convert = html.Markdown(extensions=("strikethrough", "superscript"))
examples = [
("~~strike~~ through", "<p><del>strike</del> through</p>"),
("sup^(script)", "<p>sup<sup>script</sup></p>")]
for (input, expected) in examples:
self.assertEqual(convert(input), expected)
def test_github_flavoured_markdown(self):
convert = html.Markdown(extensions=("fenced-code", ))
_in = textwrap.dedent("""\
Hello, World
```
#!/usr/bin/env python
print("Hello, World")""")
_out = textwrap.dedent("""\
<p>Hello, World</p>
<pre><code>#!/usr/bin/env python
print("Hello, World")
</code></pre>""")
self.assertEqual(convert(_in), _out)
_in = textwrap.dedent("""\
Hello, World
```python
#!/usr/bin/env python
print("Hello, World")""")
_out = textwrap.dedent("""\
<p>Hello, World</p>
<pre><code class="python">#!/usr/bin/env python
print("Hello, World")
</code></pre>""")
def test_sanitizer(self):
sanitizer = html.Sanitizer(elements=[], attributes=[])
examples = [
('Look: <img src="..." />', 'Look: '),
('<a href="http://example.org/">Ha</a>',
['<a href="http://example.org/" rel="nofollow noopener">Ha</a>',
'<a rel="nofollow noopener" href="http://example.org/">Ha</a>']),
('<a href="sms:+1234567890">Ha</a>', '<a>Ha</a>'),
('<p style="visibility: hidden;">Test</p>', '<p>Test</p>'),
('<script>alert("Onoe")</script>', 'alert("Onoe")')]
for (input, expected) in examples:
if isinstance(expected, list):
self.assertIn(sanitizer.sanitize(input), expected)
else:
self.assertEqual(sanitizer.sanitize(input), expected)
def test_sanitizer_extensions(self):
sanitizer = html.Sanitizer(elements=["img"], attributes=["src"])
examples = [
('<img src="cat.gif" />', '<img src="cat.gif">'),
('<script src="doge.js"></script>', '')]
for (input, expected) in examples:
self.assertEqual(sanitizer.sanitize(input), expected)
def test_render(self):
conf = config.new({
"markup": {
"options": "autolink",
"flags": "",
"allowed-elements": "",
"allowed-attributes": ""
}
})
renderer = html.Markup(conf.section("markup")).render
self.assertIn(renderer("http://example.org/ and sms:+1234567890"),
['<p><a href="http://example.org/" rel="nofollow noopener">http://example.org/</a> and sms:+1234567890</p>',
'<p><a rel="nofollow noopener" href="http://example.org/">http://example.org/</a> and sms:+1234567890</p>'])
| true | true |
f7f497c422aedc0ee5dd0263e662fc41984db5bf | 8,334 | py | Python | autoPyTorch/components/networks/feature/shapedmlpnet.py | thomascherickal/Auto-PyTorch | 9e25a3bdef8e836e63979229eef77830cd64bb53 | [
"BSD-3-Clause"
] | 1 | 2019-09-02T00:37:52.000Z | 2019-09-02T00:37:52.000Z | autoPyTorch/components/networks/feature/shapedmlpnet.py | thomascherickal/Auto-PyTorch | 9e25a3bdef8e836e63979229eef77830cd64bb53 | [
"BSD-3-Clause"
] | null | null | null | autoPyTorch/components/networks/feature/shapedmlpnet.py | thomascherickal/Auto-PyTorch | 9e25a3bdef8e836e63979229eef77830cd64bb53 | [
"BSD-3-Clause"
] | 1 | 2019-09-02T00:40:30.000Z | 2019-09-02T00:40:30.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Multilayer Perceptrons in fancy shapes.
"""
import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
import torch.nn as nn
from autoPyTorch.components.networks.feature.mlpnet import MlpNet
__author__ = "Max Dippel, Michael Burkart and Matthias Urban"
__version__ = "0.0.1"
__license__ = "BSD"
class ShapedMlpNet(MlpNet):
def __init__(self, *args, **kwargs):
super(ShapedMlpNet, self).__init__(*args, **kwargs)
def _build_net(self, in_features, out_features):
layers = list()
neuron_counts = get_shaped_neuron_counts(self.config['mlp_shape'],
in_features,
out_features,
self.config['max_units'],
self.config['num_layers'])
if self.config["use_dropout"]:
dropout_shape = get_shaped_neuron_counts( self.config['dropout_shape'], 0, 0, 1000, self.config['num_layers'])
previous = in_features
for i in range(self.config['num_layers']-1):
if (i >= len(neuron_counts)):
break
dropout = dropout_shape[i] / 1000 * self.config["max_dropout"] if self.config["use_dropout"] else 0
self._add_layer(layers, previous, neuron_counts[i], dropout)
previous = neuron_counts[i]
layers.append(nn.Linear(previous, out_features))
return nn.Sequential(*layers)
def _add_layer(self, layers, in_features, out_features, dropout):
layers.append(nn.Linear(in_features, out_features))
layers.append(self.activation())
if self.config["use_dropout"]:
layers.append(nn.Dropout(dropout))
@staticmethod
def get_config_space(user_updates=None):
cs = CS.ConfigurationSpace()
range_num_layers=(1, 15)
range_max_num_units=(10, 1024)
possible_activations=('sigmoid', 'tanh', 'relu')
possible_net_shapes=('funnel', 'long_funnel', 'diamond', 'hexagon', 'brick', 'triangle', 'stairs')
possible_dropout_shapes=('funnel', 'long_funnel', 'diamond', 'hexagon', 'brick', 'triangle', 'stairs')
range_max_dropout=(0, 0.8)
layer_shape = CSH.CategoricalHyperparameter('mlp_shape', possible_net_shapes)
cs.add_hyperparameter(layer_shape)
num_layers = CSH.UniformIntegerHyperparameter('num_layers', lower=range_num_layers[0], upper=range_num_layers[1])
cs.add_hyperparameter(num_layers)
max_units = CSH.UniformIntegerHyperparameter("max_units", lower=range_max_num_units[0], upper=range_max_num_units[1], log=True)
cs.add_hyperparameter(max_units)
use_dropout = cs.add_hyperparameter(CS.CategoricalHyperparameter("use_dropout", [True, False], default_value=True))
dropout_shape = cs.add_hyperparameter(CSH.CategoricalHyperparameter('dropout_shape', possible_dropout_shapes))
max_dropout = cs.add_hyperparameter(CSH.UniformFloatHyperparameter("max_dropout", lower=range_max_dropout[0], upper=range_max_dropout[1], default_value=0.2))
cs.add_condition(CS.EqualsCondition(dropout_shape, use_dropout, True))
cs.add_condition(CS.EqualsCondition(max_dropout, use_dropout, True))
cs.add_hyperparameter(CSH.CategoricalHyperparameter('activation', possible_activations))
return(cs)
def get_shaped_neuron_counts(shape, in_feat, out_feat, max_neurons, layer_count):
counts = []
if (layer_count <= 0):
return counts
if (layer_count == 1):
counts.append(out_feat)
return counts
max_neurons = max(in_feat, max_neurons)
# https://mikkokotila.github.io/slate/#shapes
if shape == 'brick':
#
# | |
# | |
# | |
# | |
# | |
# |___ ___|
#
for _ in range(layer_count-1):
counts.append(max_neurons)
counts.append(out_feat)
if shape == 'triangle':
#
# / \
# / \
# / \
# / \
# / \
# /_____ _____\
#
previous = in_feat
step_size = int((max_neurons - previous) / (layer_count-1))
step_size = max(0, step_size)
for _ in range(layer_count-2):
previous = previous + step_size
counts.append(previous)
counts.append(max_neurons)
counts.append(out_feat)
if shape == 'funnel':
#
# \ /
# \ /
# \ /
# \ /
# \ /
# \ /
#
previous = max_neurons
counts.append(previous)
step_size = int((previous - out_feat) / (layer_count-1))
step_size = max(0, step_size)
for _ in range(layer_count-2):
previous = previous - step_size
counts.append(previous)
counts.append(out_feat)
if shape == 'long_funnel':
#
# | |
# | |
# | |
# \ /
# \ /
# \ /
#
brick_layer = int(layer_count / 2)
funnel_layer = layer_count - brick_layer
counts.extend(get_shaped_neuron_counts('brick', in_feat, max_neurons, max_neurons, brick_layer))
counts.extend(get_shaped_neuron_counts('funnel', in_feat, out_feat, max_neurons, funnel_layer))
if (len(counts) != layer_count):
print("\nWarning: long funnel layer count does not match " + str(layer_count) + " != " + str(len(counts)) + "\n")
if shape == 'diamond':
#
# / \
# / \
# / \
# \ /
# \ /
# \ /
#
triangle_layer = int(layer_count / 2) + 1
funnel_layer = layer_count - triangle_layer
counts.extend(get_shaped_neuron_counts('triangle', in_feat, max_neurons, max_neurons, triangle_layer))
remove_triangle_layer = len(counts) > 1
if (remove_triangle_layer):
counts = counts[0:-2] # remove the last two layers since max_neurons == out_features (-> two layers with the same size)
counts.extend(get_shaped_neuron_counts('funnel', max_neurons, out_feat, max_neurons, funnel_layer + (2 if remove_triangle_layer else 0)))
if (len(counts) != layer_count):
print("\nWarning: diamond layer count does not match " + str(layer_count) + " != " + str(len(counts)) + "\n")
if shape == 'hexagon':
#
# / \
# / \
# | |
# | |
# \ /
# \ /
#
triangle_layer = int(layer_count / 3) + 1
funnel_layer = triangle_layer
brick_layer = layer_count - triangle_layer - funnel_layer
counts.extend(get_shaped_neuron_counts('triangle', in_feat, max_neurons, max_neurons, triangle_layer))
counts.extend(get_shaped_neuron_counts('brick', max_neurons, max_neurons, max_neurons, brick_layer))
counts.extend(get_shaped_neuron_counts('funnel', max_neurons, out_feat, max_neurons, funnel_layer))
if (len(counts) != layer_count):
print("\nWarning: hexagon layer count does not match " + str(layer_count) + " != " + str(len(counts)) + "\n")
if shape == 'stairs':
#
# | |
# |_ _|
# | |
# |_ _|
# | |
# | |
#
previous = max_neurons
counts.append(previous)
if layer_count % 2 == 1:
counts.append(previous)
step_size = 2 * int((max_neurons - out_feat) / (layer_count-1))
step_size = max(0, step_size)
for _ in range(int(layer_count / 2 - 1)):
previous = previous - step_size
counts.append(previous)
counts.append(previous)
counts.append(out_feat)
if (len(counts) != layer_count):
print("\nWarning: stairs layer count does not match " + str(layer_count) + " != " + str(len(counts)) + "\n")
return counts
| 37.04 | 165 | 0.562275 |
import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
import torch.nn as nn
from autoPyTorch.components.networks.feature.mlpnet import MlpNet
__author__ = "Max Dippel, Michael Burkart and Matthias Urban"
__version__ = "0.0.1"
__license__ = "BSD"
class ShapedMlpNet(MlpNet):
def __init__(self, *args, **kwargs):
super(ShapedMlpNet, self).__init__(*args, **kwargs)
def _build_net(self, in_features, out_features):
layers = list()
neuron_counts = get_shaped_neuron_counts(self.config['mlp_shape'],
in_features,
out_features,
self.config['max_units'],
self.config['num_layers'])
if self.config["use_dropout"]:
dropout_shape = get_shaped_neuron_counts( self.config['dropout_shape'], 0, 0, 1000, self.config['num_layers'])
previous = in_features
for i in range(self.config['num_layers']-1):
if (i >= len(neuron_counts)):
break
dropout = dropout_shape[i] / 1000 * self.config["max_dropout"] if self.config["use_dropout"] else 0
self._add_layer(layers, previous, neuron_counts[i], dropout)
previous = neuron_counts[i]
layers.append(nn.Linear(previous, out_features))
return nn.Sequential(*layers)
def _add_layer(self, layers, in_features, out_features, dropout):
layers.append(nn.Linear(in_features, out_features))
layers.append(self.activation())
if self.config["use_dropout"]:
layers.append(nn.Dropout(dropout))
@staticmethod
def get_config_space(user_updates=None):
cs = CS.ConfigurationSpace()
range_num_layers=(1, 15)
range_max_num_units=(10, 1024)
possible_activations=('sigmoid', 'tanh', 'relu')
possible_net_shapes=('funnel', 'long_funnel', 'diamond', 'hexagon', 'brick', 'triangle', 'stairs')
possible_dropout_shapes=('funnel', 'long_funnel', 'diamond', 'hexagon', 'brick', 'triangle', 'stairs')
range_max_dropout=(0, 0.8)
layer_shape = CSH.CategoricalHyperparameter('mlp_shape', possible_net_shapes)
cs.add_hyperparameter(layer_shape)
num_layers = CSH.UniformIntegerHyperparameter('num_layers', lower=range_num_layers[0], upper=range_num_layers[1])
cs.add_hyperparameter(num_layers)
max_units = CSH.UniformIntegerHyperparameter("max_units", lower=range_max_num_units[0], upper=range_max_num_units[1], log=True)
cs.add_hyperparameter(max_units)
use_dropout = cs.add_hyperparameter(CS.CategoricalHyperparameter("use_dropout", [True, False], default_value=True))
dropout_shape = cs.add_hyperparameter(CSH.CategoricalHyperparameter('dropout_shape', possible_dropout_shapes))
max_dropout = cs.add_hyperparameter(CSH.UniformFloatHyperparameter("max_dropout", lower=range_max_dropout[0], upper=range_max_dropout[1], default_value=0.2))
cs.add_condition(CS.EqualsCondition(dropout_shape, use_dropout, True))
cs.add_condition(CS.EqualsCondition(max_dropout, use_dropout, True))
cs.add_hyperparameter(CSH.CategoricalHyperparameter('activation', possible_activations))
return(cs)
def get_shaped_neuron_counts(shape, in_feat, out_feat, max_neurons, layer_count):
counts = []
if (layer_count <= 0):
return counts
if (layer_count == 1):
counts.append(out_feat)
return counts
max_neurons = max(in_feat, max_neurons)
f shape == 'brick':
for _ in range(layer_count-1):
counts.append(max_neurons)
counts.append(out_feat)
if shape == 'triangle':
previous = in_feat
step_size = int((max_neurons - previous) / (layer_count-1))
step_size = max(0, step_size)
for _ in range(layer_count-2):
previous = previous + step_size
counts.append(previous)
counts.append(max_neurons)
counts.append(out_feat)
if shape == 'funnel':
previous = max_neurons
counts.append(previous)
step_size = int((previous - out_feat) / (layer_count-1))
step_size = max(0, step_size)
for _ in range(layer_count-2):
previous = previous - step_size
counts.append(previous)
counts.append(out_feat)
if shape == 'long_funnel':
brick_layer = int(layer_count / 2)
funnel_layer = layer_count - brick_layer
counts.extend(get_shaped_neuron_counts('brick', in_feat, max_neurons, max_neurons, brick_layer))
counts.extend(get_shaped_neuron_counts('funnel', in_feat, out_feat, max_neurons, funnel_layer))
if (len(counts) != layer_count):
print("\nWarning: long funnel layer count does not match " + str(layer_count) + " != " + str(len(counts)) + "\n")
if shape == 'diamond':
triangle_layer = int(layer_count / 2) + 1
funnel_layer = layer_count - triangle_layer
counts.extend(get_shaped_neuron_counts('triangle', in_feat, max_neurons, max_neurons, triangle_layer))
remove_triangle_layer = len(counts) > 1
if (remove_triangle_layer):
counts = counts[0:-2]
counts.extend(get_shaped_neuron_counts('funnel', max_neurons, out_feat, max_neurons, funnel_layer + (2 if remove_triangle_layer else 0)))
if (len(counts) != layer_count):
print("\nWarning: diamond layer count does not match " + str(layer_count) + " != " + str(len(counts)) + "\n")
if shape == 'hexagon':
triangle_layer = int(layer_count / 3) + 1
funnel_layer = triangle_layer
brick_layer = layer_count - triangle_layer - funnel_layer
counts.extend(get_shaped_neuron_counts('triangle', in_feat, max_neurons, max_neurons, triangle_layer))
counts.extend(get_shaped_neuron_counts('brick', max_neurons, max_neurons, max_neurons, brick_layer))
counts.extend(get_shaped_neuron_counts('funnel', max_neurons, out_feat, max_neurons, funnel_layer))
if (len(counts) != layer_count):
print("\nWarning: hexagon layer count does not match " + str(layer_count) + " != " + str(len(counts)) + "\n")
if shape == 'stairs':
previous = max_neurons
counts.append(previous)
if layer_count % 2 == 1:
counts.append(previous)
step_size = 2 * int((max_neurons - out_feat) / (layer_count-1))
step_size = max(0, step_size)
for _ in range(int(layer_count / 2 - 1)):
previous = previous - step_size
counts.append(previous)
counts.append(previous)
counts.append(out_feat)
if (len(counts) != layer_count):
print("\nWarning: stairs layer count does not match " + str(layer_count) + " != " + str(len(counts)) + "\n")
return counts
| true | true |
f7f498d46d88a2fa701629d90e262b2a95c71211 | 1,128 | py | Python | dumpster.py | Dark-PRINCESS/Dark-PRINCESS- | 0ad9c67960c8f88745442d264fdcd113b9925807 | [
"MIT"
] | 1 | 2020-10-23T09:35:36.000Z | 2020-10-23T09:35:36.000Z | dumpster.py | Dark-PRINCESS/Dark-PRINCESS- | 0ad9c67960c8f88745442d264fdcd113b9925807 | [
"MIT"
] | null | null | null | dumpster.py | Dark-PRINCESS/Dark-PRINCESS- | 0ad9c67960c8f88745442d264fdcd113b9925807 | [
"MIT"
] | null | null | null | from telethon import events
import asyncio
from userbot.utils import admin_cmd
@borg.on(admin_cmd(pattern="dump ?(.*)"))
async def _(message):
try:
obj = message.pattern_match.group(1)
if len(obj) != 3:
raise IndexError
inp = ' '.join(obj)
except IndexError:
inp = "🥞 🎂 🍫"
u, t, g, o, s, n = inp.split(), '🗑', '<(^_^ <)', '(> ^_^)>', '⠀ ', '\n'
h = [(u[0], u[1], u[2]), (u[0], u[1], ''), (u[0], '', '')]
for something in reversed([y for y in ([''.join(x) for x in (
f + (s, g, s + s * f.count(''), t), f + (g, s * 2 + s * f.count(''), t),
f[:i] + (o, f[i], s * 2 + s * f.count(''), t), f[:i] + (s + s * f.count(''), o, f[i], s, t),
f[:i] + (s * 2 + s * f.count(''), o, f[i], t), f[:i] + (s * 3 + s * f.count(''), o, t),
f[:i] + (s * 3 + s * f.count(''), g, t))] for i, f in enumerate(reversed(h)))]):
for something_else in something:
await asyncio.sleep(0.3)
try:
await message.edit(something_else)
except errors.MessageIdInvalidError:
return
| 40.285714 | 97 | 0.446809 | from telethon import events
import asyncio
from userbot.utils import admin_cmd
@borg.on(admin_cmd(pattern="dump ?(.*)"))
async def _(message):
try:
obj = message.pattern_match.group(1)
if len(obj) != 3:
raise IndexError
inp = ' '.join(obj)
except IndexError:
inp = "🥞 🎂 🍫"
u, t, g, o, s, n = inp.split(), '🗑', '<(^_^ <)', '(> ^_^)>', '⠀ ', '\n'
h = [(u[0], u[1], u[2]), (u[0], u[1], ''), (u[0], '', '')]
for something in reversed([y for y in ([''.join(x) for x in (
f + (s, g, s + s * f.count(''), t), f + (g, s * 2 + s * f.count(''), t),
f[:i] + (o, f[i], s * 2 + s * f.count(''), t), f[:i] + (s + s * f.count(''), o, f[i], s, t),
f[:i] + (s * 2 + s * f.count(''), o, f[i], t), f[:i] + (s * 3 + s * f.count(''), o, t),
f[:i] + (s * 3 + s * f.count(''), g, t))] for i, f in enumerate(reversed(h)))]):
for something_else in something:
await asyncio.sleep(0.3)
try:
await message.edit(something_else)
except errors.MessageIdInvalidError:
return
| true | true |
f7f49ac27526cadce47c6bf488bc8e3e0b841214 | 66,594 | py | Python | samtranslator/swagger/swagger.py | JiteshKanojia/serverless-application-model | 034e7b8ac10d9c5aaa3a5f7999db57308ffc1a2f | [
"Apache-2.0"
] | null | null | null | samtranslator/swagger/swagger.py | JiteshKanojia/serverless-application-model | 034e7b8ac10d9c5aaa3a5f7999db57308ffc1a2f | [
"Apache-2.0"
] | null | null | null | samtranslator/swagger/swagger.py | JiteshKanojia/serverless-application-model | 034e7b8ac10d9c5aaa3a5f7999db57308ffc1a2f | [
"Apache-2.0"
] | null | null | null | import copy
import json
import re
from samtranslator.model.intrinsics import ref
from samtranslator.model.intrinsics import make_conditional, fnSub
from samtranslator.model.exceptions import InvalidDocumentException, InvalidTemplateException
from samtranslator.utils.py27hash_fix import Py27Dict, Py27UniStr
class SwaggerEditor(object):
"""
Wrapper class capable of parsing and generating Swagger JSON. This implements Swagger spec just enough that SAM
cares about. It is built to handle "partial Swagger" ie. Swagger that is incomplete and won't
pass the Swagger spec. But this is necessary for SAM because it iteratively builds the Swagger starting from an
empty skeleton.
NOTE (hawflau): To ensure the same logical ID will be generate in Py3 as in Py2 for AWS::Serverless::Api resource,
we have to apply py27hash_fix. For any dictionary that is created within the swagger body, we need to initiate it
with Py27Dict() instead of {}. We also need to add keys into the Py27Dict instance one by one, so that the input
order could be preserved. This is a must for the purpose of preserving the dict key iteration order, which is
essential for generating the same logical ID.
"""
_OPTIONS_METHOD = "options"
_X_APIGW_INTEGRATION = "x-amazon-apigateway-integration"
_X_APIGW_BINARY_MEDIA_TYPES = "x-amazon-apigateway-binary-media-types"
_CONDITIONAL_IF = "Fn::If"
_X_APIGW_GATEWAY_RESPONSES = "x-amazon-apigateway-gateway-responses"
_X_APIGW_POLICY = "x-amazon-apigateway-policy"
_X_ANY_METHOD = "x-amazon-apigateway-any-method"
_X_APIGW_REQUEST_VALIDATORS = "x-amazon-apigateway-request-validators"
_X_APIGW_REQUEST_VALIDATOR = "x-amazon-apigateway-request-validator"
_X_ENDPOINT_CONFIG = "x-amazon-apigateway-endpoint-configuration"
_CACHE_KEY_PARAMETERS = "cacheKeyParameters"
# https://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html
_ALL_HTTP_METHODS = ["OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE", "PATCH"]
_EXCLUDED_PATHS_FIELDS = ["summary", "description", "parameters"]
_POLICY_TYPE_IAM = "Iam"
_POLICY_TYPE_IP = "Ip"
_POLICY_TYPE_VPC = "Vpc"
def __init__(self, doc):
"""
Initialize the class with a swagger dictionary. This class creates a copy of the Swagger and performs all
modifications on this copy.
:param dict doc: Swagger document as a dictionary
:raises ValueError: If the input Swagger document does not meet the basic Swagger requirements.
"""
if not SwaggerEditor.is_valid(doc):
raise ValueError("Invalid Swagger document")
self._doc = copy.deepcopy(doc)
self.paths = self._doc["paths"]
self.security_definitions = self._doc.get("securityDefinitions", Py27Dict())
self.gateway_responses = self._doc.get(self._X_APIGW_GATEWAY_RESPONSES, Py27Dict())
self.resource_policy = self._doc.get(self._X_APIGW_POLICY, Py27Dict())
self.definitions = self._doc.get("definitions", Py27Dict())
# https://swagger.io/specification/#path-item-object
# According to swagger spec,
# each path item object must be a dict (even it is empty).
# We can do an early path validation on path item objects,
# so we don't need to validate wherever we use them.
for path in self.iter_on_path():
SwaggerEditor.validate_path_item_is_dict(self.get_path(path), path)
def get_path(self, path):
path_dict = self.paths.get(path)
if isinstance(path_dict, dict) and self._CONDITIONAL_IF in path_dict:
path_dict = path_dict[self._CONDITIONAL_IF][1]
return path_dict
def has_path(self, path, method=None):
"""
Returns True if this Swagger has the given path and optional method
:param string path: Path name
:param string method: HTTP method
:return: True, if this path/method is present in the document
"""
method = self._normalize_method_name(method)
path_dict = self.get_path(path)
path_dict_exists = path_dict is not None
if method:
return path_dict_exists and method in path_dict
return path_dict_exists
def method_has_integration(self, method):
"""
Returns true if the given method contains a valid method definition.
This uses the get_method_contents function to handle conditionals.
:param dict method: method dictionary
:return: true if method has one or multiple integrations
"""
for method_definition in self.get_method_contents(method):
if self.method_definition_has_integration(method_definition):
return True
return False
def method_definition_has_integration(self, method_definition):
"""
Checks a method definition to make sure it has an apigw integration
:param dict method_definition: method definition dictionary
:return: True if an integration exists
"""
if method_definition.get(self._X_APIGW_INTEGRATION):
return True
return False
def get_method_contents(self, method):
"""
Returns the swagger contents of the given method. This checks to see if a conditional block
has been used inside of the method, and, if so, returns the method contents that are
inside of the conditional.
:param dict method: method dictionary
:return: list of swagger component dictionaries for the method
"""
if self._CONDITIONAL_IF in method:
return method[self._CONDITIONAL_IF][1:]
return [method]
def add_disable_execute_api_endpoint_extension(self, disable_execute_api_endpoint):
"""Add endpoint configuration to _X_APIGW_ENDPOINT_CONFIG in open api definition as extension
Following this guide:
https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-swagger-extensions-endpoint-configuration.html
:param boolean disable_execute_api_endpoint: Specifies whether clients can invoke your API by using the default execute-api endpoint.
"""
if not self._doc.get(self._X_ENDPOINT_CONFIG):
self._doc[self._X_ENDPOINT_CONFIG] = {}
DISABLE_EXECUTE_API_ENDPOINT = "disableExecuteApiEndpoint"
set_disable_api_endpoint = {DISABLE_EXECUTE_API_ENDPOINT: disable_execute_api_endpoint}
self._doc[self._X_ENDPOINT_CONFIG].update(set_disable_api_endpoint)
def has_integration(self, path, method):
"""
Checks if an API Gateway integration is already present at the given path/method
:param string path: Path name
:param string method: HTTP method
:return: True, if an API Gateway integration is already present
"""
method = self._normalize_method_name(method)
path_dict = self.get_path(path)
return (
self.has_path(path, method)
and isinstance(path_dict[method], dict)
and self.method_has_integration(path_dict[method])
) # Integration present and non-empty
def add_path(self, path, method=None):
"""
Adds the path/method combination to the Swagger, if not already present
:param string path: Path name
:param string method: HTTP method
:raises ValueError: If the value of `path` in Swagger is not a dictionary
"""
method = self._normalize_method_name(method)
path_dict = self.paths.setdefault(path, Py27Dict())
SwaggerEditor.validate_path_item_is_dict(path_dict, path)
if self._CONDITIONAL_IF in path_dict:
path_dict = path_dict[self._CONDITIONAL_IF][1]
path_dict.setdefault(method, Py27Dict())
    def add_lambda_integration(
        self, path, method, integration_uri, method_auth_config=None, api_auth_config=None, condition=None
    ):
        """
        Adds aws_proxy APIGW integration to the given path+method.
        :param string path: Path name
        :param string method: HTTP Method
        :param string integration_uri: URI for the integration.
        :param dict method_auth_config: Auth configuration (Authorizer/InvokeRole) set on the method
        :param dict api_auth_config: Auth configuration (DefaultAuthorizer/InvokeRole) set on the API
        :param condition: Name of the CloudFormation condition the integration depends on, if any
        :raises ValueError: If an integration is already present at this path/method
        """
        method = self._normalize_method_name(method)
        if self.has_integration(path, method):
            raise ValueError("Lambda integration already exists on Path={}, Method={}".format(path, method))
        self.add_path(path, method)
        # Wrap the integration_uri in a Condition if one exists on that function
        # This is necessary so CFN doesn't try to resolve the integration reference.
        if condition:
            integration_uri = make_conditional(condition, integration_uri)
        path_dict = self.get_path(path)
        path_dict[method][self._X_APIGW_INTEGRATION] = Py27Dict()
        # insert key one by one to preserve input order
        path_dict[method][self._X_APIGW_INTEGRATION]["type"] = "aws_proxy"
        path_dict[method][self._X_APIGW_INTEGRATION]["httpMethod"] = "POST"
        path_dict[method][self._X_APIGW_INTEGRATION]["uri"] = integration_uri
        method_auth_config = method_auth_config or Py27Dict()
        api_auth_config = api_auth_config or Py27Dict()
        # NOTE: `and` binds tighter than `or`, so this reads as: the method itself uses
        # AWS_IAM, OR (the API default is AWS_IAM and the method has no auth config of its own)
        if (
            method_auth_config.get("Authorizer") == "AWS_IAM"
            or api_auth_config.get("DefaultAuthorizer") == "AWS_IAM"
            and not method_auth_config
        ):
            method_invoke_role = method_auth_config.get("InvokeRole")
            # An InvokeRole key that is present but falsy is treated as an explicit "NONE"
            if not method_invoke_role and "InvokeRole" in method_auth_config:
                method_invoke_role = "NONE"
            api_invoke_role = api_auth_config.get("InvokeRole")
            if not api_invoke_role and "InvokeRole" in api_auth_config:
                api_invoke_role = "NONE"
            credentials = self._generate_integration_credentials(
                method_invoke_role=method_invoke_role, api_invoke_role=api_invoke_role
            )
            if credentials and credentials != "NONE":
                # NOTE(review): this writes via self.paths[path][method] rather than path_dict[method];
                # for a conditional path item these are different objects — confirm this is intended.
                self.paths[path][method][self._X_APIGW_INTEGRATION]["credentials"] = credentials
        # If 'responses' key is *not* present, add it with an empty dict as value
        path_dict[method].setdefault("responses", Py27Dict())
        # If a condition is present, wrap all method contents up into the condition
        if condition:
            path_dict[method] = make_conditional(condition, path_dict[method])
    def add_state_machine_integration(
        self,
        path,
        method,
        integration_uri,
        credentials,
        request_templates=None,
        condition=None,
    ):
        """
        Adds aws APIGW integration to the given path+method.
        :param string path: Path name
        :param string method: HTTP Method
        :param string integration_uri: URI for the integration
        :param string credentials: Credentials for the integration
        :param dict request_templates: A map of templates that are applied on the request payload.
        :param condition: Name of the CloudFormation condition for the integration, if any
        :raises ValueError: If an integration is already present at this path/method
        """
        method = self._normalize_method_name(method)
        if self.has_integration(path, method):
            raise ValueError("Integration already exists on Path={}, Method={}".format(path, method))
        self.add_path(path, method)
        # Wrap the integration_uri in a Condition if one exists on that state machine
        # This is necessary so CFN doesn't try to resolve the integration reference.
        if condition:
            integration_uri = make_conditional(condition, integration_uri)
        path_dict = self.get_path(path)
        # Responses
        integration_responses = Py27Dict()
        # insert key one by one to preserve input order
        integration_responses["200"] = Py27Dict({"statusCode": "200"})
        integration_responses["400"] = Py27Dict({"statusCode": "400"})
        default_method_responses = Py27Dict()
        # insert key one by one to preserve input order
        default_method_responses["200"] = Py27Dict({"description": "OK"})
        default_method_responses["400"] = Py27Dict({"description": "Bad Request"})
        path_dict[method][self._X_APIGW_INTEGRATION] = Py27Dict()
        # insert key one by one to preserve input order
        path_dict[method][self._X_APIGW_INTEGRATION]["type"] = "aws"
        path_dict[method][self._X_APIGW_INTEGRATION]["httpMethod"] = "POST"
        path_dict[method][self._X_APIGW_INTEGRATION]["uri"] = integration_uri
        path_dict[method][self._X_APIGW_INTEGRATION]["responses"] = integration_responses
        path_dict[method][self._X_APIGW_INTEGRATION]["credentials"] = credentials
        # If 'responses' key is *not* present, add it with an empty dict as value
        path_dict[method].setdefault("responses", default_method_responses)
        if request_templates:
            path_dict[method][self._X_APIGW_INTEGRATION].update({"requestTemplates": request_templates})
        # If a condition is present, wrap all method contents up into the condition
        if condition:
            path_dict[method] = make_conditional(condition, path_dict[method])
def make_path_conditional(self, path, condition):
"""
Wrap entire API path definition in a CloudFormation if condition.
"""
self.paths[path] = make_conditional(condition, self.paths[path])
def _generate_integration_credentials(self, method_invoke_role=None, api_invoke_role=None):
return self._get_invoke_role(method_invoke_role or api_invoke_role)
def _get_invoke_role(self, invoke_role):
CALLER_CREDENTIALS_ARN = "arn:aws:iam::*:user/*"
return invoke_role if invoke_role and invoke_role != "CALLER_CREDENTIALS" else CALLER_CREDENTIALS_ARN
def iter_on_path(self):
"""
Yields all the paths available in the Swagger. As a caller, if you add new paths to Swagger while iterating,
they will not show up in this iterator
:yields string: Path name
"""
for path, value in self.paths.items():
yield path
def add_cors(
self, path, allowed_origins, allowed_headers=None, allowed_methods=None, max_age=None, allow_credentials=None
):
"""
Add CORS configuration to this path. Specifically, we will add a OPTIONS response config to the Swagger that
will return headers required for CORS. Since SAM uses aws_proxy integration, we cannot inject the headers
into the actual response returned from Lambda function. This is something customers have to implement
themselves.
If OPTIONS method is already present for the Path, we will skip adding CORS configuration
Following this guide:
https://docs.aws.amazon.com/apigateway/latest/developerguide/how-to-cors.html#enable-cors-for-resource-using-swagger-importer-tool
:param string path: Path to add the CORS configuration to.
:param string/dict allowed_origins: Comma separate list of allowed origins.
Value can also be an intrinsic function dict.
:param string/dict allowed_headers: Comma separated list of allowed headers.
Value can also be an intrinsic function dict.
:param string/dict allowed_methods: Comma separated list of allowed methods.
Value can also be an intrinsic function dict.
:param integer/dict max_age: Maximum duration to cache the CORS Preflight request. Value is set on
Access-Control-Max-Age header. Value can also be an intrinsic function dict.
:param bool/None allow_credentials: Flags whether request is allowed to contain credentials.
:raises ValueError: When values for one of the allowed_* variables is empty
"""
# Skip if Options is already present
if self.has_path(path, self._OPTIONS_METHOD):
return
if not allowed_origins:
raise InvalidTemplateException("Invalid input. Value for AllowedOrigins is required")
if not allowed_methods:
# AllowMethods is not given. Let's try to generate the list from the given Swagger.
allowed_methods = self._make_cors_allowed_methods_for_path(path)
# APIGW expects the value to be a "string expression". Hence wrap in another quote. Ex: "'GET,POST,DELETE'"
allowed_methods = "'{}'".format(allowed_methods)
if allow_credentials is not True:
allow_credentials = False
# Add the Options method and the CORS response
self.add_path(path, self._OPTIONS_METHOD)
self.get_path(path)[self._OPTIONS_METHOD] = self._options_method_response_for_cors(
allowed_origins, allowed_headers, allowed_methods, max_age, allow_credentials
)
def add_binary_media_types(self, binary_media_types):
"""
Args:
binary_media_types: list
"""
def replace_recursively(bmt):
"""replaces "~1" with "/" for the input binary_media_types recursively"""
if isinstance(bmt, dict):
to_return = Py27Dict()
for k, v in bmt.items():
to_return[Py27UniStr(k.replace("~1", "/"))] = replace_recursively(v)
return to_return
if isinstance(bmt, list):
return [replace_recursively(item) for item in bmt]
if isinstance(bmt, str) or isinstance(bmt, Py27UniStr):
return Py27UniStr(bmt.replace("~1", "/"))
return bmt
bmt = replace_recursively(binary_media_types)
self._doc[self._X_APIGW_BINARY_MEDIA_TYPES] = bmt
    def _options_method_response_for_cors(
        self, allowed_origins, allowed_headers=None, allowed_methods=None, max_age=None, allow_credentials=None
    ):
        """
        Returns a Swagger snippet containing configuration for OPTIONS HTTP Method to configure CORS.
        This snippet is taken from public documentation:
        https://docs.aws.amazon.com/apigateway/latest/developerguide/how-to-cors.html#enable-cors-for-resource-using-swagger-importer-tool
        :param string/dict allowed_origins: Comma separate list of allowed origins.
            Value can also be an intrinsic function dict.
        :param string/dict allowed_headers: Comma separated list of allowed headers.
            Value can also be an intrinsic function dict.
        :param string/dict allowed_methods: Comma separated list of allowed methods.
            Value can also be an intrinsic function dict.
        :param integer/dict max_age: Maximum duration to cache the CORS Preflight request. Value is set on
            Access-Control-Max-Age header. Value can also be an intrinsic function dict.
        :param bool allow_credentials: Flags whether request is allowed to contain credentials.
        :return dict: Dictionary containing Options method configuration for CORS
        """
        # CORS response header names
        ALLOW_ORIGIN = "Access-Control-Allow-Origin"
        ALLOW_HEADERS = "Access-Control-Allow-Headers"
        ALLOW_METHODS = "Access-Control-Allow-Methods"
        MAX_AGE = "Access-Control-Max-Age"
        ALLOW_CREDENTIALS = "Access-Control-Allow-Credentials"
        # Maps a header name to the APIGW response-parameter key for it
        HEADER_RESPONSE = lambda x: "method.response.header." + x
        response_parameters = Py27Dict(
            {
                # AllowedOrigin is always required
                HEADER_RESPONSE(ALLOW_ORIGIN): allowed_origins
            }
        )
        response_headers = Py27Dict(
            {
                # Allow Origin is always required
                ALLOW_ORIGIN: {"type": "string"}
            }
        )
        # Optional values. Skip the header if value is empty
        #
        # The values must not be empty string or null. Also, value of '*' is a very recent addition (2017) and
        # not supported in all the browsers. So it is important to skip the header if value is not given
        # https://fetch.spec.whatwg.org/#http-new-header-syntax
        #
        if allowed_headers:
            response_parameters[HEADER_RESPONSE(ALLOW_HEADERS)] = allowed_headers
            response_headers[ALLOW_HEADERS] = {"type": "string"}
        if allowed_methods:
            response_parameters[HEADER_RESPONSE(ALLOW_METHODS)] = allowed_methods
            response_headers[ALLOW_METHODS] = {"type": "string"}
        if max_age is not None:
            # MaxAge can be set to 0, which is a valid value. So explicitly check against None
            response_parameters[HEADER_RESPONSE(MAX_AGE)] = max_age
            response_headers[MAX_AGE] = {"type": "integer"}
        if allow_credentials is True:
            # Allow-Credentials only has a valid value of true, it should be omitted otherwise.
            # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Credentials
            response_parameters[HEADER_RESPONSE(ALLOW_CREDENTIALS)] = "'true'"
            response_headers[ALLOW_CREDENTIALS] = {"type": "string"}
        # construct snippet and insert key one by one to preserve input order
        to_return = Py27Dict()
        to_return["summary"] = "CORS support"
        to_return["consumes"] = ["application/json"]
        to_return["produces"] = ["application/json"]
        # Mock integration: APIGW answers the preflight itself without invoking a backend
        to_return[self._X_APIGW_INTEGRATION] = Py27Dict()
        to_return[self._X_APIGW_INTEGRATION]["type"] = "mock"
        to_return[self._X_APIGW_INTEGRATION]["requestTemplates"] = {"application/json": '{\n  "statusCode" : 200\n}\n'}
        to_return[self._X_APIGW_INTEGRATION]["responses"] = Py27Dict()
        to_return[self._X_APIGW_INTEGRATION]["responses"]["default"] = Py27Dict()
        to_return[self._X_APIGW_INTEGRATION]["responses"]["default"]["statusCode"] = "200"
        to_return[self._X_APIGW_INTEGRATION]["responses"]["default"]["responseParameters"] = response_parameters
        to_return[self._X_APIGW_INTEGRATION]["responses"]["default"]["responseTemplates"] = {"application/json": "{}\n"}
        to_return["responses"] = Py27Dict()
        to_return["responses"]["200"] = Py27Dict()
        to_return["responses"]["200"]["description"] = "Default response for CORS method"
        to_return["responses"]["200"]["headers"] = response_headers
        return to_return
def _make_cors_allowed_methods_for_path(self, path):
"""
Creates the value for Access-Control-Allow-Methods header for given path. All HTTP methods defined for this
path will be included in the result. If the path contains "ANY" method, then *all available* HTTP methods will
be returned as result.
:param string path: Path to generate AllowMethods value for
:return string: String containing the value of AllowMethods, if the path contains any methods.
Empty string, otherwise
"""
if not self.has_path(path):
return ""
# At this point, value of Swagger path should be a dictionary with method names being the keys
methods = list(self.get_path(path).keys())
if self._X_ANY_METHOD in methods:
# API Gateway's ANY method is not a real HTTP method but a wildcard representing all HTTP methods
allow_methods = self._ALL_HTTP_METHODS
else:
allow_methods = methods
allow_methods.append("options") # Always add Options to the CORS methods response
# Clean up the result:
#
# - HTTP Methods **must** be upper case and they are case sensitive.
# (https://tools.ietf.org/html/rfc7231#section-4.1)
# - Convert to set to remove any duplicates
# - Sort to keep this list stable because it could be constructed from dictionary keys which are *not* ordered.
# Therefore we might get back a different list each time the code runs. To prevent any unnecessary
# regression, we sort the list so the returned value is stable.
allow_methods = list({m.upper() for m in allow_methods})
allow_methods.sort()
# Allow-Methods is comma separated string
return ",".join(allow_methods)
def add_authorizers_security_definitions(self, authorizers):
"""
Add Authorizer definitions to the securityDefinitions part of Swagger.
:param list authorizers: List of Authorizer configurations which get translated to securityDefinitions.
"""
self.security_definitions = self.security_definitions or Py27Dict()
for authorizer_name, authorizer in authorizers.items():
self.security_definitions[authorizer_name] = authorizer.generate_swagger()
def add_awsiam_security_definition(self):
"""
Adds AWS_IAM definition to the securityDefinitions part of Swagger.
Note: this method is idempotent
"""
# construct aws_iam_security_definition as Py27Dict and insert key one by one to preserce input order
aws_iam_security_definition = Py27Dict()
aws_iam_security_definition["AWS_IAM"] = Py27Dict()
aws_iam_security_definition["AWS_IAM"]["x-amazon-apigateway-authtype"] = "awsSigv4"
aws_iam_security_definition["AWS_IAM"]["type"] = "apiKey"
aws_iam_security_definition["AWS_IAM"]["name"] = "Authorization"
aws_iam_security_definition["AWS_IAM"]["in"] = "header"
self.security_definitions = self.security_definitions or Py27Dict()
# Only add the security definition if it doesn't exist. This helps ensure
# that we minimize changes to the swagger in the case of user defined swagger
if "AWS_IAM" not in self.security_definitions:
self.security_definitions.update(aws_iam_security_definition)
def add_apikey_security_definition(self):
"""
Adds api_key definition to the securityDefinitions part of Swagger.
Note: this method is idempotent
"""
# construct api_key_security_definiton as py27 dict
# and insert keys one by one to preserve input order
api_key_security_definition = Py27Dict()
api_key_security_definition["api_key"] = Py27Dict()
api_key_security_definition["api_key"]["type"] = "apiKey"
api_key_security_definition["api_key"]["name"] = "x-api-key"
api_key_security_definition["api_key"]["in"] = "header"
self.security_definitions = self.security_definitions or Py27Dict()
# Only add the security definition if it doesn't exist. This helps ensure
# that we minimize changes to the swagger in the case of user defined swagger
if "api_key" not in self.security_definitions:
self.security_definitions.update(api_key_security_definition)
    def set_path_default_authorizer(
        self, path, default_authorizer, authorizers, add_default_auth_to_preflight=True, api_authorizers=None
    ):
        """
        Adds the default_authorizer to the security block for each method on this path unless an Authorizer
        was defined at the Function/Path/Method level. This is intended to be used to set the
        authorizer security restriction for all api methods based upon the default configured in the
        Serverless API.
        :param string path: Path name
        :param string default_authorizer: Name of the authorizer to use as the default. Must be a key in the
            authorizers param.
        :param dict authorizers: Mapping of Authorizer name to configuration defined on the related Api.
        :param bool add_default_auth_to_preflight: Bool of whether to add the default
            authorizer to OPTIONS preflight requests.
        :param dict api_authorizers: Authorizer configurations used to look up AuthorizationScopes
            for the default authorizer.
        """
        for method_name, method in self.get_path(path).items():
            normalized_method_name = self._normalize_method_name(method_name)
            # Excluding non-method sections
            if normalized_method_name in SwaggerEditor._EXCLUDED_PATHS_FIELDS:
                continue
            if add_default_auth_to_preflight or normalized_method_name != "options":
                SwaggerEditor.validate_is_dict(
                    method,
                    'Value of "{}" ({}) for path {} is not a valid dictionary.'.format(method_name, method, path),
                )
                # It is possible that the method could have two definitions in a Fn::If block.
                for method_definition in self.get_method_contents(method):
                    SwaggerEditor.validate_is_dict(
                        method_definition,
                        'Value of "{}" ({}) for path {} is not a valid dictionary.'.format(
                            method_name, method_definition, path
                        ),
                    )
                    # If no integration given, then we don't need to process this definition (could be AWS::NoValue)
                    if not self.method_definition_has_integration(method_definition):
                        continue
                    existing_security = method_definition.get("security", [])
                    # AWS_IAM is always treated as an authorizer name, alongside any configured ones
                    authorizer_list = ["AWS_IAM"]
                    if authorizers:
                        authorizer_list.extend(authorizers.keys())
                    authorizer_names = set(authorizer_list)
                    existing_non_authorizer_security = []
                    existing_authorizer_security = []
                    # Split existing security into Authorizers and everything else
                    # (e.g. sigv4 (AWS_IAM), api_key (API Key/Usage Plans), NONE (marker for ignoring default))
                    # We want to ensure only a single Authorizer security entry exists while keeping everything else
                    for security in existing_security:
                        SwaggerEditor.validate_is_dict(
                            security, "{} in Security for path {} is not a valid dictionary.".format(security, path)
                        )
                        if authorizer_names.isdisjoint(security.keys()):
                            existing_non_authorizer_security.append(security)
                        else:
                            existing_authorizer_security.append(security)
                    none_idx = -1
                    authorizer_security = []
                    # Check for an existing Authorizer before applying the default. It would be simpler
                    # if instead we applied the DefaultAuthorizer first and then simply
                    # overwrote it if necessary, however, the order in which things get
                    # applied (Function Api Events first; then Api Resource) complicates it.
                    # Check if Function/Path/Method specified 'NONE' for Authorizer
                    for idx, security in enumerate(existing_non_authorizer_security):
                        is_none = any(key == "NONE" for key in security.keys())
                        if is_none:
                            none_idx = idx
                            break
                    # NONE was found; remove it and don't add the DefaultAuthorizer
                    if none_idx > -1:
                        del existing_non_authorizer_security[none_idx]
                    # Existing Authorizer found (defined at Function/Path/Method); use that instead of default
                    elif existing_authorizer_security:
                        authorizer_security = existing_authorizer_security
                    # No existing Authorizer found; use default
                    else:
                        security_dict = Py27Dict()
                        security_dict[default_authorizer] = self._get_authorization_scopes(
                            api_authorizers, default_authorizer
                        )
                        authorizer_security = [security_dict]
                    security = existing_non_authorizer_security + authorizer_security
                    if security:
                        method_definition["security"] = security
                        # If the first security entry requires AWS_IAM, make sure the AWS_IAM
                        # security definition exists in the document.
                        # NOTE(review): only security[0] is inspected — confirm AWS_IAM can never
                        # appear in a later entry here.
                        if "AWS_IAM" in method_definition["security"][0]:
                            self.add_awsiam_security_definition()
    def set_path_default_apikey_required(self, path):
        """
        Add the ApiKey security as required for each method on this path unless ApiKeyRequired
        was defined at the Function/Path/Method level. This is intended to be used to set the
        apikey security restriction for all api methods based upon the default configured in the
        Serverless API.
        :param string path: Path name
        """
        for method_name, method in self.get_path(path).items():
            # Excluding non-method sections
            if method_name in SwaggerEditor._EXCLUDED_PATHS_FIELDS:
                continue
            # It is possible that the method could have two definitions in a Fn::If block.
            for method_definition in self.get_method_contents(method):
                # If no integration given, then we don't need to process this definition (could be AWS::NoValue)
                if not self.method_definition_has_integration(method_definition):
                    continue
                existing_security = method_definition.get("security", [])
                # 'api_key_false' is a marker entry meaning the method explicitly opted out
                apikey_security_names = set(["api_key", "api_key_false"])
                existing_non_apikey_security = []
                existing_apikey_security = []
                apikey_security = []
                # Split existing security into ApiKey and everything else
                # (e.g. sigv4 (AWS_IAM), authorizers, NONE (marker for ignoring default authorizer))
                # We want to ensure only a single ApiKey security entry exists while keeping everything else
                for security in existing_security:
                    if apikey_security_names.isdisjoint(security.keys()):
                        existing_non_apikey_security.append(security)
                    else:
                        existing_apikey_security.append(security)
                # Check for an existing method level ApiKey setting before applying the default. It would be simpler
                # if instead we applied the default first and then simply
                # overwrote it if necessary, however, the order in which things get
                # applied (Function Api Events first; then Api Resource) complicates it.
                # Check if Function/Path/Method specified 'False' for ApiKeyRequired
                apikeyfalse_idx = -1
                for idx, security in enumerate(existing_apikey_security):
                    is_none = any(key == "api_key_false" for key in security.keys())
                    if is_none:
                        apikeyfalse_idx = idx
                        break
                # api_key_false was found; remove it and don't add default api_key security setting
                if apikeyfalse_idx > -1:
                    del existing_apikey_security[apikeyfalse_idx]
                # No existing ApiKey setting found or it's already set to the default
                else:
                    security_dict = Py27Dict()
                    security_dict["api_key"] = []
                    apikey_security = [security_dict]
                security = existing_non_apikey_security + apikey_security
                # Only write back when the combined security actually changed
                if security != existing_security:
                    method_definition["security"] = security
def add_auth_to_method(self, path, method_name, auth, api):
"""
Adds auth settings for this path/method. Auth settings currently consist of Authorizers and ApiKeyRequired
but this method will eventually include setting other auth settings such as Resource Policy, etc.
This is used to configure the security for individual functions.
:param string path: Path name
:param string method_name: Method name
:param dict auth: Auth configuration such as Authorizers, ApiKeyRequired, ResourcePolicy
:param dict api: Reference to the related Api's properties as defined in the template.
"""
method_authorizer = auth and auth.get("Authorizer")
method_scopes = auth and auth.get("AuthorizationScopes")
api_auth = api and api.get("Auth")
authorizers = api_auth and api_auth.get("Authorizers")
if method_authorizer:
self._set_method_authorizer(path, method_name, method_authorizer, authorizers, method_scopes)
method_apikey_required = auth and auth.get("ApiKeyRequired")
if method_apikey_required is not None:
self._set_method_apikey_handling(path, method_name, method_apikey_required)
    def _set_method_authorizer(self, path, method_name, authorizer_name, authorizers=None, method_scopes=None):
        """
        Adds the authorizer_name to the security block for each method on this path.
        This is used to configure the authorizer for individual functions.
        :param string path: Path name
        :param string method_name: Method name
        :param string authorizer_name: Name of the authorizer to use. Must be a key in the
            authorizers param.
        :param dict authorizers: Mapping of Authorizer name to configuration, used to look up
            AuthorizationScopes for the authorizer.
        :param list method_scopes: Method-level AuthorizationScopes; overrides the authorizer's
            scopes when provided.
        """
        if authorizers is None:
            authorizers = Py27Dict()
        normalized_method_name = self._normalize_method_name(method_name)
        # It is possible that the method could have two definitions in a Fn::If block.
        for method_definition in self.get_method_contents(self.get_path(path)[normalized_method_name]):
            # If no integration given, then we don't need to process this definition (could be AWS::NoValue)
            if not self.method_definition_has_integration(method_definition):
                continue
            existing_security = method_definition.get("security", [])
            security_dict = Py27Dict()
            security_dict[authorizer_name] = []
            authorizer_security = [security_dict]
            # This assumes there are no authorizers already configured in the existing security block
            security = existing_security + authorizer_security
            if authorizer_name != "NONE" and authorizers:
                # Method-level scopes take precedence over the authorizer's own scopes
                method_auth_scopes = authorizers.get(authorizer_name, Py27Dict()).get("AuthorizationScopes")
                if method_scopes is not None:
                    method_auth_scopes = method_scopes
                if authorizers.get(authorizer_name) is not None and method_auth_scopes is not None:
                    # security_dict is aliased inside `security`, so this updates the entry in place
                    security_dict[authorizer_name] = method_auth_scopes
            if security:
                method_definition["security"] = security
                # If the first security entry requires AWS_IAM (e.g. Authorizer: AWS_IAM on the
                # method), make sure the AWS_IAM security definition exists in the document.
                # NOTE(review): only security[0] is inspected — confirm AWS_IAM can never appear
                # in a later entry here.
                if "AWS_IAM" in method_definition["security"][0]:
                    self.add_awsiam_security_definition()
def _set_method_apikey_handling(self, path, method_name, apikey_required):
"""
Adds the apikey setting to the security block for each method on this path.
This is used to configure the authorizer for individual functions.
:param string path: Path name
:param string method_name: Method name
:param bool apikey_required: Whether the apikey security is required
"""
normalized_method_name = self._normalize_method_name(method_name)
# It is possible that the method could have two definitions in a Fn::If block.
for method_definition in self.get_method_contents(self.get_path(path)[normalized_method_name]):
# If no integration given, then we don't need to process this definition (could be AWS::NoValue)
if not self.method_definition_has_integration(method_definition):
continue
existing_security = method_definition.get("security", [])
if apikey_required:
# We want to enable apikey required security
security_dict = Py27Dict()
security_dict["api_key"] = []
apikey_security = [security_dict]
self.add_apikey_security_definition()
else:
# The method explicitly does NOT require apikey and there is an API default
# so let's add a marker 'api_key_false' so that we don't incorrectly override
# with the api default
security_dict = Py27Dict()
security_dict["api_key_false"] = []
apikey_security = [security_dict]
# This assumes there are no autorizers already configured in the existing security block
security = existing_security + apikey_security
if security != existing_security:
method_definition["security"] = security
    def add_request_validator_to_method(self, path, method_name, validate_body=False, validate_parameters=False):
        """
        Adds a request validator definition to the document (if absent) and attaches it
        to the given path/method.
        :param string path: Path name
        :param string method_name: Method name
        :param bool validate_body: Add validator parameter on the body
        :param bool validate_parameters: Validate request
        """
        normalized_method_name = self._normalize_method_name(method_name)
        validator_name = SwaggerEditor.get_validator_name(validate_body, validate_parameters)
        # Creating validator as py27 dict
        # and insert keys one by one to preserve input order
        request_validator_definition = Py27Dict()
        request_validator_definition[validator_name] = Py27Dict()
        request_validator_definition[validator_name]["validateRequestBody"] = validate_body
        request_validator_definition[validator_name]["validateRequestParameters"] = validate_parameters
        if not self._doc.get(self._X_APIGW_REQUEST_VALIDATORS):
            self._doc[self._X_APIGW_REQUEST_VALIDATORS] = Py27Dict()
        if not self._doc[self._X_APIGW_REQUEST_VALIDATORS].get(validator_name):
            # Adding only if the validator hasn't been defined already
            self._doc[self._X_APIGW_REQUEST_VALIDATORS].update(request_validator_definition)
        # It is possible that the method could have two definitions in a Fn::If block.
        for path_method_name, method in self.get_path(path).items():
            normalized_path_method_name = self._normalize_method_name(path_method_name)
            # Adding it to only given method to the path
            if normalized_path_method_name == normalized_method_name:
                for method_definition in self.get_method_contents(method):
                    # If no integration given, then we don't need to process this definition (could be AWS::NoValue)
                    if not self.method_definition_has_integration(method_definition):
                        continue
                    set_validator_to_method = Py27Dict({self._X_APIGW_REQUEST_VALIDATOR: validator_name})
                    # Setting validator to the given method
                    method_definition.update(set_validator_to_method)
    def add_request_model_to_method(self, path, method_name, request_model):
        """
        Adds request model body parameter for this path/method, using the Swagger 2.0
        "parameters" form or the OpenAPI 3.x "requestBody" form depending on the document version.
        :param string path: Path name
        :param string method_name: Method name
        :param dict request_model: Model configuration ("Model" name and optional "Required" flag)
        """
        # NOTE(review): .get("Model").lower() raises AttributeError when request_model
        # is truthy but has no "Model" key — presumably validated upstream; confirm.
        model_name = request_model and request_model.get("Model").lower()
        model_required = request_model and request_model.get("Required")
        normalized_method_name = self._normalize_method_name(method_name)
        # It is possible that the method could have two definitions in a Fn::If block.
        for method_definition in self.get_method_contents(self.get_path(path)[normalized_method_name]):
            # If no integration given, then we don't need to process this definition (could be AWS::NoValue)
            if not self.method_definition_has_integration(method_definition):
                continue
            if self._doc.get("swagger") is not None:
                # Swagger 2.0: models are attached via a "body" entry in "parameters"
                existing_parameters = method_definition.get("parameters", [])
                # construct parameter as py27 dict
                # and insert keys one by one to preserve input order
                parameter = Py27Dict()
                parameter["in"] = "body"
                parameter["name"] = model_name
                parameter["schema"] = {"$ref": "#/definitions/{}".format(model_name)}
                if model_required is not None:
                    parameter["required"] = model_required
                existing_parameters.append(parameter)
                method_definition["parameters"] = existing_parameters
            elif self._doc.get("openapi") and SwaggerEditor.safe_compare_regex_with_string(
                SwaggerEditor.get_openapi_version_3_regex(), self._doc["openapi"]
            ):
                # OpenAPI 3.x: models are attached via "requestBody" with a component schema ref
                method_definition["requestBody"] = {
                    "content": {"application/json": {"schema": {"$ref": "#/components/schemas/{}".format(model_name)}}}
                }
                if model_required is not None:
                    method_definition["requestBody"]["required"] = model_required
def add_gateway_responses(self, gateway_responses):
    """
    Translate and store Gateway Response definitions on this Swagger document.

    :param dict gateway_responses: Mapping of response type to a GatewayResponse
        configuration object that renders itself to Swagger via generate_swagger().
    """
    # Lazily initialize the container (Py27Dict keeps py2-compatible key ordering).
    if not self.gateway_responses:
        self.gateway_responses = Py27Dict()
    for gateway_response_type in gateway_responses:
        self.gateway_responses[gateway_response_type] = gateway_responses[gateway_response_type].generate_swagger()
def add_models(self, models):
    """
    Register Model schemas under the Swagger "definitions" section.

    :param dict models: Mapping of model name to its JSON schema
    :return:
    """
    # Lazily create the definitions container, preserving any existing entries.
    if not self.definitions:
        self.definitions = Py27Dict()
    for name, model_schema in models.items():
        # Both "type" and "properties" are mandatory for a SAM Model schema.
        if not model_schema.get("type"):
            raise InvalidDocumentException([InvalidTemplateException("'Models' schema is missing 'type'.")])
        if not model_schema.get("properties"):
            raise InvalidDocumentException([InvalidTemplateException("'Models' schema is missing 'properties'.")])
        # Model names are stored lowercased so lookups are case-insensitive.
        self.definitions[name.lower()] = model_schema
def add_resource_policy(self, resource_policy, path, stage):
    """
    Add resource policy definition to Swagger.

    Translates the SAM ResourcePolicy configuration (account/IP/VPC white- and
    blacklists plus their intrinsic-function variants) into IAM statements
    accumulated on self.resource_policy, then writes that policy into the
    document under the x-amazon-apigateway-policy extension.

    :param dict resource_policy: Dictionary of resource_policy statements which gets translated
    :param string path: Path name the policy statements apply to
    :param stage: Stage name (may be an intrinsic) substituted into the resource ARNs
    :return:
    """
    if resource_policy is None:
        return

    SwaggerEditor.validate_is_dict(resource_policy, "Resource Policy is not a valid dictionary.")

    aws_account_whitelist = resource_policy.get("AwsAccountWhitelist")
    aws_account_blacklist = resource_policy.get("AwsAccountBlacklist")
    ip_range_whitelist = resource_policy.get("IpRangeWhitelist")
    ip_range_blacklist = resource_policy.get("IpRangeBlacklist")
    source_vpc_whitelist = resource_policy.get("SourceVpcWhitelist")
    source_vpc_blacklist = resource_policy.get("SourceVpcBlacklist")

    # Intrinsic's supported in these properties
    source_vpc_intrinsic_whitelist = resource_policy.get("IntrinsicVpcWhitelist")
    source_vpce_intrinsic_whitelist = resource_policy.get("IntrinsicVpceWhitelist")
    source_vpc_intrinsic_blacklist = resource_policy.get("IntrinsicVpcBlacklist")
    source_vpce_intrinsic_blacklist = resource_policy.get("IntrinsicVpceBlacklist")

    if aws_account_whitelist is not None:
        resource_list = self._get_method_path_uri_list(path, stage)
        self._add_iam_resource_policy_for_method(aws_account_whitelist, "Allow", resource_list)

    if aws_account_blacklist is not None:
        resource_list = self._get_method_path_uri_list(path, stage)
        self._add_iam_resource_policy_for_method(aws_account_blacklist, "Deny", resource_list)

    if ip_range_whitelist is not None:
        resource_list = self._get_method_path_uri_list(path, stage)
        # NotIpAddress on the Deny statement => deny everything outside the whitelist
        self._add_ip_resource_policy_for_method(ip_range_whitelist, "NotIpAddress", resource_list)

    if ip_range_blacklist is not None:
        resource_list = self._get_method_path_uri_list(path, stage)
        self._add_ip_resource_policy_for_method(ip_range_blacklist, "IpAddress", resource_list)

    # Plain-string VPC lists must be fully resolved; intrinsic values belong in
    # the Intrinsic* properties instead.
    if not SwaggerEditor._validate_list_property_is_resolved(source_vpc_blacklist):
        raise InvalidDocumentException(
            [
                InvalidTemplateException(
                    "SourceVpcBlacklist must be a list of strings. Use IntrinsicVpcBlacklist instead for values that use Intrinsic Functions"
                )
            ]
        )

    # FIXME: check if this requires py27 dict?
    blacklist_dict = {
        "StringEndpointList": source_vpc_blacklist,
        "IntrinsicVpcList": source_vpc_intrinsic_blacklist,
        "IntrinsicVpceList": source_vpce_intrinsic_blacklist,
    }

    # The VPC helper is always invoked; it no-ops when every endpoint list is empty.
    resource_list = self._get_method_path_uri_list(path, stage)
    self._add_vpc_resource_policy_for_method(blacklist_dict, "StringEquals", resource_list)

    if not SwaggerEditor._validate_list_property_is_resolved(source_vpc_whitelist):
        raise InvalidDocumentException(
            [
                InvalidTemplateException(
                    "SourceVpcWhitelist must be a list of strings. Use IntrinsicVpcWhitelist instead for values that use Intrinsic Functions"
                )
            ]
        )

    whitelist_dict = {
        "StringEndpointList": source_vpc_whitelist,
        "IntrinsicVpcList": source_vpc_intrinsic_whitelist,
        "IntrinsicVpceList": source_vpce_intrinsic_whitelist,
    }

    # Reuses the resource_list computed for the blacklist above.
    self._add_vpc_resource_policy_for_method(whitelist_dict, "StringNotEquals", resource_list)

    self._doc[self._X_APIGW_POLICY] = self.resource_policy
def add_custom_statements(self, custom_statements):
    """
    Merge caller-supplied policy statements into the resource policy and write the
    accumulated policy into the document under the x-amazon-apigateway-policy extension.

    :param custom_statements: A single statement dict or a list of statement dicts
    """
    self._add_custom_statement(custom_statements)
    self._doc[self._X_APIGW_POLICY] = self.resource_policy
def _add_iam_resource_policy_for_method(self, policy_list, effect, resource_list):
    """
    Build an IAM-principal Allow/Deny statement for the given resources and merge it
    into the resource policy accumulated under `x-amazon-apigateway-policy`.

    :param policy_list: AWS account/principal value(s) to grant or deny; single value or list
    :param string effect: Either "Allow" or "Deny"
    :param list resource_list: execute-api resource ARNs the statement applies to
    :raises ValueError: If the effect passed in does not match the allowed values.
    """
    if not policy_list:
        return

    if effect not in ["Allow", "Deny"]:
        raise ValueError("Effect must be one of {}".format(["Allow", "Deny"]))

    if not isinstance(policy_list, (dict, list)):
        raise InvalidDocumentException(
            [InvalidTemplateException("Type of '{}' must be a list or dictionary".format(policy_list))]
        )

    principals = policy_list if isinstance(policy_list, list) else [policy_list]

    self.resource_policy["Version"] = "2012-10-17"
    # Build as Py27Dict with one-by-one inserts to keep a deterministic key order.
    statement_to_add = Py27Dict()
    statement_to_add["Effect"] = effect
    statement_to_add["Action"] = "execute-api:Invoke"
    statement_to_add["Resource"] = resource_list
    statement_to_add["Principal"] = Py27Dict({"AWS": principals})

    existing = self.resource_policy.get("Statement")
    if existing is None:
        # First statement occupies the slot directly (not wrapped in a list).
        self.resource_policy["Statement"] = statement_to_add
    else:
        merged = existing if isinstance(existing, list) else [existing]
        merged.extend([statement_to_add])
        self.resource_policy["Statement"] = merged
def _get_method_path_uri_list(self, path, stage):
    """
    Build the list of execute-api resource expressions for every method on a path.

    It turns out that APIGW doesn't like trailing slashes in paths (#665)
    and removes as a part of their behavior, but this isn't documented.
    The regex removes the trailing slash to ensure the permission works as intended
    """
    method_names = list(self.get_path(path).keys())
    uri_list = []
    normalized_path = SwaggerEditor.get_path_without_trailing_slash(path)
    for method_name in method_names:
        lowered = method_name.lower()
        # "ANY" (and its x-amazon extension form) maps to a wildcard method.
        if lowered == self._X_ANY_METHOD or lowered == "any":
            http_method = "*"
        else:
            http_method = method_name.upper()
        uri = "execute-api:/${__Stage__}/" + http_method + normalized_path
        # Preserve the py2-compatible string subtype when either component carried it.
        if isinstance(http_method, Py27UniStr) or isinstance(normalized_path, Py27UniStr):
            uri = Py27UniStr(uri)
        uri_list.append(fnSub(uri, {"__Stage__": stage}))
    return uri_list
def _add_ip_resource_policy_for_method(self, ip_list, conditional, resource_list):
    """
    Build the Allow + conditional Deny statement pair that restricts the API methods
    to (or from) the given IP ranges, and merge the pair into the resource policy
    accumulated under `x-amazon-apigateway-policy`.

    :param ip_list: One or more IP ranges; single value or list
    :param string conditional: "IpAddress" (deny listed IPs) or "NotIpAddress" (deny all others)
    :param list resource_list: execute-api resource ARNs the statements apply to
    :raises ValueError: If the conditional passed in does not match the allowed values.
    """
    if not ip_list:
        return

    ip_ranges = ip_list if isinstance(ip_list, list) else [ip_list]

    if conditional not in ["IpAddress", "NotIpAddress"]:
        raise ValueError("Conditional must be one of {}".format(["IpAddress", "NotIpAddress"]))

    self.resource_policy["Version"] = "2012-10-17"

    # Py27Dict with one-by-one inserts keeps deterministic key order for logical-ID hashing.
    allow_statement = Py27Dict()
    allow_statement["Effect"] = "Allow"
    allow_statement["Action"] = "execute-api:Invoke"
    allow_statement["Resource"] = resource_list
    allow_statement["Principal"] = "*"

    deny_statement = Py27Dict()
    deny_statement["Effect"] = "Deny"
    deny_statement["Action"] = "execute-api:Invoke"
    deny_statement["Resource"] = resource_list
    deny_statement["Principal"] = "*"
    deny_statement["Condition"] = {conditional: {"aws:SourceIp": ip_ranges}}

    existing = self.resource_policy.get("Statement")
    if existing is None:
        self.resource_policy["Statement"] = [allow_statement, deny_statement]
        return

    statements = existing if isinstance(existing, list) else [existing]
    # Skip duplicates so repeated calls (one per path) don't bloat the policy.
    for new_statement in (allow_statement, deny_statement):
        if new_statement not in statements:
            statements.extend([new_statement])
    self.resource_policy["Statement"] = statements
def _add_vpc_resource_policy_for_method(self, endpoint_dict, conditional, resource_list):
    """
    This method generates a policy statement to grant/deny specific VPC/VPCE access to the API method and
    appends it to the swagger under `x-amazon-apigateway-policy`

    :param dict endpoint_dict: Dict with "StringEndpointList" (resolved "vpc-"/"vpce-" ids),
        "IntrinsicVpcList" and "IntrinsicVpceList" (unresolved intrinsic values); entries may be None
    :param string conditional: "StringEquals" (blacklist) or "StringNotEquals" (whitelist)
    :param list resource_list: execute-api resource ARNs the statements apply to
    :raises ValueError: If the conditional passed in does not match the allowed values.
    """
    if conditional not in ["StringNotEquals", "StringEquals"]:
        raise ValueError("Conditional must be one of {}".format(["StringNotEquals", "StringEquals"]))

    condition = Py27Dict()
    string_endpoint_list = endpoint_dict.get("StringEndpointList")
    intrinsic_vpc_endpoint_list = endpoint_dict.get("IntrinsicVpcList")
    intrinsic_vpce_endpoint_list = endpoint_dict.get("IntrinsicVpceList")

    if string_endpoint_list is not None:
        # Split resolved ids by prefix: "vpc-..." -> aws:SourceVpc, "vpce-..." -> aws:SourceVpce.
        vpce_regex = r"^vpce-"
        vpc_regex = r"^vpc-"
        vpc_list = []
        vpce_list = []
        for endpoint in string_endpoint_list:
            if re.match(vpce_regex, endpoint):
                vpce_list.append(endpoint)
            if re.match(vpc_regex, endpoint):
                vpc_list.append(endpoint)
        if vpc_list:
            condition.setdefault("aws:SourceVpc", []).extend(vpc_list)
        if vpce_list:
            condition.setdefault("aws:SourceVpce", []).extend(vpce_list)

    # Intrinsic values are taken as-is from the caller's pre-separated vpc/vpce lists.
    if intrinsic_vpc_endpoint_list is not None:
        condition.setdefault("aws:SourceVpc", []).extend(intrinsic_vpc_endpoint_list)
    if intrinsic_vpce_endpoint_list is not None:
        condition.setdefault("aws:SourceVpce", []).extend(intrinsic_vpce_endpoint_list)

    # Skip writing to transformed template if both vpc and vpce endpoint lists are empty
    if (not condition.get("aws:SourceVpc", [])) and (not condition.get("aws:SourceVpce", [])):
        return

    self.resource_policy["Version"] = "2012-10-17"
    # Allow everyone, then Deny based on the VPC/VPCE condition. Keys are inserted
    # one by one into Py27Dict to preserve ordering for logical-ID hashing.
    allow_statement = Py27Dict()
    allow_statement["Effect"] = "Allow"
    allow_statement["Action"] = "execute-api:Invoke"
    allow_statement["Resource"] = resource_list
    allow_statement["Principal"] = "*"

    deny_statement = Py27Dict()
    deny_statement["Effect"] = "Deny"
    deny_statement["Action"] = "execute-api:Invoke"
    deny_statement["Resource"] = resource_list
    deny_statement["Principal"] = "*"
    deny_statement["Condition"] = {conditional: condition}

    if self.resource_policy.get("Statement") is None:
        self.resource_policy["Statement"] = [allow_statement, deny_statement]
    else:
        statement = self.resource_policy["Statement"]
        if not isinstance(statement, list):
            statement = [statement]
        # Skip duplicates so repeated calls (one per path) don't bloat the policy.
        if allow_statement not in statement:
            statement.extend([allow_statement])
        if deny_statement not in statement:
            statement.extend([deny_statement])
        self.resource_policy["Statement"] = statement
def _add_custom_statement(self, custom_statements):
if custom_statements is None:
return
self.resource_policy["Version"] = "2012-10-17"
if self.resource_policy.get("Statement") is None:
self.resource_policy["Statement"] = custom_statements
else:
if not isinstance(custom_statements, list):
custom_statements = [custom_statements]
statement = self.resource_policy["Statement"]
if not isinstance(statement, list):
statement = [statement]
for s in custom_statements:
if s not in statement:
statement.append(s)
self.resource_policy["Statement"] = statement
def add_request_parameters_to_method(self, path, method_name, request_parameters):
    """
    Add Parameters to Swagger.

    :param string path: Path name
    :param string method_name: Method name
    :param list request_parameters: List of parameter dicts, each with "Name"
        (of the form "method.request.<location>.<name>"), "Required" and "Caching" keys
    :return:
    """
    normalized_method_name = self._normalize_method_name(method_name)
    # It is possible that the method could have two definitions in a Fn::If block.
    for method_definition in self.get_method_contents(self.get_path(path)[normalized_method_name]):
        # If no integration given, then we don't need to process this definition (could be AWS::NoValue)
        if not self.method_definition_has_integration(method_definition):
            continue
        existing_parameters = method_definition.get("parameters", [])
        for request_parameter in request_parameters:
            # Strip the "method.request." prefix and split into location + parameter name.
            parameter_name = request_parameter["Name"]
            location_name = parameter_name.replace("method.request.", "")

            location, name = location_name.split(".", 1)

            # Swagger uses "query" where API Gateway uses "querystring".
            if location == "querystring":
                location = "query"

            # create parameter as py27 dict
            # and insert keys one by one to preserve input orders
            parameter = Py27Dict()
            parameter["in"] = location
            parameter["name"] = name
            parameter["required"] = request_parameter["Required"]
            parameter["type"] = "string"

            existing_parameters.append(parameter)

            # When caching is enabled, register the full API Gateway parameter name
            # as a cache key on the method's integration.
            if request_parameter["Caching"]:
                integration = method_definition[self._X_APIGW_INTEGRATION]
                cache_parameters = integration.get(self._CACHE_KEY_PARAMETERS, [])
                cache_parameters.append(parameter_name)
                integration[self._CACHE_KEY_PARAMETERS] = cache_parameters

        method_definition["parameters"] = existing_parameters
@property
def swagger(self):
    """
    Returns a **copy** of the Swagger document as a dictionary.

    :return dict: Dictionary containing the Swagger document
    """
    # Make sure any changes to the paths are reflected back in output
    # iterate keys to make sure if "paths" is of Py27UniStr type, it won't be overriden as str
    # (assigning through the existing key object keeps the key's type, which the
    # py27 hash fix relies on for stable logical-ID generation)
    for key in self._doc.keys():
        if key == "paths":
            self._doc[key] = self.paths
    # Only write optional sections back when they are non-empty.
    if self.security_definitions:
        self._doc["securityDefinitions"] = self.security_definitions
    if self.gateway_responses:
        self._doc[self._X_APIGW_GATEWAY_RESPONSES] = self.gateway_responses
    if self.definitions:
        self._doc["definitions"] = self.definitions
    # Deep copy so callers cannot mutate the editor's internal state.
    return copy.deepcopy(self._doc)
@staticmethod
def is_valid(data):
"""
Checks if the input data is a Swagger document
:param dict data: Data to be validated
:return: True, if data is a Swagger
"""
if bool(data) and isinstance(data, dict) and isinstance(data.get("paths"), dict):
if bool(data.get("swagger")):
return True
elif bool(data.get("openapi")):
return SwaggerEditor.safe_compare_regex_with_string(
SwaggerEditor.get_openapi_version_3_regex(), data["openapi"]
)
return False
@staticmethod
def validate_is_dict(obj, exception_message):
"""
Throws exception if obj is not a dict
:param obj: object being validated
:param exception_message: message to include in exception if obj is not a dict
"""
if not isinstance(obj, dict):
raise InvalidDocumentException([InvalidTemplateException(exception_message)])
@staticmethod
def validate_path_item_is_dict(path_item, path):
    """
    Throws exception if path_item is not a dict

    :param path_item: path_item (value at the path) being validated
    :param path: path name
    """
    # Delegate to the generic dict validator with a path-specific message.
    message = "Value of '{}' path must be a dictionary according to Swagger spec.".format(path)
    SwaggerEditor.validate_is_dict(path_item, message)
@staticmethod
def gen_skeleton():
    """
    Method to make an empty swagger file, with just some basic structure. Just enough to pass validator.

    :return dict: Dictionary of a skeleton swagger document
    """
    # Built as Py27Dict with incremental inserts so key order (and hence the
    # generated logical IDs) matches the py2 implementation.
    skeleton = Py27Dict()
    skeleton["swagger"] = "2.0"
    info_section = Py27Dict()
    info_section["version"] = "1.0"
    info_section["title"] = ref("AWS::StackName")
    skeleton["info"] = info_section
    skeleton["paths"] = Py27Dict()
    return skeleton
@staticmethod
def _get_authorization_scopes(authorizers, default_authorizer):
"""
Returns auth scopes for an authorizer if present
:param authorizers: authorizer definitions
:param default_authorizer: name of the default authorizer
"""
if authorizers is not None:
if (
authorizers.get(default_authorizer)
and authorizers[default_authorizer].get("AuthorizationScopes") is not None
):
return authorizers[default_authorizer].get("AuthorizationScopes")
return []
@staticmethod
def _normalize_method_name(method):
"""
Returns a lower case, normalized version of HTTP Method. It also know how to handle API Gateway specific methods
like "ANY"
NOTE: Always normalize before using the `method` value passed in as input
:param string method: Name of the HTTP Method
:return string: Normalized method name
"""
if not method or not isinstance(method, str):
return method
method = method.lower()
if method == "any":
return SwaggerEditor._X_ANY_METHOD
else:
return method
@staticmethod
def get_openapi_versions_supported_regex():
openapi_version_supported_regex = r"\A[2-3](\.\d)(\.\d)?$"
return openapi_version_supported_regex
@staticmethod
def get_openapi_version_3_regex():
openapi_version_3_regex = r"\A3(\.\d)(\.\d)?$"
return openapi_version_3_regex
@staticmethod
def safe_compare_regex_with_string(regex, data):
return re.match(regex, str(data)) is not None
@staticmethod
def get_path_without_trailing_slash(path):
    """Collapse path variables and greedy segments ({id}, {greedy+}, {proxy+}) into "*"."""
    collapsed = re.sub(r"{([a-zA-Z0-9._-]+|[a-zA-Z0-9._-]+\+|proxy\+)}", "*", path)
    # Preserve the py2-compatible string subtype when the input carried it.
    return Py27UniStr(collapsed) if isinstance(path, Py27UniStr) else collapsed
@staticmethod
def get_validator_name(validate_body, validate_parameters):
"""
Get a readable path name to use as validator name
:param boolean validate_body: Boolean if validate body
:param boolean validate_request: Boolean if validate request
:return string: Normalized validator name
"""
if validate_body and validate_parameters:
return "body-and-params"
if validate_body and not validate_parameters:
return "body-only"
if not validate_body and validate_parameters:
return "params-only"
return "no-validation"
@staticmethod
def _validate_list_property_is_resolved(property_list):
"""
Validate if the values of a Property List are all of type string
:param property_list: Value of a Property List
:return bool: True if the property_list is all of type string otherwise False
"""
if property_list is not None and not all(isinstance(x, str) for x in property_list):
return False
return True
| 46.149688 | 145 | 0.652326 | import copy
import json
import re
from samtranslator.model.intrinsics import ref
from samtranslator.model.intrinsics import make_conditional, fnSub
from samtranslator.model.exceptions import InvalidDocumentException, InvalidTemplateException
from samtranslator.utils.py27hash_fix import Py27Dict, Py27UniStr
class SwaggerEditor(object):
"""
Wrapper class capable of parsing and generating Swagger JSON. This implements Swagger spec just enough that SAM
cares about. It is built to handle "partial Swagger" ie. Swagger that is incomplete and won't
pass the Swagger spec. But this is necessary for SAM because it iteratively builds the Swagger starting from an
empty skeleton.
NOTE (hawflau): To ensure the same logical ID will be generate in Py3 as in Py2 for AWS::Serverless::Api resource,
we have to apply py27hash_fix. For any dictionary that is created within the swagger body, we need to initiate it
with Py27Dict() instead of {}. We also need to add keys into the Py27Dict instance one by one, so that the input
order could be preserved. This is a must for the purpose of preserving the dict key iteration order, which is
essential for generating the same logical ID.
"""
_OPTIONS_METHOD = "options"
_X_APIGW_INTEGRATION = "x-amazon-apigateway-integration"
_X_APIGW_BINARY_MEDIA_TYPES = "x-amazon-apigateway-binary-media-types"
_CONDITIONAL_IF = "Fn::If"
_X_APIGW_GATEWAY_RESPONSES = "x-amazon-apigateway-gateway-responses"
_X_APIGW_POLICY = "x-amazon-apigateway-policy"
_X_ANY_METHOD = "x-amazon-apigateway-any-method"
_X_APIGW_REQUEST_VALIDATORS = "x-amazon-apigateway-request-validators"
_X_APIGW_REQUEST_VALIDATOR = "x-amazon-apigateway-request-validator"
_X_ENDPOINT_CONFIG = "x-amazon-apigateway-endpoint-configuration"
_CACHE_KEY_PARAMETERS = "cacheKeyParameters"
# https://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html
_ALL_HTTP_METHODS = ["OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE", "PATCH"]
_EXCLUDED_PATHS_FIELDS = ["summary", "description", "parameters"]
_POLICY_TYPE_IAM = "Iam"
_POLICY_TYPE_IP = "Ip"
_POLICY_TYPE_VPC = "Vpc"
def __init__(self, doc):
"""
Initialize the class with a swagger dictionary. This class creates a copy of the Swagger and performs all
modifications on this copy.
:param dict doc: Swagger document as a dictionary
:raises ValueError: If the input Swagger document does not meet the basic Swagger requirements.
"""
if not SwaggerEditor.is_valid(doc):
raise ValueError("Invalid Swagger document")
self._doc = copy.deepcopy(doc)
self.paths = self._doc["paths"]
self.security_definitions = self._doc.get("securityDefinitions", Py27Dict())
self.gateway_responses = self._doc.get(self._X_APIGW_GATEWAY_RESPONSES, Py27Dict())
self.resource_policy = self._doc.get(self._X_APIGW_POLICY, Py27Dict())
self.definitions = self._doc.get("definitions", Py27Dict())
# https://swagger.io/specification/#path-item-object
# According to swagger spec,
# each path item object must be a dict (even it is empty).
# We can do an early path validation on path item objects,
# so we don't need to validate wherever we use them.
for path in self.iter_on_path():
SwaggerEditor.validate_path_item_is_dict(self.get_path(path), path)
def get_path(self, path):
path_dict = self.paths.get(path)
if isinstance(path_dict, dict) and self._CONDITIONAL_IF in path_dict:
path_dict = path_dict[self._CONDITIONAL_IF][1]
return path_dict
def has_path(self, path, method=None):
"""
Returns True if this Swagger has the given path and optional method
:param string path: Path name
:param string method: HTTP method
:return: True, if this path/method is present in the document
"""
method = self._normalize_method_name(method)
path_dict = self.get_path(path)
path_dict_exists = path_dict is not None
if method:
return path_dict_exists and method in path_dict
return path_dict_exists
def method_has_integration(self, method):
"""
Returns true if the given method contains a valid method definition.
This uses the get_method_contents function to handle conditionals.
:param dict method: method dictionary
:return: true if method has one or multiple integrations
"""
for method_definition in self.get_method_contents(method):
if self.method_definition_has_integration(method_definition):
return True
return False
def method_definition_has_integration(self, method_definition):
"""
Checks a method definition to make sure it has an apigw integration
:param dict method_definition: method definition dictionary
:return: True if an integration exists
"""
if method_definition.get(self._X_APIGW_INTEGRATION):
return True
return False
def get_method_contents(self, method):
"""
Returns the swagger contents of the given method. This checks to see if a conditional block
has been used inside of the method, and, if so, returns the method contents that are
inside of the conditional.
:param dict method: method dictionary
:return: list of swagger component dictionaries for the method
"""
if self._CONDITIONAL_IF in method:
return method[self._CONDITIONAL_IF][1:]
return [method]
def add_disable_execute_api_endpoint_extension(self, disable_execute_api_endpoint):
"""Add endpoint configuration to _X_APIGW_ENDPOINT_CONFIG in open api definition as extension
Following this guide:
https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-swagger-extensions-endpoint-configuration.html
:param boolean disable_execute_api_endpoint: Specifies whether clients can invoke your API by using the default execute-api endpoint.
"""
if not self._doc.get(self._X_ENDPOINT_CONFIG):
self._doc[self._X_ENDPOINT_CONFIG] = {}
DISABLE_EXECUTE_API_ENDPOINT = "disableExecuteApiEndpoint"
set_disable_api_endpoint = {DISABLE_EXECUTE_API_ENDPOINT: disable_execute_api_endpoint}
self._doc[self._X_ENDPOINT_CONFIG].update(set_disable_api_endpoint)
def has_integration(self, path, method):
"""
Checks if an API Gateway integration is already present at the given path/method
:param string path: Path name
:param string method: HTTP method
:return: True, if an API Gateway integration is already present
"""
method = self._normalize_method_name(method)
path_dict = self.get_path(path)
return (
self.has_path(path, method)
and isinstance(path_dict[method], dict)
and self.method_has_integration(path_dict[method])
)
def add_path(self, path, method=None):
"""
Adds the path/method combination to the Swagger, if not already present
:param string path: Path name
:param string method: HTTP method
:raises ValueError: If the value of `path` in Swagger is not a dictionary
"""
method = self._normalize_method_name(method)
path_dict = self.paths.setdefault(path, Py27Dict())
SwaggerEditor.validate_path_item_is_dict(path_dict, path)
if self._CONDITIONAL_IF in path_dict:
path_dict = path_dict[self._CONDITIONAL_IF][1]
path_dict.setdefault(method, Py27Dict())
def add_lambda_integration(
self, path, method, integration_uri, method_auth_config=None, api_auth_config=None, condition=None
):
"""
Adds aws_proxy APIGW integration to the given path+method.
:param string path: Path name
:param string method: HTTP Method
:param string integration_uri: URI for the integration.
"""
method = self._normalize_method_name(method)
if self.has_integration(path, method):
raise ValueError("Lambda integration already exists on Path={}, Method={}".format(path, method))
self.add_path(path, method)
if condition:
integration_uri = make_conditional(condition, integration_uri)
path_dict = self.get_path(path)
path_dict[method][self._X_APIGW_INTEGRATION] = Py27Dict()
# insert key one by one to preserce input order
path_dict[method][self._X_APIGW_INTEGRATION]["type"] = "aws_proxy"
path_dict[method][self._X_APIGW_INTEGRATION]["httpMethod"] = "POST"
path_dict[method][self._X_APIGW_INTEGRATION]["uri"] = integration_uri
method_auth_config = method_auth_config or Py27Dict()
api_auth_config = api_auth_config or Py27Dict()
if (
method_auth_config.get("Authorizer") == "AWS_IAM"
or api_auth_config.get("DefaultAuthorizer") == "AWS_IAM"
and not method_auth_config
):
method_invoke_role = method_auth_config.get("InvokeRole")
if not method_invoke_role and "InvokeRole" in method_auth_config:
method_invoke_role = "NONE"
api_invoke_role = api_auth_config.get("InvokeRole")
if not api_invoke_role and "InvokeRole" in api_auth_config:
api_invoke_role = "NONE"
credentials = self._generate_integration_credentials(
method_invoke_role=method_invoke_role, api_invoke_role=api_invoke_role
)
if credentials and credentials != "NONE":
self.paths[path][method][self._X_APIGW_INTEGRATION]["credentials"] = credentials
# If 'responses' key is *not* present, add it with an empty dict as value
path_dict[method].setdefault("responses", Py27Dict())
# If a condition is present, wrap all method contents up into the condition
if condition:
path_dict[method] = make_conditional(condition, path_dict[method])
def add_state_machine_integration(
self,
path,
method,
integration_uri,
credentials,
request_templates=None,
condition=None,
):
"""
Adds aws APIGW integration to the given path+method.
:param string path: Path name
:param string method: HTTP Method
:param string integration_uri: URI for the integration
:param string credentials: Credentials for the integration
:param dict request_templates: A map of templates that are applied on the request payload.
:param bool condition: Condition for the integration
"""
method = self._normalize_method_name(method)
if self.has_integration(path, method):
raise ValueError("Integration already exists on Path={}, Method={}".format(path, method))
self.add_path(path, method)
# Wrap the integration_uri in a Condition if one exists on that state machine
# This is necessary so CFN doesn't try to resolve the integration reference.
if condition:
integration_uri = make_conditional(condition, integration_uri)
path_dict = self.get_path(path)
integration_responses = Py27Dict()
integration_responses["200"] = Py27Dict({"statusCode": "200"})
integration_responses["400"] = Py27Dict({"statusCode": "400"})
default_method_responses = Py27Dict()
default_method_responses["200"] = Py27Dict({"description": "OK"})
default_method_responses["400"] = Py27Dict({"description": "Bad Request"})
path_dict[method][self._X_APIGW_INTEGRATION] = Py27Dict()
path_dict[method][self._X_APIGW_INTEGRATION]["type"] = "aws"
path_dict[method][self._X_APIGW_INTEGRATION]["httpMethod"] = "POST"
path_dict[method][self._X_APIGW_INTEGRATION]["uri"] = integration_uri
path_dict[method][self._X_APIGW_INTEGRATION]["responses"] = integration_responses
path_dict[method][self._X_APIGW_INTEGRATION]["credentials"] = credentials
path_dict[method].setdefault("responses", default_method_responses)
if request_templates:
path_dict[method][self._X_APIGW_INTEGRATION].update({"requestTemplates": request_templates})
if condition:
path_dict[method] = make_conditional(condition, path_dict[method])
def make_path_conditional(self, path, condition):
"""
Wrap entire API path definition in a CloudFormation if condition.
"""
self.paths[path] = make_conditional(condition, self.paths[path])
def _generate_integration_credentials(self, method_invoke_role=None, api_invoke_role=None):
return self._get_invoke_role(method_invoke_role or api_invoke_role)
def _get_invoke_role(self, invoke_role):
CALLER_CREDENTIALS_ARN = "arn:aws:iam::*:user/*"
return invoke_role if invoke_role and invoke_role != "CALLER_CREDENTIALS" else CALLER_CREDENTIALS_ARN
def iter_on_path(self):
"""
Yields all the paths available in the Swagger. As a caller, if you add new paths to Swagger while iterating,
they will not show up in this iterator
:yields string: Path name
"""
for path, value in self.paths.items():
yield path
def add_cors(
self, path, allowed_origins, allowed_headers=None, allowed_methods=None, max_age=None, allow_credentials=None
):
"""
Add CORS configuration to this path. Specifically, we will add a OPTIONS response config to the Swagger that
will return headers required for CORS. Since SAM uses aws_proxy integration, we cannot inject the headers
into the actual response returned from Lambda function. This is something customers have to implement
themselves.
If OPTIONS method is already present for the Path, we will skip adding CORS configuration
Following this guide:
https://docs.aws.amazon.com/apigateway/latest/developerguide/how-to-cors.html#enable-cors-for-resource-using-swagger-importer-tool
:param string path: Path to add the CORS configuration to.
:param string/dict allowed_origins: Comma separate list of allowed origins.
Value can also be an intrinsic function dict.
:param string/dict allowed_headers: Comma separated list of allowed headers.
Value can also be an intrinsic function dict.
:param string/dict allowed_methods: Comma separated list of allowed methods.
Value can also be an intrinsic function dict.
:param integer/dict max_age: Maximum duration to cache the CORS Preflight request. Value is set on
Access-Control-Max-Age header. Value can also be an intrinsic function dict.
:param bool/None allow_credentials: Flags whether request is allowed to contain credentials.
:raises ValueError: When values for one of the allowed_* variables is empty
"""
if self.has_path(path, self._OPTIONS_METHOD):
return
if not allowed_origins:
raise InvalidTemplateException("Invalid input. Value for AllowedOrigins is required")
if not allowed_methods:
allowed_methods = self._make_cors_allowed_methods_for_path(path)
# APIGW expects the value to be a "string expression". Hence wrap in another quote. Ex: "'GET,POST,DELETE'"
allowed_methods = "'{}'".format(allowed_methods)
if allow_credentials is not True:
allow_credentials = False
# Add the Options method and the CORS response
self.add_path(path, self._OPTIONS_METHOD)
self.get_path(path)[self._OPTIONS_METHOD] = self._options_method_response_for_cors(
allowed_origins, allowed_headers, allowed_methods, max_age, allow_credentials
)
def add_binary_media_types(self, binary_media_types):
"""
Args:
binary_media_types: list
"""
def replace_recursively(bmt):
"""replaces "~1" with "/" for the input binary_media_types recursively"""
if isinstance(bmt, dict):
to_return = Py27Dict()
for k, v in bmt.items():
to_return[Py27UniStr(k.replace("~1", "/"))] = replace_recursively(v)
return to_return
if isinstance(bmt, list):
return [replace_recursively(item) for item in bmt]
if isinstance(bmt, str) or isinstance(bmt, Py27UniStr):
return Py27UniStr(bmt.replace("~1", "/"))
return bmt
bmt = replace_recursively(binary_media_types)
self._doc[self._X_APIGW_BINARY_MEDIA_TYPES] = bmt
    def _options_method_response_for_cors(
        self, allowed_origins, allowed_headers=None, allowed_methods=None, max_age=None, allow_credentials=None
    ):
        """
        Returns a Swagger snippet containing configuration for OPTIONS HTTP Method to configure CORS.

        This snippet is taken from public documentation:
        https://docs.aws.amazon.com/apigateway/latest/developerguide/how-to-cors.html#enable-cors-for-resource-using-swagger-importer-tool

        :param string/dict allowed_origins: Comma separate list of allowed origins.
            Value can also be an intrinsic function dict.
        :param string/dict allowed_headers: Comma separated list of allowed headers.
            Value can also be an intrinsic function dict.
        :param string/dict allowed_methods: Comma separated list of allowed methods.
            Value can also be an intrinsic function dict.
        :param integer/dict max_age: Maximum duration to cache the CORS Preflight request. Value is set on
            Access-Control-Max-Age header. Value can also be an intrinsic function dict.
        :param bool allow_credentials: Flags whether request is allowed to contain credentials.
        :return dict: Dictionary containing Options method configuration for CORS
        """

        # CORS response header names
        ALLOW_ORIGIN = "Access-Control-Allow-Origin"
        ALLOW_HEADERS = "Access-Control-Allow-Headers"
        ALLOW_METHODS = "Access-Control-Allow-Methods"
        MAX_AGE = "Access-Control-Max-Age"
        ALLOW_CREDENTIALS = "Access-Control-Allow-Credentials"
        # Maps a response header name to its API Gateway integration responseParameters key
        HEADER_RESPONSE = lambda x: "method.response.header." + x

        response_parameters = Py27Dict(
            {
                # AllowedOrigin is always required
                HEADER_RESPONSE(ALLOW_ORIGIN): allowed_origins
            }
        )
        response_headers = Py27Dict(
            {
                # Allow Origin is always required
                ALLOW_ORIGIN: {"type": "string"}
            }
        )

        # Optional values. Skip the header if value is empty
        #
        # The values must not be empty string or null. Also, value of '*' is a very recent addition (2017) and
        # not supported in all the browsers. So it is important to skip the header if value is not given
        # https://fetch.spec.whatwg.org/#http-new-header-syntax
        #
        if allowed_headers:
            response_parameters[HEADER_RESPONSE(ALLOW_HEADERS)] = allowed_headers
            response_headers[ALLOW_HEADERS] = {"type": "string"}
        if allowed_methods:
            response_parameters[HEADER_RESPONSE(ALLOW_METHODS)] = allowed_methods
            response_headers[ALLOW_METHODS] = {"type": "string"}
        if max_age is not None:
            # MaxAge can be set to 0, which is a valid value. So explicitly check against None
            response_parameters[HEADER_RESPONSE(MAX_AGE)] = max_age
            response_headers[MAX_AGE] = {"type": "integer"}
        if allow_credentials is True:
            # Allow-Credentials only has a valid value of true, it should be omitted otherwise.
            # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Credentials
            response_parameters[HEADER_RESPONSE(ALLOW_CREDENTIALS)] = "'true'"
            response_headers[ALLOW_CREDENTIALS] = {"type": "string"}

        # construct snippet and insert keys one by one to preserve input order
        to_return = Py27Dict()
        to_return["summary"] = "CORS support"
        to_return["consumes"] = ["application/json"]
        to_return["produces"] = ["application/json"]
        # Mock integration: API Gateway answers the preflight itself without invoking a backend
        to_return[self._X_APIGW_INTEGRATION] = Py27Dict()
        to_return[self._X_APIGW_INTEGRATION]["type"] = "mock"
        to_return[self._X_APIGW_INTEGRATION]["requestTemplates"] = {"application/json": '{\n "statusCode" : 200\n}\n'}
        to_return[self._X_APIGW_INTEGRATION]["responses"] = Py27Dict()
        to_return[self._X_APIGW_INTEGRATION]["responses"]["default"] = Py27Dict()
        to_return[self._X_APIGW_INTEGRATION]["responses"]["default"]["statusCode"] = "200"
        to_return[self._X_APIGW_INTEGRATION]["responses"]["default"]["responseParameters"] = response_parameters
        to_return[self._X_APIGW_INTEGRATION]["responses"]["default"]["responseTemplates"] = {"application/json": "{}\n"}
        to_return["responses"] = Py27Dict()
        to_return["responses"]["200"] = Py27Dict()
        to_return["responses"]["200"]["description"] = "Default response for CORS method"
        to_return["responses"]["200"]["headers"] = response_headers
        return to_return
def _make_cors_allowed_methods_for_path(self, path):
"""
Creates the value for Access-Control-Allow-Methods header for given path. All HTTP methods defined for this
path will be included in the result. If the path contains "ANY" method, then *all available* HTTP methods will
be returned as result.
:param string path: Path to generate AllowMethods value for
:return string: String containing the value of AllowMethods, if the path contains any methods.
Empty string, otherwise
"""
if not self.has_path(path):
return ""
# At this point, value of Swagger path should be a dictionary with method names being the keys
methods = list(self.get_path(path).keys())
if self._X_ANY_METHOD in methods:
# API Gateway's ANY method is not a real HTTP method but a wildcard representing all HTTP methods
allow_methods = self._ALL_HTTP_METHODS
else:
allow_methods = methods
allow_methods.append("options")
allow_methods = list({m.upper() for m in allow_methods})
allow_methods.sort()
return ",".join(allow_methods)
def add_authorizers_security_definitions(self, authorizers):
"""
Add Authorizer definitions to the securityDefinitions part of Swagger.
:param list authorizers: List of Authorizer configurations which get translated to securityDefinitions.
"""
self.security_definitions = self.security_definitions or Py27Dict()
for authorizer_name, authorizer in authorizers.items():
self.security_definitions[authorizer_name] = authorizer.generate_swagger()
def add_awsiam_security_definition(self):
"""
Adds AWS_IAM definition to the securityDefinitions part of Swagger.
Note: this method is idempotent
"""
aws_iam_security_definition = Py27Dict()
aws_iam_security_definition["AWS_IAM"] = Py27Dict()
aws_iam_security_definition["AWS_IAM"]["x-amazon-apigateway-authtype"] = "awsSigv4"
aws_iam_security_definition["AWS_IAM"]["type"] = "apiKey"
aws_iam_security_definition["AWS_IAM"]["name"] = "Authorization"
aws_iam_security_definition["AWS_IAM"]["in"] = "header"
self.security_definitions = self.security_definitions or Py27Dict()
# that we minimize changes to the swagger in the case of user defined swagger
if "AWS_IAM" not in self.security_definitions:
self.security_definitions.update(aws_iam_security_definition)
def add_apikey_security_definition(self):
"""
Adds api_key definition to the securityDefinitions part of Swagger.
Note: this method is idempotent
"""
# construct api_key_security_definiton as py27 dict
# and insert keys one by one to preserve input order
api_key_security_definition = Py27Dict()
api_key_security_definition["api_key"] = Py27Dict()
api_key_security_definition["api_key"]["type"] = "apiKey"
api_key_security_definition["api_key"]["name"] = "x-api-key"
api_key_security_definition["api_key"]["in"] = "header"
self.security_definitions = self.security_definitions or Py27Dict()
# Only add the security definition if it doesn't exist. This helps ensure
if "api_key" not in self.security_definitions:
self.security_definitions.update(api_key_security_definition)
    def set_path_default_authorizer(
        self, path, default_authorizer, authorizers, add_default_auth_to_preflight=True, api_authorizers=None
    ):
        """
        Adds the default_authorizer to the security block for each method on this path unless an Authorizer
        was defined at the Function/Path/Method level. This is intended to be used to set the
        authorizer security restriction for all api methods based upon the default configured in the
        Serverless API.

        :param string path: Path name
        :param string default_authorizer: Name of the authorizer to use as the default. Must be a key in the
            authorizers param.
        :param list authorizers: List of Authorizer configurations defined on the related Api.
        :param bool add_default_auth_to_preflight: Bool of whether to add the default
            authorizer to OPTIONS preflight requests.
        :param dict api_authorizers: Authorizer configurations keyed by name; used to look up
            AuthorizationScopes for the default authorizer.
        """
        for method_name, method in self.get_path(path).items():
            normalized_method_name = self._normalize_method_name(method_name)
            # Excluded fields are path-level metadata, not HTTP methods
            if normalized_method_name in SwaggerEditor._EXCLUDED_PATHS_FIELDS:
                continue
            if add_default_auth_to_preflight or normalized_method_name != "options":
                SwaggerEditor.validate_is_dict(
                    method,
                    'Value of "{}" ({}) for path {} is not a valid dictionary.'.format(method_name, method, path),
                )
                # A method may have multiple definitions (e.g. inside a Fn::If block)
                for method_definition in self.get_method_contents(method):
                    SwaggerEditor.validate_is_dict(
                        method_definition,
                        'Value of "{}" ({}) for path {} is not a valid dictionary.'.format(
                            method_name, method_definition, path
                        ),
                    )
                    # Skip definitions without an integration (could be AWS::NoValue)
                    if not self.method_definition_has_integration(method_definition):
                        continue
                    existing_security = method_definition.get("security", [])
                    authorizer_list = ["AWS_IAM"]
                    if authorizers:
                        authorizer_list.extend(authorizers.keys())
                    authorizer_names = set(authorizer_list)
                    existing_non_authorizer_security = []
                    existing_authorizer_security = []
                    # Split existing security into Authorizers and everything else
                    # (e.g. sigv4 (AWS_IAM), api_key (API Key/Usage Plans), NONE (marker for ignoring default))
                    # We want to ensure only a single Authorizer security entry exists while keeping everything else
                    for security in existing_security:
                        SwaggerEditor.validate_is_dict(
                            security, "{} in Security for path {} is not a valid dictionary.".format(security, path)
                        )
                        if authorizer_names.isdisjoint(security.keys()):
                            existing_non_authorizer_security.append(security)
                        else:
                            existing_authorizer_security.append(security)
                    none_idx = -1
                    authorizer_security = []
                    # Check for an existing Authorizer before applying the default. It would be simpler
                    # if instead we applied the DefaultAuthorizer first and then simply
                    # overwrote it if necessary, however, the order in which things get
                    # applied (Function Api Events first; then Api Resource) complicates it.
                    # Check if Function/Path/Method specified 'NONE' for Authorizer
                    for idx, security in enumerate(existing_non_authorizer_security):
                        is_none = any(key == "NONE" for key in security.keys())
                        if is_none:
                            none_idx = idx
                            break
                    # NONE was found; remove it and don't add the DefaultAuthorizer
                    if none_idx > -1:
                        del existing_non_authorizer_security[none_idx]
                    # A method-level Authorizer exists; keep it instead of the default
                    elif existing_authorizer_security:
                        authorizer_security = existing_authorizer_security
                    # No method-level Authorizer and no NONE marker: apply the default
                    else:
                        security_dict = Py27Dict()
                        security_dict[default_authorizer] = self._get_authorization_scopes(
                            api_authorizers, default_authorizer
                        )
                        authorizer_security = [security_dict]
                    security = existing_non_authorizer_security + authorizer_security
                    if security:
                        method_definition["security"] = security
                        # If the first security entry uses AWS_IAM, make sure its security
                        # definition exists in the document
                        if "AWS_IAM" in method_definition["security"][0]:
                            self.add_awsiam_security_definition()
    def set_path_default_apikey_required(self, path):
        """
        Add the ApiKey security as required for each method on this path unless ApiKeyRequired
        was defined at the Function/Path/Method level. This is intended to be used to set the
        apikey security restriction for all api methods based upon the default configured in the
        Serverless API.

        :param string path: Path name
        """
        for method_name, method in self.get_path(path).items():
            # Excluded fields are path-level metadata, not HTTP methods
            if method_name in SwaggerEditor._EXCLUDED_PATHS_FIELDS:
                continue
            # A method may have multiple definitions (e.g. inside a Fn::If block)
            for method_definition in self.get_method_contents(method):
                # Skip definitions without an integration (could be AWS::NoValue)
                if not self.method_definition_has_integration(method_definition):
                    continue
                existing_security = method_definition.get("security", [])
                # "api_key_false" is a method-level marker meaning "do not apply the
                # API-level ApiKeyRequired default to this method"
                apikey_security_names = set(["api_key", "api_key_false"])
                existing_non_apikey_security = []
                existing_apikey_security = []
                apikey_security = []
                # Split existing security into ApiKey and everything else
                # (e.g. sigv4 (AWS_IAM), authorizers, NONE (marker for ignoring default authorizer))
                # We want to ensure only a single ApiKey security entry exists while keeping everything else
                for security in existing_security:
                    if apikey_security_names.isdisjoint(security.keys()):
                        existing_non_apikey_security.append(security)
                    else:
                        existing_apikey_security.append(security)
                # Check for an existing method level ApiKey setting before applying the default. It would be simpler
                # if instead we applied the default first and then simply
                # overwrote it if necessary, however, the order in which things get
                # applied (Function Api Events first; then Api Resource) complicates it.
                # Check if Function/Path/Method specified 'False' for ApiKeyRequired
                apikeyfalse_idx = -1
                for idx, security in enumerate(existing_apikey_security):
                    is_none = any(key == "api_key_false" for key in security.keys())
                    if is_none:
                        apikeyfalse_idx = idx
                        break
                # api_key_false was found; remove it and don't add default api_key security setting
                if apikeyfalse_idx > -1:
                    del existing_apikey_security[apikeyfalse_idx]
                # No method-level opt-out: require the api_key by default
                else:
                    security_dict = Py27Dict()
                    security_dict["api_key"] = []
                    apikey_security = [security_dict]
                security = existing_non_apikey_security + apikey_security
                if security != existing_security:
                    method_definition["security"] = security
def add_auth_to_method(self, path, method_name, auth, api):
"""
Adds auth settings for this path/method. Auth settings currently consist of Authorizers and ApiKeyRequired
but this method will eventually include setting other auth settings such as Resource Policy, etc.
This is used to configure the security for individual functions.
:param string path: Path name
:param string method_name: Method name
:param dict auth: Auth configuration such as Authorizers, ApiKeyRequired, ResourcePolicy
:param dict api: Reference to the related Api's properties as defined in the template.
"""
method_authorizer = auth and auth.get("Authorizer")
method_scopes = auth and auth.get("AuthorizationScopes")
api_auth = api and api.get("Auth")
authorizers = api_auth and api_auth.get("Authorizers")
if method_authorizer:
self._set_method_authorizer(path, method_name, method_authorizer, authorizers, method_scopes)
method_apikey_required = auth and auth.get("ApiKeyRequired")
if method_apikey_required is not None:
self._set_method_apikey_handling(path, method_name, method_apikey_required)
    def _set_method_authorizer(self, path, method_name, authorizer_name, authorizers=None, method_scopes=None):
        """
        Adds the authorizer_name to the security block for each method on this path.
        This is used to configure the authorizer for individual functions.

        :param string path: Path name
        :param string method_name: Method name
        :param string authorizer_name: Name of the authorizer to use. Must be a key in the
            authorizers param.
        :param dict authorizers: Authorizer configurations keyed by name; used to look up
            AuthorizationScopes for the named authorizer.
        :param list method_scopes: Method-level AuthorizationScopes; when given, overrides the
            scopes from the authorizer configuration.
        """
        if authorizers is None:
            authorizers = Py27Dict()
        normalized_method_name = self._normalize_method_name(method_name)
        # A method may have multiple definitions (e.g. inside a Fn::If block)
        for method_definition in self.get_method_contents(self.get_path(path)[normalized_method_name]):
            # Skip definitions without an integration (could be AWS::NoValue)
            if not self.method_definition_has_integration(method_definition):
                continue
            existing_security = method_definition.get("security", [])
            security_dict = Py27Dict()
            security_dict[authorizer_name] = []
            authorizer_security = [security_dict]
            # This assumes there are no autorizers already configured in the existing security block
            security = existing_security + authorizer_security
            # Attach the scopes for this authorizer; method-level scopes take precedence
            if authorizer_name != "NONE" and authorizers:
                method_auth_scopes = authorizers.get(authorizer_name, Py27Dict()).get("AuthorizationScopes")
                if method_scopes is not None:
                    method_auth_scopes = method_scopes
                if authorizers.get(authorizer_name) is not None and method_auth_scopes is not None:
                    security_dict[authorizer_name] = method_auth_scopes
            if security:
                method_definition["security"] = security
                # If the first security entry uses AWS_IAM, make sure its security
                # definition exists in the document
                if "AWS_IAM" in method_definition["security"][0]:
                    self.add_awsiam_security_definition()
def _set_method_apikey_handling(self, path, method_name, apikey_required):
"""
Adds the apikey setting to the security block for each method on this path.
This is used to configure the authorizer for individual functions.
:param string path: Path name
:param string method_name: Method name
:param bool apikey_required: Whether the apikey security is required
"""
normalized_method_name = self._normalize_method_name(method_name)
# It is possible that the method could have two definitions in a Fn::If block.
for method_definition in self.get_method_contents(self.get_path(path)[normalized_method_name]):
# If no integration given, then we don't need to process this definition (could be AWS::NoValue)
if not self.method_definition_has_integration(method_definition):
continue
existing_security = method_definition.get("security", [])
if apikey_required:
security_dict = Py27Dict()
security_dict["api_key"] = []
apikey_security = [security_dict]
self.add_apikey_security_definition()
else:
security_dict = Py27Dict()
security_dict["api_key_false"] = []
apikey_security = [security_dict]
security = existing_security + apikey_security
if security != existing_security:
method_definition["security"] = security
def add_request_validator_to_method(self, path, method_name, validate_body=False, validate_parameters=False):
"""
Adds request model body parameter for this path/method.
:param string path: Path name
:param string method_name: Method name
:param bool validate_body: Add validator parameter on the body
:param bool validate_parameters: Validate request
"""
normalized_method_name = self._normalize_method_name(method_name)
validator_name = SwaggerEditor.get_validator_name(validate_body, validate_parameters)
request_validator_definition = Py27Dict()
request_validator_definition[validator_name] = Py27Dict()
request_validator_definition[validator_name]["validateRequestBody"] = validate_body
request_validator_definition[validator_name]["validateRequestParameters"] = validate_parameters
if not self._doc.get(self._X_APIGW_REQUEST_VALIDATORS):
self._doc[self._X_APIGW_REQUEST_VALIDATORS] = Py27Dict()
if not self._doc[self._X_APIGW_REQUEST_VALIDATORS].get(validator_name):
self._doc[self._X_APIGW_REQUEST_VALIDATORS].update(request_validator_definition)
# It is possible that the method could have two definitions in a Fn::If block.
for path_method_name, method in self.get_path(path).items():
normalized_path_method_name = self._normalize_method_name(path_method_name)
# Adding it to only given method to the path
if normalized_path_method_name == normalized_method_name:
for method_definition in self.get_method_contents(method):
# If no integration given, then we don't need to process this definition (could be AWS::NoValue)
if not self.method_definition_has_integration(method_definition):
continue
set_validator_to_method = Py27Dict({self._X_APIGW_REQUEST_VALIDATOR: validator_name})
method_definition.update(set_validator_to_method)
    def add_request_model_to_method(self, path, method_name, request_model):
        """
        Adds request model body parameter for this path/method.

        :param string path: Path name
        :param string method_name: Method name
        :param dict request_model: Model configuration with a "Model" key (model name) and
            an optional "Required" key
        """
        model_name = request_model and request_model.get("Model").lower()
        model_required = request_model and request_model.get("Required")
        normalized_method_name = self._normalize_method_name(method_name)
        # It is possible that the method could have two definitions in a Fn::If block.
        for method_definition in self.get_method_contents(self.get_path(path)[normalized_method_name]):
            # Skip definitions without an integration (could be AWS::NoValue)
            if not self.method_definition_has_integration(method_definition):
                continue
            if self._doc.get("swagger") is not None:
                # Swagger 2.0: attach the model as a "body" parameter referencing #/definitions
                existing_parameters = method_definition.get("parameters", [])
                # construct parameter as py27 dict
                # and insert keys one by one to preserve input order
                parameter = Py27Dict()
                parameter["in"] = "body"
                parameter["name"] = model_name
                parameter["schema"] = {"$ref": "#/definitions/{}".format(model_name)}
                if model_required is not None:
                    parameter["required"] = model_required
                existing_parameters.append(parameter)
                method_definition["parameters"] = existing_parameters
            elif self._doc.get("openapi") and SwaggerEditor.safe_compare_regex_with_string(
                SwaggerEditor.get_openapi_version_3_regex(), self._doc["openapi"]
            ):
                # OpenAPI 3.x: attach the model as a requestBody referencing #/components/schemas
                method_definition["requestBody"] = {
                    "content": {"application/json": {"schema": {"$ref": "#/components/schemas/{}".format(model_name)}}}
                }
                if model_required is not None:
                    method_definition["requestBody"]["required"] = model_required
def add_gateway_responses(self, gateway_responses):
"""
Add Gateway Response definitions to Swagger.
:param dict gateway_responses: Dictionary of GatewayResponse configuration which gets translated.
"""
self.gateway_responses = self.gateway_responses or Py27Dict()
for response_type, response in gateway_responses.items():
self.gateway_responses[response_type] = response.generate_swagger()
def add_models(self, models):
"""
Add Model definitions to Swagger.
:param dict models: Dictionary of Model schemas which gets translated
:return:
"""
self.definitions = self.definitions or Py27Dict()
for model_name, schema in models.items():
model_type = schema.get("type")
model_properties = schema.get("properties")
if not model_type:
raise InvalidDocumentException([InvalidTemplateException("'Models' schema is missing 'type'.")])
if not model_properties:
raise InvalidDocumentException([InvalidTemplateException("'Models' schema is missing 'properties'.")])
self.definitions[model_name.lower()] = schema
    def add_resource_policy(self, resource_policy, path, stage):
        """
        Add resource policy definition to Swagger.

        :param dict resource_policy: Dictionary of resource_policy statements which gets translated
        :param string path: Path name to build execute-api resource ARNs for
        :param stage: API stage name (or intrinsic) substituted into the resource ARNs
        :return:
        """
        if resource_policy is None:
            return
        SwaggerEditor.validate_is_dict(resource_policy, "Resource Policy is not a valid dictionary.")
        aws_account_whitelist = resource_policy.get("AwsAccountWhitelist")
        aws_account_blacklist = resource_policy.get("AwsAccountBlacklist")
        ip_range_whitelist = resource_policy.get("IpRangeWhitelist")
        ip_range_blacklist = resource_policy.get("IpRangeBlacklist")
        source_vpc_whitelist = resource_policy.get("SourceVpcWhitelist")
        source_vpc_blacklist = resource_policy.get("SourceVpcBlacklist")
        # Intrinsic's supported in these properties
        source_vpc_intrinsic_whitelist = resource_policy.get("IntrinsicVpcWhitelist")
        source_vpce_intrinsic_whitelist = resource_policy.get("IntrinsicVpceWhitelist")
        source_vpc_intrinsic_blacklist = resource_policy.get("IntrinsicVpcBlacklist")
        source_vpce_intrinsic_blacklist = resource_policy.get("IntrinsicVpceBlacklist")
        # Account allow/deny lists become IAM principal statements
        if aws_account_whitelist is not None:
            resource_list = self._get_method_path_uri_list(path, stage)
            self._add_iam_resource_policy_for_method(aws_account_whitelist, "Allow", resource_list)
        if aws_account_blacklist is not None:
            resource_list = self._get_method_path_uri_list(path, stage)
            self._add_iam_resource_policy_for_method(aws_account_blacklist, "Deny", resource_list)
        # IP ranges: whitelist denies everything NOT in range, blacklist denies the range
        if ip_range_whitelist is not None:
            resource_list = self._get_method_path_uri_list(path, stage)
            self._add_ip_resource_policy_for_method(ip_range_whitelist, "NotIpAddress", resource_list)
        if ip_range_blacklist is not None:
            resource_list = self._get_method_path_uri_list(path, stage)
            self._add_ip_resource_policy_for_method(ip_range_blacklist, "IpAddress", resource_list)
        # SourceVpc*list must be fully resolved strings; intrinsics belong in IntrinsicVpc*list
        if not SwaggerEditor._validate_list_property_is_resolved(source_vpc_blacklist):
            raise InvalidDocumentException(
                [
                    InvalidTemplateException(
                        "SourceVpcBlacklist must be a list of strings. Use IntrinsicVpcBlacklist instead for values that use Intrinsic Functions"
                    )
                ]
            )
        blacklist_dict = {
            "StringEndpointList": source_vpc_blacklist,
            "IntrinsicVpcList": source_vpc_intrinsic_blacklist,
            "IntrinsicVpceList": source_vpce_intrinsic_blacklist,
        }
        resource_list = self._get_method_path_uri_list(path, stage)
        self._add_vpc_resource_policy_for_method(blacklist_dict, "StringEquals", resource_list)
        if not SwaggerEditor._validate_list_property_is_resolved(source_vpc_whitelist):
            raise InvalidDocumentException(
                [
                    InvalidTemplateException(
                        "SourceVpcWhitelist must be a list of strings. Use IntrinsicVpcWhitelist instead for values that use Intrinsic Functions"
                    )
                ]
            )
        whitelist_dict = {
            "StringEndpointList": source_vpc_whitelist,
            "IntrinsicVpcList": source_vpc_intrinsic_whitelist,
            "IntrinsicVpceList": source_vpce_intrinsic_whitelist,
        }
        self._add_vpc_resource_policy_for_method(whitelist_dict, "StringNotEquals", resource_list)
        # Write the accumulated policy into the x-amazon-apigateway-policy extension
        self._doc[self._X_APIGW_POLICY] = self.resource_policy
    def add_custom_statements(self, custom_statements):
        """
        Merges user-provided policy statements into the resource policy and writes the
        result to the x-amazon-apigateway-policy section of the document.

        :param custom_statements: policy statement (dict) or list of statements to add; None is a no-op
        """
        self._add_custom_statement(custom_statements)
        self._doc[self._X_APIGW_POLICY] = self.resource_policy
def _add_iam_resource_policy_for_method(self, policy_list, effect, resource_list):
"""
This method generates a policy statement to grant/deny specific IAM users access to the API method and
appends it to the swagger under `x-amazon-apigateway-policy`
:raises ValueError: If the effect passed in does not match the allowed values.
"""
if not policy_list:
return
if effect not in ["Allow", "Deny"]:
raise ValueError("Effect must be one of {}".format(["Allow", "Deny"]))
if not isinstance(policy_list, (dict, list)):
raise InvalidDocumentException(
[InvalidTemplateException("Type of '{}' must be a list or dictionary".format(policy_list))]
)
if not isinstance(policy_list, list):
policy_list = [policy_list]
self.resource_policy["Version"] = "2012-10-17"
policy_statement = Py27Dict()
policy_statement["Effect"] = effect
policy_statement["Action"] = "execute-api:Invoke"
policy_statement["Resource"] = resource_list
policy_statement["Principal"] = Py27Dict({"AWS": policy_list})
if self.resource_policy.get("Statement") is None:
self.resource_policy["Statement"] = policy_statement
else:
statement = self.resource_policy["Statement"]
if not isinstance(statement, list):
statement = [statement]
statement.extend([policy_statement])
self.resource_policy["Statement"] = statement
def _get_method_path_uri_list(self, path, stage):
"""
It turns out that APIGW doesn't like trailing slashes in paths (#665)
and removes as a part of their behavior, but this isn't documented.
The regex removes the trailing slash to ensure the permission works as intended
"""
methods = list(self.get_path(path).keys())
uri_list = []
path = SwaggerEditor.get_path_without_trailing_slash(path)
for m in methods:
method = "*" if (m.lower() == self._X_ANY_METHOD or m.lower() == "any") else m.upper()
resource = "execute-api:/${__Stage__}/" + method + path
resource = (
Py27UniStr(resource) if isinstance(method, Py27UniStr) or isinstance(path, Py27UniStr) else resource
)
resource = fnSub(resource, {"__Stage__": stage})
uri_list.extend([resource])
return uri_list
def _add_ip_resource_policy_for_method(self, ip_list, conditional, resource_list):
"""
This method generates a policy statement to grant/deny specific IP address ranges access to the API method and
appends it to the swagger under `x-amazon-apigateway-policy`
:raises ValueError: If the conditional passed in does not match the allowed values.
"""
if not ip_list:
return
if not isinstance(ip_list, list):
ip_list = [ip_list]
if conditional not in ["IpAddress", "NotIpAddress"]:
raise ValueError("Conditional must be one of {}".format(["IpAddress", "NotIpAddress"]))
self.resource_policy["Version"] = "2012-10-17"
allow_statement = Py27Dict()
allow_statement["Effect"] = "Allow"
allow_statement["Action"] = "execute-api:Invoke"
allow_statement["Resource"] = resource_list
allow_statement["Principal"] = "*"
deny_statement = Py27Dict()
deny_statement["Effect"] = "Deny"
deny_statement["Action"] = "execute-api:Invoke"
deny_statement["Resource"] = resource_list
deny_statement["Principal"] = "*"
deny_statement["Condition"] = {conditional: {"aws:SourceIp": ip_list}}
if self.resource_policy.get("Statement") is None:
self.resource_policy["Statement"] = [allow_statement, deny_statement]
else:
statement = self.resource_policy["Statement"]
if not isinstance(statement, list):
statement = [statement]
if allow_statement not in statement:
statement.extend([allow_statement])
if deny_statement not in statement:
statement.extend([deny_statement])
self.resource_policy["Statement"] = statement
    def _add_vpc_resource_policy_for_method(self, endpoint_dict, conditional, resource_list):
        """
        This method generates a policy statement to grant/deny specific VPC/VPCE access to the API method and
        appends it to the swagger under `x-amazon-apigateway-policy`

        :param dict endpoint_dict: dictionary with "StringEndpointList" (resolved "vpc-"/"vpce-" id strings)
            plus "IntrinsicVpcList" and "IntrinsicVpceList" (unresolved intrinsic values)
        :param string conditional: "StringEquals" (deny listed endpoints) or "StringNotEquals" (deny everything else)
        :param list resource_list: execute-api resource ARNs the statements apply to
        :raises ValueError: If the conditional passed in does not match the allowed values.
        """
        if conditional not in ["StringNotEquals", "StringEquals"]:
            raise ValueError("Conditional must be one of {}".format(["StringNotEquals", "StringEquals"]))
        condition = Py27Dict()
        string_endpoint_list = endpoint_dict.get("StringEndpointList")
        intrinsic_vpc_endpoint_list = endpoint_dict.get("IntrinsicVpcList")
        intrinsic_vpce_endpoint_list = endpoint_dict.get("IntrinsicVpceList")
        if string_endpoint_list is not None:
            # Sort resolved ids into VPC vs VPC-endpoint buckets by their prefix
            vpce_regex = r"^vpce-"
            vpc_regex = r"^vpc-"
            vpc_list = []
            vpce_list = []
            for endpoint in string_endpoint_list:
                if re.match(vpce_regex, endpoint):
                    vpce_list.append(endpoint)
                if re.match(vpc_regex, endpoint):
                    vpc_list.append(endpoint)
            if vpc_list:
                condition.setdefault("aws:SourceVpc", []).extend(vpc_list)
            if vpce_list:
                condition.setdefault("aws:SourceVpce", []).extend(vpce_list)
        # Intrinsic values cannot be inspected, so the caller pre-sorted them into vpc/vpce lists
        if intrinsic_vpc_endpoint_list is not None:
            condition.setdefault("aws:SourceVpc", []).extend(intrinsic_vpc_endpoint_list)
        if intrinsic_vpce_endpoint_list is not None:
            condition.setdefault("aws:SourceVpce", []).extend(intrinsic_vpce_endpoint_list)
        # Nothing to restrict on; emit no statements
        if (not condition.get("aws:SourceVpc", [])) and (not condition.get("aws:SourceVpce", [])):
            return
        self.resource_policy["Version"] = "2012-10-17"
        # Pattern: allow everyone, then deny callers matching the VPC/VPCE condition.
        # Statements are built key-by-key as Py27Dicts to preserve ordering.
        allow_statement = Py27Dict()
        allow_statement["Effect"] = "Allow"
        allow_statement["Action"] = "execute-api:Invoke"
        allow_statement["Resource"] = resource_list
        allow_statement["Principal"] = "*"
        deny_statement = Py27Dict()
        deny_statement["Effect"] = "Deny"
        deny_statement["Action"] = "execute-api:Invoke"
        deny_statement["Resource"] = resource_list
        deny_statement["Principal"] = "*"
        deny_statement["Condition"] = {conditional: condition}
        if self.resource_policy.get("Statement") is None:
            self.resource_policy["Statement"] = [allow_statement, deny_statement]
        else:
            statement = self.resource_policy["Statement"]
            if not isinstance(statement, list):
                statement = [statement]
            # Avoid duplicating identical statements across repeated calls
            if allow_statement not in statement:
                statement.extend([allow_statement])
            if deny_statement not in statement:
                statement.extend([deny_statement])
            self.resource_policy["Statement"] = statement
def _add_custom_statement(self, custom_statements):
if custom_statements is None:
return
self.resource_policy["Version"] = "2012-10-17"
if self.resource_policy.get("Statement") is None:
self.resource_policy["Statement"] = custom_statements
else:
if not isinstance(custom_statements, list):
custom_statements = [custom_statements]
statement = self.resource_policy["Statement"]
if not isinstance(statement, list):
statement = [statement]
for s in custom_statements:
if s not in statement:
statement.append(s)
self.resource_policy["Statement"] = statement
    def add_request_parameters_to_method(self, path, method_name, request_parameters):
        """
        Add Parameters to Swagger.

        :param string path: Path name
        :param string method_name: Method name
        :param list request_parameters: List of parameter dicts, each with "Name"
            (of the form "method.request.<location>.<name>"), "Required" and "Caching" keys
        :return:
        """
        normalized_method_name = self._normalize_method_name(method_name)
        # A method may have multiple definitions (e.g. inside a Fn::If block)
        for method_definition in self.get_method_contents(self.get_path(path)[normalized_method_name]):
            # Skip definitions without an integration (could be AWS::NoValue)
            if not self.method_definition_has_integration(method_definition):
                continue
            existing_parameters = method_definition.get("parameters", [])
            for request_parameter in request_parameters:
                # Strip the "method.request." prefix, then split into location and name
                parameter_name = request_parameter["Name"]
                location_name = parameter_name.replace("method.request.", "")
                location, name = location_name.split(".", 1)
                # Swagger uses "query" where API Gateway uses "querystring"
                if location == "querystring":
                    location = "query"
                # create parameter as py27 dict
                # and insert keys one by one to preserve input orders
                parameter = Py27Dict()
                parameter["in"] = location
                parameter["name"] = name
                parameter["required"] = request_parameter["Required"]
                parameter["type"] = "string"
                existing_parameters.append(parameter)
                # Cached parameters are also registered as integration cacheKeyParameters
                if request_parameter["Caching"]:
                    integration = method_definition[self._X_APIGW_INTEGRATION]
                    cache_parameters = integration.get(self._CACHE_KEY_PARAMETERS, [])
                    cache_parameters.append(parameter_name)
                    integration[self._CACHE_KEY_PARAMETERS] = cache_parameters
            method_definition["parameters"] = existing_parameters
    @property
    def swagger(self):
        """
        Returns a **copy** of the Swagger document as a dictionary.

        :return dict: Dictionary containing the Swagger document
        """
        # Make sure any changes to the paths are reflected back in output
        # iterate keys to make sure if "paths" is of Py27UniStr type, it won't be overriden as str
        for key in self._doc.keys():
            if key == "paths":
                self._doc[key] = self.paths
        if self.security_definitions:
            self._doc["securityDefinitions"] = self.security_definitions
        if self.gateway_responses:
            self._doc[self._X_APIGW_GATEWAY_RESPONSES] = self.gateway_responses
        if self.definitions:
            self._doc["definitions"] = self.definitions
        # Deep copy so callers cannot mutate the editor's internal state
        return copy.deepcopy(self._doc)
@staticmethod
def is_valid(data):
"""
Checks if the input data is a Swagger document
:param dict data: Data to be validated
:return: True, if data is a Swagger
"""
if bool(data) and isinstance(data, dict) and isinstance(data.get("paths"), dict):
if bool(data.get("swagger")):
return True
elif bool(data.get("openapi")):
return SwaggerEditor.safe_compare_regex_with_string(
SwaggerEditor.get_openapi_version_3_regex(), data["openapi"]
)
return False
@staticmethod
def validate_is_dict(obj, exception_message):
"""
Throws exception if obj is not a dict
:param obj: object being validated
:param exception_message: message to include in exception if obj is not a dict
"""
if not isinstance(obj, dict):
raise InvalidDocumentException([InvalidTemplateException(exception_message)])
@staticmethod
def validate_path_item_is_dict(path_item, path):
"""
Throws exception if path_item is not a dict
:param path_item: path_item (value at the path) being validated
:param path: path name
"""
SwaggerEditor.validate_is_dict(
path_item, "Value of '{}' path must be a dictionary according to Swagger spec.".format(path)
)
    @staticmethod
    def gen_skeleton():
        """
        Method to make an empty swagger file, with just some basic structure. Just enough to pass validator.
        :return dict: Dictionary of a skeleton swagger document
        """
        # Build as Py27Dict, inserting keys one at a time to preserve key order
        # (same convention as add_request_parameters_to_method).
        skeleton = Py27Dict()
        skeleton["swagger"] = "2.0"
        skeleton["info"] = Py27Dict()
        skeleton["info"]["version"] = "1.0"
        # Title defaults to the CloudFormation stack name.
        skeleton["info"]["title"] = ref("AWS::StackName")
        skeleton["paths"] = Py27Dict()
        return skeleton
@staticmethod
def _get_authorization_scopes(authorizers, default_authorizer):
"""
Returns auth scopes for an authorizer if present
:param authorizers: authorizer definitions
:param default_authorizer: name of the default authorizer
"""
if authorizers is not None:
if (
authorizers.get(default_authorizer)
and authorizers[default_authorizer].get("AuthorizationScopes") is not None
):
return authorizers[default_authorizer].get("AuthorizationScopes")
return []
@staticmethod
def _normalize_method_name(method):
"""
Returns a lower case, normalized version of HTTP Method. It also know how to handle API Gateway specific methods
like "ANY"
NOTE: Always normalize before using the `method` value passed in as input
:param string method: Name of the HTTP Method
:return string: Normalized method name
"""
if not method or not isinstance(method, str):
return method
method = method.lower()
if method == "any":
return SwaggerEditor._X_ANY_METHOD
else:
return method
@staticmethod
def get_openapi_versions_supported_regex():
openapi_version_supported_regex = r"\A[2-3](\.\d)(\.\d)?$"
return openapi_version_supported_regex
@staticmethod
def get_openapi_version_3_regex():
openapi_version_3_regex = r"\A3(\.\d)(\.\d)?$"
return openapi_version_3_regex
@staticmethod
def safe_compare_regex_with_string(regex, data):
return re.match(regex, str(data)) is not None
@staticmethod
def get_path_without_trailing_slash(path):
sub = re.sub(r"{([a-zA-Z0-9._-]+|[a-zA-Z0-9._-]+\+|proxy\+)}", "*", path)
if isinstance(path, Py27UniStr):
return Py27UniStr(sub)
return sub
@staticmethod
def get_validator_name(validate_body, validate_parameters):
"""
Get a readable path name to use as validator name
:param boolean validate_body: Boolean if validate body
:param boolean validate_request: Boolean if validate request
:return string: Normalized validator name
"""
if validate_body and validate_parameters:
return "body-and-params"
if validate_body and not validate_parameters:
return "body-only"
if not validate_body and validate_parameters:
return "params-only"
return "no-validation"
@staticmethod
def _validate_list_property_is_resolved(property_list):
"""
Validate if the values of a Property List are all of type string
:param property_list: Value of a Property List
:return bool: True if the property_list is all of type string otherwise False
"""
if property_list is not None and not all(isinstance(x, str) for x in property_list):
return False
return True
| false | true |
f7f49ae0612ea6ce2c2d85e5627d99592aab56dd | 9,240 | py | Python | venv/lib/python3.9/site-packages/jupyter_client/threaded.py | CMU-IDS-2022/final-project-the-evaluators | 3b9262ad1a0f7315208a94a05ea1ce38e679d01d | [
"BSD-3-Clause"
] | null | null | null | venv/lib/python3.9/site-packages/jupyter_client/threaded.py | CMU-IDS-2022/final-project-the-evaluators | 3b9262ad1a0f7315208a94a05ea1ce38e679d01d | [
"BSD-3-Clause"
] | null | null | null | venv/lib/python3.9/site-packages/jupyter_client/threaded.py | CMU-IDS-2022/final-project-the-evaluators | 3b9262ad1a0f7315208a94a05ea1ce38e679d01d | [
"BSD-3-Clause"
] | null | null | null | """ Defines a KernelClient that provides thread-safe sockets with async callbacks on message
replies.
"""
import asyncio
import atexit
import errno
import time
from threading import Event
from threading import Thread
from typing import Any
from typing import Awaitable
from typing import Dict
from typing import List
from typing import Optional
from typing import Union
import zmq
from traitlets import Instance # type: ignore
from traitlets import Type
from zmq import ZMQError
from zmq.eventloop import ioloop
from zmq.eventloop import zmqstream
from .session import Session
from jupyter_client import KernelClient
from jupyter_client.channels import HBChannel
# Local imports
# import ZMQError in top-level namespace, to avoid ugly attribute-error messages
# during garbage collection of threads at exit
async def get_msg(msg: Awaitable) -> Union[List[bytes], List[zmq.Message]]:
    """Resolve an awaitable message and return its payload."""
    resolved = await msg
    return resolved
class ThreadedZMQSocketChannel(object):
    """A ZMQ socket invoking a callback in the ioloop"""
    # Channel state; all socket traffic happens on the IOLoop's thread.
    session = None
    socket = None
    ioloop = None
    stream = None
    # Optional hook: callable(msg) invoked on every received message before
    # call_handlers (used by the client to sniff the kernel_info reply).
    _inspect = None
    def __init__(
        self,
        socket: Optional[zmq.Socket],
        session: Optional[Session],
        loop: Optional[zmq.eventloop.ioloop.ZMQIOLoop],
    ) -> None:
        """Create a channel.
        Parameters
        ----------
        socket : :class:`zmq.Socket`
            The ZMQ socket to use.
        session : :class:`session.Session`
            The session to use.
        loop
            A pyzmq ioloop to connect the socket to using a ZMQStream
        """
        super().__init__()
        self.socket = socket
        self.session = session
        self.ioloop = loop
        evt = Event()
        # The ZMQStream is created on the IOLoop's own thread; block until the
        # callback has run so self.stream is usable when __init__ returns.
        def setup_stream():
            self.stream = zmqstream.ZMQStream(self.socket, self.ioloop)
            self.stream.on_recv(self._handle_recv)
            evt.set()
        assert self.ioloop is not None
        self.ioloop.add_callback(setup_stream)
        evt.wait()
    # Lifecycle flag toggled by start()/stop(); not tied to the socket state.
    _is_alive = False
    def is_alive(self) -> bool:
        return self._is_alive
    def start(self) -> None:
        self._is_alive = True
    def stop(self) -> None:
        self._is_alive = False
    def close(self) -> None:
        # Close with linger=0 so pending outbound messages are dropped.
        if self.socket is not None:
            try:
                self.socket.close(linger=0)
            except Exception:
                # Best-effort: the socket may already be gone during shutdown.
                pass
            self.socket = None
    def send(self, msg: Dict[str, Any]) -> None:
        """Queue a message to be sent from the IOLoop's thread.
        Parameters
        ----------
        msg : message to send
        This is threadsafe, as it uses IOLoop.add_callback to give the loop's
        thread control of the action.
        """
        def thread_send():
            self.session.send(self.stream, msg)
        assert self.ioloop is not None
        self.ioloop.add_callback(thread_send)
    def _handle_recv(self, future_msg: Awaitable) -> None:
        """Callback for stream.on_recv.
        Unpacks message, and calls handlers with it.
        """
        assert self.ioloop is not None
        # NOTE(review): resolves the future via run_until_complete on the
        # asyncio loop backing this IOLoop -- assumes the future is already
        # resolvable at this point; confirm this cannot re-enter a running loop.
        msg_list = self.ioloop._asyncio_event_loop.run_until_complete(get_msg(future_msg))
        assert self.session is not None
        ident, smsg = self.session.feed_identities(msg_list)
        msg = self.session.deserialize(smsg)
        # let client inspect messages
        if self._inspect:
            self._inspect(msg)
        self.call_handlers(msg)
    def call_handlers(self, msg: Dict[str, Any]) -> None:
        """This method is called in the ioloop thread when a message arrives.
        Subclasses should override this method to handle incoming messages.
        It is important to remember that this method is called in the thread
        so that some logic must be done to ensure that the application level
        handlers are called in the application thread.
        """
        pass
    def process_events(self) -> None:
        """Subclasses should override this with a method
        processing any pending GUI events.
        """
        pass
    def flush(self, timeout: float = 1.0) -> None:
        """Immediately processes all pending messages on this channel.
        This is only used for the IOPub channel.
        Callers should use this method to ensure that :meth:`call_handlers`
        has been called for all messages that have been received on the
        0MQ SUB socket of this channel.
        This method is thread safe.
        Parameters
        ----------
        timeout : float, optional
            The maximum amount of time to spend flushing, in seconds. The
            default is one second.
        """
        # We do the IOLoop callback process twice to ensure that the IOLoop
        # gets to perform at least one full poll.
        stop_time = time.time() + timeout
        assert self.ioloop is not None
        for _ in range(2):
            self._flushed = False
            self.ioloop.add_callback(self._flush)
            # Poll (with short sleeps) until the loop thread has run _flush
            # or the timeout budget is exhausted.
            while not self._flushed and time.time() < stop_time:
                time.sleep(0.01)
    def _flush(self) -> None:
        """Callback for :method:`self.flush`."""
        assert self.stream is not None
        self.stream.flush()
        self._flushed = True
class IOLoopThread(Thread):
    """Run a pyzmq ioloop in a thread to send and receive messages"""
    # Set at interpreter exit so run() can distinguish shutdown noise from
    # genuine errors.
    _exiting = False
    ioloop = None
    def __init__(self):
        super().__init__()
        # Daemon thread: never blocks interpreter exit.
        self.daemon = True
    @staticmethod
    @atexit.register
    def _notice_exit() -> None:
        # Class definitions can be torn down during interpreter shutdown.
        # We only need to set _exiting flag if this hasn't happened.
        if IOLoopThread is not None:
            IOLoopThread._exiting = True
    def start(self) -> None:
        """Start the IOLoop thread
        Don't return until self.ioloop is defined,
        which is created in the thread
        """
        self._start_event = Event()
        Thread.start(self)
        self._start_event.wait()
    def run(self) -> None:
        """Run my loop, ignoring EINTR events in the poller"""
        # Each thread needs its own asyncio loop to back the pyzmq IOLoop.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        self.ioloop = ioloop.IOLoop()
        self.ioloop._asyncio_event_loop = loop
        # signal that self.ioloop is defined
        self._start_event.set()
        while True:
            try:
                self.ioloop.start()
            except ZMQError as e:
                if e.errno == errno.EINTR:
                    # Interrupted system call: restart the loop.
                    continue
                else:
                    raise
            except Exception:
                if self._exiting:
                    # Errors during interpreter shutdown are expected; swallow.
                    break
                else:
                    raise
            else:
                break
    def stop(self) -> None:
        """Stop the channel's event loop and join its thread.
        This calls :meth:`~threading.Thread.join` and returns when the thread
        terminates. :class:`RuntimeError` will be raised if
        :meth:`~threading.Thread.start` is called again.
        """
        if self.ioloop is not None:
            # Ask the loop to stop from its own thread, then wait for exit.
            self.ioloop.add_callback(self.ioloop.stop)
        self.join()
        self.close()
        self.ioloop = None
    def close(self) -> None:
        if self.ioloop is not None:
            try:
                self.ioloop.close(all_fds=True)
            except Exception:
                # Best-effort cleanup; the loop may already be closed.
                pass
class ThreadedKernelClient(KernelClient):
    """A KernelClient that provides thread-safe sockets with async callbacks on message replies."""
    @property
    def ioloop(self):
        # The loop object lives on the dedicated IOLoopThread.
        return self.ioloop_thread.ioloop
    ioloop_thread = Instance(IOLoopThread, allow_none=True)
    def start_channels(
        self,
        shell: bool = True,
        iopub: bool = True,
        stdin: bool = True,
        hb: bool = True,
        control: bool = True,
    ) -> None:
        # Spin up the IO thread first; channels attach their streams to it.
        self.ioloop_thread = IOLoopThread()
        self.ioloop_thread.start()
        if shell:
            # Sniff shell traffic until the kernel_info reply is seen.
            self.shell_channel._inspect = self._check_kernel_info_reply
        super().start_channels(shell, iopub, stdin, hb, control)
    def _check_kernel_info_reply(self, msg: Dict[str, Any]) -> None:
        """This is run in the ioloop thread when the kernel info reply is received"""
        if msg["msg_type"] == "kernel_info_reply":
            self._handle_kernel_info_reply(msg)
            # One-shot hook: detach once the reply has been handled.
            self.shell_channel._inspect = None
    def stop_channels(self) -> None:
        super().stop_channels()
        if self.ioloop_thread.is_alive():
            self.ioloop_thread.stop()
    # All channels use the threaded implementation except the heartbeat.
    iopub_channel_class = Type(ThreadedZMQSocketChannel)
    shell_channel_class = Type(ThreadedZMQSocketChannel)
    stdin_channel_class = Type(ThreadedZMQSocketChannel)
    hb_channel_class = Type(HBChannel)
    control_channel_class = Type(ThreadedZMQSocketChannel)
    def is_alive(self) -> bool:
        """Is the kernel process still running?"""
        if self._hb_channel is not None:
            # We don't have access to the KernelManager,
            # so we use the heartbeat.
            return self._hb_channel.is_beating()
        # no heartbeat and not local, we can't tell if it's running,
        # so naively return True
        return True
| 30.394737 | 99 | 0.616667 | import asyncio
import atexit
import errno
import time
from threading import Event
from threading import Thread
from typing import Any
from typing import Awaitable
from typing import Dict
from typing import List
from typing import Optional
from typing import Union
import zmq
from traitlets import Instance
from traitlets import Type
from zmq import ZMQError
from zmq.eventloop import ioloop
from zmq.eventloop import zmqstream
from .session import Session
from jupyter_client import KernelClient
from jupyter_client.channels import HBChannel
async def get_msg(msg: Awaitable) -> Union[List[bytes], List[zmq.Message]]:
return await msg
class ThreadedZMQSocketChannel(object):
session = None
socket = None
ioloop = None
stream = None
_inspect = None
def __init__(
self,
socket: Optional[zmq.Socket],
session: Optional[Session],
loop: Optional[zmq.eventloop.ioloop.ZMQIOLoop],
) -> None:
super().__init__()
self.socket = socket
self.session = session
self.ioloop = loop
evt = Event()
def setup_stream():
self.stream = zmqstream.ZMQStream(self.socket, self.ioloop)
self.stream.on_recv(self._handle_recv)
evt.set()
assert self.ioloop is not None
self.ioloop.add_callback(setup_stream)
evt.wait()
_is_alive = False
def is_alive(self) -> bool:
return self._is_alive
def start(self) -> None:
self._is_alive = True
def stop(self) -> None:
self._is_alive = False
def close(self) -> None:
if self.socket is not None:
try:
self.socket.close(linger=0)
except Exception:
pass
self.socket = None
def send(self, msg: Dict[str, Any]) -> None:
def thread_send():
self.session.send(self.stream, msg)
assert self.ioloop is not None
self.ioloop.add_callback(thread_send)
def _handle_recv(self, future_msg: Awaitable) -> None:
assert self.ioloop is not None
msg_list = self.ioloop._asyncio_event_loop.run_until_complete(get_msg(future_msg))
assert self.session is not None
ident, smsg = self.session.feed_identities(msg_list)
msg = self.session.deserialize(smsg)
if self._inspect:
self._inspect(msg)
self.call_handlers(msg)
def call_handlers(self, msg: Dict[str, Any]) -> None:
pass
def process_events(self) -> None:
pass
def flush(self, timeout: float = 1.0) -> None:
stop_time = time.time() + timeout
assert self.ioloop is not None
for _ in range(2):
self._flushed = False
self.ioloop.add_callback(self._flush)
while not self._flushed and time.time() < stop_time:
time.sleep(0.01)
def _flush(self) -> None:
assert self.stream is not None
self.stream.flush()
self._flushed = True
class IOLoopThread(Thread):
_exiting = False
ioloop = None
def __init__(self):
super().__init__()
self.daemon = True
@staticmethod
@atexit.register
def _notice_exit() -> None:
if IOLoopThread is not None:
IOLoopThread._exiting = True
def start(self) -> None:
self._start_event = Event()
Thread.start(self)
self._start_event.wait()
def run(self) -> None:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
self.ioloop = ioloop.IOLoop()
self.ioloop._asyncio_event_loop = loop
# signal that self.ioloop is defined
self._start_event.set()
while True:
try:
self.ioloop.start()
except ZMQError as e:
if e.errno == errno.EINTR:
continue
else:
raise
except Exception:
if self._exiting:
break
else:
raise
else:
break
def stop(self) -> None:
if self.ioloop is not None:
self.ioloop.add_callback(self.ioloop.stop)
self.join()
self.close()
self.ioloop = None
def close(self) -> None:
if self.ioloop is not None:
try:
self.ioloop.close(all_fds=True)
except Exception:
pass
class ThreadedKernelClient(KernelClient):
@property
def ioloop(self):
return self.ioloop_thread.ioloop
ioloop_thread = Instance(IOLoopThread, allow_none=True)
def start_channels(
self,
shell: bool = True,
iopub: bool = True,
stdin: bool = True,
hb: bool = True,
control: bool = True,
) -> None:
self.ioloop_thread = IOLoopThread()
self.ioloop_thread.start()
if shell:
self.shell_channel._inspect = self._check_kernel_info_reply
super().start_channels(shell, iopub, stdin, hb, control)
def _check_kernel_info_reply(self, msg: Dict[str, Any]) -> None:
if msg["msg_type"] == "kernel_info_reply":
self._handle_kernel_info_reply(msg)
self.shell_channel._inspect = None
def stop_channels(self) -> None:
super().stop_channels()
if self.ioloop_thread.is_alive():
self.ioloop_thread.stop()
iopub_channel_class = Type(ThreadedZMQSocketChannel)
shell_channel_class = Type(ThreadedZMQSocketChannel)
stdin_channel_class = Type(ThreadedZMQSocketChannel)
hb_channel_class = Type(HBChannel)
control_channel_class = Type(ThreadedZMQSocketChannel)
def is_alive(self) -> bool:
if self._hb_channel is not None:
# We don't have access to the KernelManager,
return self._hb_channel.is_beating()
return True
| true | true |
f7f49b05f568e898e2e49fcbc9f65349029a35c5 | 5,589 | py | Python | tests/python/gaia-ui-tests/gaiatest/tests/functional/ftu/test_ftu_with_tour.py | BReduardokramer/gaia | c00302cdcd435ab193e8365917cfc6abac9e4f2e | [
"Apache-2.0"
] | null | null | null | tests/python/gaia-ui-tests/gaiatest/tests/functional/ftu/test_ftu_with_tour.py | BReduardokramer/gaia | c00302cdcd435ab193e8365917cfc6abac9e4f2e | [
"Apache-2.0"
] | null | null | null | tests/python/gaia-ui-tests/gaiatest/tests/functional/ftu/test_ftu_with_tour.py | BReduardokramer/gaia | c00302cdcd435ab193e8365917cfc6abac9e4f2e | [
"Apache-2.0"
] | null | null | null | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from marionette.by import By
from gaiatest import GaiaTestCase
class TestFtu(GaiaTestCase):
    """Walks the First Time Use (FTU) wizard end-to-end, takes the phone
    tour, and verifies the instructional header text of each tour step."""
    _next_button_locator = (By.ID, 'forward')
    _section_languages_locator = (By.ID, 'languages')
    _section_cell_data_locator = (By.ID, 'data_3g')
    _section_wifi_locator = (By.ID, 'wifi')
    _found_wifi_networks_locator = (By.CSS_SELECTOR, 'ul#networks-list li')
    _section_date_time_locator = (By.ID, 'date_and_time')
    _section_geolocation_locator = (By.ID, 'geolocation')
    _section_import_contacts_locator = (By.ID, 'import_contacts')
    _section_ayr_locator = (By.ID, 'about-your-rights')
    _section_welcome_browser_locator = (By.ID, 'welcome_browser')
    _section_browser_privacy_locator = (By.ID, 'browser_privacy')
    _section_finish_locator = (By.ID, 'finish-screen')
    _take_tour_button_locator = (By.ID, 'lets-go-button')
    # Section Tour
    _step1_header_locator = (By.ID, 'step1Header')
    _step2_header_locator = (By.ID, 'step2Header')
    _step3_header_locator = (By.ID, 'step3Header')
    _step4_header_locator = (By.ID, 'step4Header')
    _tour_next_button_locator = (By.ID, 'forwardTutorial')
    _tour_back_button_locator = (By.ID, 'backTutorial')
    # Section Tutorial Finish
    _section_tutorial_finish_locator = (By.ID, 'tutorialFinish')
    _lets_go_button_locator = (By.ID, 'tutorialFinished')
    def setUp(self):
        GaiaTestCase.setUp(self)
        # launch the First Time User app
        self.app = self.apps.launch('FTU')
        self.wait_for_condition(lambda m: self.data_layer.is_wifi_enabled)
    def _tap_next_and_wait_for(self, section_locator):
        # Advance one wizard step and wait for the next section to render.
        self.marionette.find_element(*self._next_button_locator).tap()
        self.wait_for_element_displayed(*section_locator)
    def _assert_tour_step_header(self, header_locator, expected_text):
        # Each tour step shows its instructions in a header element.
        self.wait_for_element_displayed(*header_locator)
        self.assertEqual(
            self.marionette.find_element(*header_locator).text, expected_text)
    def test_ftu_with_tour(self):
        # Go through the FTU setup as quickly as possible to get to the Tour
        self.wait_for_element_displayed(*self._section_languages_locator)
        self._tap_next_and_wait_for(self._section_cell_data_locator)
        self._tap_next_and_wait_for(self._section_wifi_locator)
        # The scanning for networks messes with the timing of taps
        self.wait_for_condition(
            lambda m: len(m.find_elements(*self._found_wifi_networks_locator)) > 0)
        self._tap_next_and_wait_for(self._section_date_time_locator)
        self._tap_next_and_wait_for(self._section_geolocation_locator)
        self._tap_next_and_wait_for(self._section_import_contacts_locator)
        self._tap_next_and_wait_for(self._section_welcome_browser_locator)
        self._tap_next_and_wait_for(self._section_browser_privacy_locator)
        self._tap_next_and_wait_for(self._section_finish_locator)
        # Take the tour
        self.marionette.find_element(*self._take_tour_button_locator).tap()
        # Walk through the tour, verifying each step's instructions.
        self._assert_tour_step_header(
            self._step1_header_locator,
            "Swipe from right to left to browse your apps.")
        # First time we see the next button we need to wait for it
        self.wait_for_element_displayed(*self._tour_next_button_locator)
        self.marionette.find_element(*self._tour_next_button_locator).tap()
        self._assert_tour_step_header(
            self._step2_header_locator,
            "Tap and hold on an icon to delete or move it.")
        self.marionette.find_element(*self._tour_next_button_locator).tap()
        self._assert_tour_step_header(
            self._step3_header_locator,
            "Swipe down to access recent notifications, credit information and settings.")
        self.marionette.find_element(*self._tour_next_button_locator).tap()
        self._assert_tour_step_header(
            self._step4_header_locator,
            "Tap and hold the home button to browse and close recent apps.")
        # Try going back a step
        self.marionette.find_element(*self._tour_back_button_locator).tap()
        self.wait_for_element_displayed(*self._step3_header_locator)
        self.marionette.find_element(*self._tour_next_button_locator).tap()
        self.wait_for_element_displayed(*self._step4_header_locator)
        self.marionette.find_element(*self._tour_next_button_locator).tap()
        self.wait_for_element_displayed(*self._section_tutorial_finish_locator)
        self.marionette.find_element(*self._lets_go_button_locator).tap()
        # Switch back to top level now that FTU app is gone
        self.marionette.switch_to_frame()
| 47.769231 | 103 | 0.724638 |
from marionette.by import By
from gaiatest import GaiaTestCase
class TestFtu(GaiaTestCase):
_next_button_locator = (By.ID, 'forward')
_section_languages_locator = (By.ID, 'languages')
_section_cell_data_locator = (By.ID, 'data_3g')
_section_wifi_locator = (By.ID, 'wifi')
_found_wifi_networks_locator = (By.CSS_SELECTOR, 'ul#networks-list li')
_section_date_time_locator = (By.ID, 'date_and_time')
_section_geolocation_locator = (By.ID, 'geolocation')
_section_import_contacts_locator = (By.ID, 'import_contacts')
_section_ayr_locator = (By.ID, 'about-your-rights')
_section_welcome_browser_locator = (By.ID, 'welcome_browser')
_section_browser_privacy_locator = (By.ID, 'browser_privacy')
_section_finish_locator = (By.ID, 'finish-screen')
_take_tour_button_locator = (By.ID, 'lets-go-button')
_step1_header_locator = (By.ID, 'step1Header')
_step2_header_locator = (By.ID, 'step2Header')
_step3_header_locator = (By.ID, 'step3Header')
_step4_header_locator = (By.ID, 'step4Header')
_tour_next_button_locator = (By.ID, 'forwardTutorial')
_tour_back_button_locator = (By.ID, 'backTutorial')
_section_tutorial_finish_locator = (By.ID, 'tutorialFinish')
_lets_go_button_locator = (By.ID, 'tutorialFinished')
def setUp(self):
GaiaTestCase.setUp(self)
self.app = self.apps.launch('FTU')
self.wait_for_condition(lambda m: self.data_layer.is_wifi_enabled)
def test_ftu_with_tour(self):
self.wait_for_element_displayed(*self._section_languages_locator)
self.marionette.find_element(*self._next_button_locator).tap()
self.wait_for_element_displayed(*self._section_cell_data_locator)
self.marionette.find_element(*self._next_button_locator).tap()
self.wait_for_element_displayed(*self._section_wifi_locator)
self.wait_for_condition(lambda m: len(m.find_elements(*self._found_wifi_networks_locator)) > 0)
self.marionette.find_element(*self._next_button_locator).tap()
self.wait_for_element_displayed(*self._section_date_time_locator)
self.marionette.find_element(*self._next_button_locator).tap()
self.wait_for_element_displayed(*self._section_geolocation_locator)
self.marionette.find_element(*self._next_button_locator).tap()
self.wait_for_element_displayed(*self._section_import_contacts_locator)
self.marionette.find_element(*self._next_button_locator).tap()
self.wait_for_element_displayed(*self._section_welcome_browser_locator)
self.marionette.find_element(*self._next_button_locator).tap()
self.wait_for_element_displayed(*self._section_browser_privacy_locator)
self.marionette.find_element(*self._next_button_locator).tap()
self.wait_for_element_displayed(*self._section_finish_locator)
self.marionette.find_element(*self._take_tour_button_locator).tap()
self.wait_for_element_displayed(*self._step1_header_locator)
self.assertEqual(self.marionette.find_element(*self._step1_header_locator).text,
"Swipe from right to left to browse your apps.")
self.wait_for_element_displayed(*self._tour_next_button_locator)
self.marionette.find_element(*self._tour_next_button_locator).tap()
self.wait_for_element_displayed(*self._step2_header_locator)
self.assertEqual(self.marionette.find_element(*self._step2_header_locator).text,
"Tap and hold on an icon to delete or move it.")
self.marionette.find_element(*self._tour_next_button_locator).tap()
self.wait_for_element_displayed(*self._step3_header_locator)
self.assertEqual(self.marionette.find_element(*self._step3_header_locator).text,
"Swipe down to access recent notifications, credit information and settings.")
self.marionette.find_element(*self._tour_next_button_locator).tap()
self.wait_for_element_displayed(*self._step4_header_locator)
self.assertEqual(self.marionette.find_element(*self._step4_header_locator).text,
"Tap and hold the home button to browse and close recent apps.")
self.marionette.find_element(*self._tour_back_button_locator).tap()
self.wait_for_element_displayed(*self._step3_header_locator)
self.marionette.find_element(*self._tour_next_button_locator).tap()
self.wait_for_element_displayed(*self._step4_header_locator)
self.marionette.find_element(*self._tour_next_button_locator).tap()
self.wait_for_element_displayed(*self._section_tutorial_finish_locator)
self.marionette.find_element(*self._lets_go_button_locator).tap()
self.marionette.switch_to_frame()
| true | true |
f7f49b6c543f031d1c2df30b1dd79d02325a9495 | 6,252 | py | Python | research/maskgan/losses/losses.py | jdavidagudelo/tensorflow-models | 6f019beec73b01861363bf717706e27f4210b979 | [
"Apache-2.0"
] | 1 | 2021-05-17T01:42:29.000Z | 2021-05-17T01:42:29.000Z | research/maskgan/losses/losses.py | jdavidagudelo/tensorflow-models | 6f019beec73b01861363bf717706e27f4210b979 | [
"Apache-2.0"
] | null | null | null | research/maskgan/losses/losses.py | jdavidagudelo/tensorflow-models | 6f019beec73b01861363bf717706e27f4210b979 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Losses for Generator and Discriminator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def discriminator_loss(predictions, labels, missing_tokens):
    """Discriminator loss based on predictions and labels.
    Args:
      predictions: Discriminator linear predictions Tensor of shape [batch_size,
        sequence_length]
      labels: Labels for predictions, Tensor of shape [batch_size,
        sequence_length]
      missing_tokens: Indicator for the missing tokens. Evaluate the loss only
        on the tokens that were missing.
    Returns:
      loss: Scalar tf.float32 loss.
    """
    # Sigmoid cross entropy weighted so that only the masked (missing)
    # positions contribute to the loss.
    loss = tf.losses.sigmoid_cross_entropy(labels,
                                           predictions,
                                           weights=missing_tokens)
    # NOTE(review): leftover debug op -- tf.Print logs the loss, labels and
    # mask to stderr for the first 25 evaluations as a side effect; consider
    # removing before production use.
    loss = tf.Print(
        loss, [loss, labels, missing_tokens],
        message='loss, labels, missing_tokens',
        summarize=25,
        first_n=25)
    return loss
def cross_entropy_loss_matrix(gen_labels, gen_logits):
    """Per-token cross entropy loss for G.
    Args:
      gen_labels: Labels for the correct token.
      gen_logits: Generator logits.
    Returns:
      loss_matrix: Loss matrix of shape [batch_size, sequence_length].
    """
    return tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=gen_labels, logits=gen_logits)
def GAN_loss_matrix(dis_predictions):
    """Per-token GAN loss for G from Discriminator predictions.
    Args:
      dis_predictions: Discriminator predictions.
    Returns:
      loss_matrix: Loss matrix of shape [batch_size, sequence_length].
    """
    # Epsilon guards against log(0) when predictions saturate.
    eps = tf.constant(1e-7, tf.float32)
    return -tf.log(dis_predictions + eps)
def generator_GAN_loss(predictions):
    """Generator GAN loss based on Discriminator predictions."""
    mean_prediction = tf.reduce_mean(predictions)
    return -tf.log(mean_prediction)
def generator_blended_forward_loss(gen_logits, gen_labels, dis_predictions,
                                   is_real_input):
    """Computes the masked-loss for G. This will be a blend of cross-entropy
    loss where the true label is known and GAN loss where the true label has been
    masked.
    Args:
      gen_logits: Generator logits.
      gen_labels: Labels for the correct token.
      dis_predictions: Discriminator predictions.
      is_real_input: Tensor indicating whether the label is present.
    Returns:
      loss: Scalar tf.float32 total loss.
    """
    cross_entropy_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=gen_labels, logits=gen_logits)
    # Guard against log(0) for saturated Discriminator predictions; uses the
    # same epsilon as GAN_loss_matrix for consistency.
    eps = tf.constant(1e-7, tf.float32)
    gan_loss = -tf.log(dis_predictions + eps)
    # Cross entropy where the label is real, GAN loss where it was masked.
    loss_matrix = tf.where(is_real_input, cross_entropy_loss, gan_loss)
    return tf.reduce_mean(loss_matrix)
def wasserstein_generator_loss(gen_logits, gen_labels, dis_values,
                               is_real_input):
    """Masked loss for G: cross-entropy where the true label is known and
    Wasserstein critic loss where the true label is missing.
    Args:
      gen_logits: Generator logits.
      gen_labels: Labels for the correct token.
      dis_values: Discriminator values Tensor of shape [batch_size,
        sequence_length].
      is_real_input: Tensor indicating whether the label is present.
    Returns:
      loss: Scalar tf.float32 total loss.
    """
    cross_entropy_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=gen_labels, logits=gen_logits)
    # Maximize the dis_values (minimize the negative)
    critic_loss = -dis_values
    blended = tf.where(is_real_input, cross_entropy_loss, critic_loss)
    return tf.reduce_mean(blended)
def wasserstein_discriminator_loss(real_values, fake_values):
    """Wasserstein discriminator loss: mean(real) - mean(fake).
    Args:
      real_values: Value given by the Wasserstein Discriminator to real data.
      fake_values: Value given by the Wasserstein Discriminator to fake data.
    Returns:
      loss: Scalar tf.float32 loss.
    """
    return tf.reduce_mean(real_values) - tf.reduce_mean(fake_values)
def wasserstein_discriminator_loss_intrabatch(values, is_real_input):
"""Wasserstein discriminator loss. This is an odd variant where the value
difference is between the real tokens and the fake tokens within a single
batch.
Args:
values: Value given by the Wasserstein Discriminator of shape [batch_size,
sequence_length] to an imputed batch (real and fake).
is_real_input: tf.bool Tensor of shape [batch_size, sequence_length]. If
true, it indicates that the label is known.
Returns:
wasserstein_loss: Scalar tf.float32 loss.
"""
zero_tensor = tf.constant(0., dtype=tf.float32, shape=[])
present = tf.cast(is_real_input, tf.float32)
missing = tf.cast(1 - present, tf.float32)
# Counts for real and fake tokens.
real_count = tf.reduce_sum(present)
fake_count = tf.reduce_sum(missing)
# Averages for real and fake token values.
real = tf.mul(values, present)
fake = tf.mul(values, missing)
real_avg = tf.reduce_sum(real) / real_count
fake_avg = tf.reduce_sum(fake) / fake_count
# If there are no real or fake entries in the batch, we assign an average
# value of zero.
real_avg = tf.where(tf.equal(real_count, 0), zero_tensor, real_avg)
fake_avg = tf.where(tf.equal(fake_count, 0), zero_tensor, fake_avg)
wasserstein_loss = real_avg - fake_avg
return wasserstein_loss
| 33.433155 | 80 | 0.709853 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def discriminator_loss(predictions, labels, missing_tokens):
loss = tf.losses.sigmoid_cross_entropy(labels,
predictions,
weights=missing_tokens)
loss = tf.Print(
loss, [loss, labels, missing_tokens],
message='loss, labels, missing_tokens',
summarize=25,
first_n=25)
return loss
def cross_entropy_loss_matrix(gen_labels, gen_logits):
cross_entropy_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=gen_labels, logits=gen_logits)
return cross_entropy_loss
def GAN_loss_matrix(dis_predictions):
eps = tf.constant(1e-7, tf.float32)
gan_loss_matrix = -tf.log(dis_predictions + eps)
return gan_loss_matrix
def generator_GAN_loss(predictions):
return -tf.log(tf.reduce_mean(predictions))
def generator_blended_forward_loss(gen_logits, gen_labels, dis_predictions,
is_real_input):
cross_entropy_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=gen_labels, logits=gen_logits)
gan_loss = -tf.log(dis_predictions)
loss_matrix = tf.where(is_real_input, cross_entropy_loss, gan_loss)
return tf.reduce_mean(loss_matrix)
def wasserstein_generator_loss(gen_logits, gen_labels, dis_values,
is_real_input):
cross_entropy_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=gen_labels, logits=gen_logits)
gan_loss = -dis_values
loss_matrix = tf.where(is_real_input, cross_entropy_loss, gan_loss)
loss = tf.reduce_mean(loss_matrix)
return loss
def wasserstein_discriminator_loss(real_values, fake_values):
real_avg = tf.reduce_mean(real_values)
fake_avg = tf.reduce_mean(fake_values)
wasserstein_loss = real_avg - fake_avg
return wasserstein_loss
def wasserstein_discriminator_loss_intrabatch(values, is_real_input):
zero_tensor = tf.constant(0., dtype=tf.float32, shape=[])
present = tf.cast(is_real_input, tf.float32)
missing = tf.cast(1 - present, tf.float32)
real_count = tf.reduce_sum(present)
fake_count = tf.reduce_sum(missing)
real = tf.mul(values, present)
fake = tf.mul(values, missing)
real_avg = tf.reduce_sum(real) / real_count
fake_avg = tf.reduce_sum(fake) / fake_count
real_avg = tf.where(tf.equal(real_count, 0), zero_tensor, real_avg)
fake_avg = tf.where(tf.equal(fake_count, 0), zero_tensor, fake_avg)
wasserstein_loss = real_avg - fake_avg
return wasserstein_loss
| true | true |
f7f49d47138b60513d496a49ccdf62a4af486eeb | 11,164 | py | Python | tests/attacks/evasion/test_dpatch_robust.py | monshri/adversarial-robustness-toolbox | 6465240cb6a71bc376dae52459a7133e403df8d2 | [
"MIT"
] | 1,350 | 2020-07-14T08:06:55.000Z | 2022-03-31T19:22:25.000Z | tests/attacks/evasion/test_dpatch_robust.py | monshri/adversarial-robustness-toolbox | 6465240cb6a71bc376dae52459a7133e403df8d2 | [
"MIT"
] | 936 | 2020-07-14T03:33:00.000Z | 2022-03-31T23:05:29.000Z | tests/attacks/evasion/test_dpatch_robust.py | monshri/adversarial-robustness-toolbox | 6465240cb6a71bc376dae52459a7133e403df8d2 | [
"MIT"
] | 413 | 2020-07-16T16:00:16.000Z | 2022-03-29T10:31:12.000Z | # MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2020
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import numpy as np
import pytest
from art.attacks.evasion import RobustDPatch
from art.estimators.estimator import BaseEstimator, LossGradientsMixin
from art.estimators.object_detection.object_detector import ObjectDetectorMixin
from tests.attacks.utils import backend_test_classifier_type_check_fail
from tests.utils import ARTTestException
logger = logging.getLogger(__name__)
@pytest.fixture()
def fix_get_mnist_subset(get_mnist_dataset):
(x_train_mnist, y_train_mnist), (x_test_mnist, y_test_mnist) = get_mnist_dataset
n_train = 10
n_test = 10
yield x_train_mnist[:n_train], y_train_mnist[:n_train], x_test_mnist[:n_test], y_test_mnist[:n_test]
@pytest.mark.skip_framework("keras", "scikitlearn", "mxnet", "kerastf")
def test_generate(art_warning, fix_get_mnist_subset, fix_get_rcnn, framework):
try:
(_, _, x_test_mnist, y_test_mnist) = fix_get_mnist_subset
if framework == "pytorch":
x_test_mnist = np.transpose(x_test_mnist, (0, 2, 3, 1))
frcnn = fix_get_rcnn
attack = RobustDPatch(
frcnn,
patch_shape=(4, 4, 1),
patch_location=(2, 2),
crop_range=(0, 0),
brightness_range=(1.0, 1.0),
rotation_weights=(0.25, 0.25, 0.25, 0.25),
sample_size=1,
learning_rate=1.0,
max_iter=1,
batch_size=1,
verbose=False,
)
patch = attack.generate(x=x_test_mnist)
assert patch.shape == (4, 4, 1)
with pytest.raises(ValueError):
_ = attack.generate(x=np.repeat(x_test_mnist, axis=3, repeats=2))
with pytest.raises(ValueError):
_ = attack.generate(x=x_test_mnist, y=y_test_mnist)
except ARTTestException as e:
art_warning(e)
@pytest.mark.skip_framework("keras", "scikitlearn", "mxnet", "kerastf")
def test_generate_targeted(art_warning, fix_get_mnist_subset, fix_get_rcnn, framework):
try:
(_, _, x_test_mnist, _) = fix_get_mnist_subset
if framework == "pytorch":
x_test_mnist = np.transpose(x_test_mnist, (0, 2, 3, 1))
frcnn = fix_get_rcnn
attack = RobustDPatch(
frcnn,
patch_shape=(4, 4, 1),
patch_location=(2, 2),
crop_range=(0, 0),
brightness_range=(1.0, 1.0),
rotation_weights=(1, 0, 0, 0),
sample_size=1,
learning_rate=1.0,
max_iter=1,
batch_size=1,
targeted=True,
verbose=False,
)
y = frcnn.predict(x_test_mnist)
patch = attack.generate(x=x_test_mnist, y=y)
assert patch.shape == (4, 4, 1)
with pytest.raises(ValueError):
_ = attack.generate(x=x_test_mnist, y=None)
except ARTTestException as e:
art_warning(e)
@pytest.mark.parametrize("image_format", ["NHWC", "NCHW"])
@pytest.mark.skip_framework("keras", "scikitlearn", "mxnet", "kerastf")
def test_augment_images_with_patch(art_warning, image_format, fix_get_rcnn):
try:
frcnn = fix_get_rcnn
attack = RobustDPatch(
frcnn,
patch_shape=(4, 4, 1),
patch_location=(2, 2),
crop_range=(0, 0),
brightness_range=(1.0, 1.0),
rotation_weights=(1, 0, 0, 0),
sample_size=1,
learning_rate=1.0,
max_iter=1,
batch_size=1,
verbose=False,
)
if image_format == "NHWC":
patch = np.ones(shape=(4, 4, 1))
x = np.zeros(shape=(1, 10, 10, 1))
channels_first = False
elif image_format == "NCHW":
patch = np.ones(shape=(1, 4, 4))
x = np.zeros(shape=(1, 1, 10, 10))
channels_first = True
patched_images, _, transformations = attack._augment_images_with_patch(
x=x, y=None, patch=patch, channels_first=channels_first
)
transformation_expected = {"crop_x": 0, "crop_y": 0, "rot90": 0, "brightness": 1.0}
patch_sum_expected = 16.0
complement_sum_expected = 0.0
if image_format == "NHWC":
patch_sum = np.sum(patched_images[0, 2:7, 2:7, :])
elif image_format == "NCHW":
patch_sum = np.sum(patched_images[0, :, 2:7, 2:7])
complement_sum = np.sum(patched_images[0]) - patch_sum
assert transformations == transformation_expected
assert patch_sum == patch_sum_expected
assert complement_sum == complement_sum_expected
except ARTTestException as e:
art_warning(e)
@pytest.mark.skip_framework("keras", "scikitlearn", "mxnet", "kerastf")
def test_apply_patch(art_warning, fix_get_rcnn):
try:
frcnn = fix_get_rcnn
attack = RobustDPatch(
frcnn,
patch_shape=(4, 4, 1),
patch_location=(2, 2),
crop_range=(0, 0),
brightness_range=(1.0, 1.0),
rotation_weights=(1, 0, 0, 0),
sample_size=1,
learning_rate=1.0,
max_iter=1,
batch_size=1,
verbose=False,
)
patch = np.ones(shape=(4, 4, 1))
x = np.zeros(shape=(1, 10, 10, 1))
patched_images = attack.apply_patch(x=x, patch_external=patch)
patch_sum_expected = 16.0
complement_sum_expected = 0.0
patch_sum = np.sum(patched_images[0, 2:7, 2:7, :])
complement_sum = np.sum(patched_images[0]) - patch_sum
assert patch_sum == patch_sum_expected
assert complement_sum == complement_sum_expected
except ARTTestException as e:
art_warning(e)
@pytest.mark.skip_framework("keras", "scikitlearn", "mxnet", "kerastf")
def test_untransform_gradients(art_warning, fix_get_rcnn):
try:
frcnn = fix_get_rcnn
attack = RobustDPatch(
frcnn,
patch_shape=(4, 4, 1),
patch_location=(2, 2),
crop_range=(0, 0),
brightness_range=(1.0, 1.0),
rotation_weights=(1, 0, 0, 0),
sample_size=1,
learning_rate=1.0,
max_iter=1,
batch_size=1,
verbose=False,
)
gradients = np.zeros(shape=(1, 10, 10, 1))
gradients[:, 2:7, 2:7, :] = 1
crop_x = 1
crop_y = 1
rot90 = 3
brightness = 1.0
gradients = gradients[:, crop_x : 10 - crop_x, crop_y : 10 - crop_y, :]
gradients = np.rot90(gradients, rot90, (1, 2))
transforms = {"crop_x": crop_x, "crop_y": crop_y, "rot90": rot90, "brightness": brightness}
gradients = attack._untransform_gradients(gradients=gradients, transforms=transforms, channels_first=False)
gradients_sum = np.sum(gradients[0])
gradients_sum_expected = 16.0
assert gradients_sum == gradients_sum_expected
except ARTTestException as e:
art_warning(e)
@pytest.mark.framework_agnostic
def test_check_params(art_warning, fix_get_rcnn):
try:
frcnn = fix_get_rcnn
# with pytest.raises(TypeError):
# _ = RobustDPatch(frcnn, patch_shape=(1.0, 2.0, 3.0))
# with pytest.raises(ValueError):
# _ = RobustDPatch(frcnn, patch_shape=(1, 2, 3, 4))
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, learning_rate=1)
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, learning_rate=-1.0)
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, max_iter=1.0)
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, max_iter=-1)
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, batch_size=1.0)
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, batch_size=-1)
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, verbose="true")
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, patch_location="true")
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, patch_location=(1, 2, 3))
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, crop_range="true")
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, crop_range=(1, 2, 3))
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, crop_range=(2, 1))
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, patch_location=(0, 1), crop_range=(1, 2))
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, brightness_range=(1, 2, 3))
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, brightness_range=(1.0, 2.0, 3.0))
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, brightness_range=(-1.0, 1.0))
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, brightness_range=(2.0, 1.0))
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, rotation_weights=("1", "2", "3"))
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, rotation_weights=(1, 2, 3))
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, rotation_weights=(-1, -2, -3, -4))
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, rotation_weights=(0.0, 0.0, 0.0, 0.0))
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, sample_size=1.0)
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, sample_size=-1)
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, targeted="true")
except ARTTestException as e:
art_warning(e)
@pytest.mark.framework_agnostic
def test_classifier_type_check_fail(art_warning):
try:
backend_test_classifier_type_check_fail(RobustDPatch, [BaseEstimator, LossGradientsMixin, ObjectDetectorMixin])
except ARTTestException as e:
art_warning(e)
| 34.996865 | 120 | 0.620924 |
import logging
import numpy as np
import pytest
from art.attacks.evasion import RobustDPatch
from art.estimators.estimator import BaseEstimator, LossGradientsMixin
from art.estimators.object_detection.object_detector import ObjectDetectorMixin
from tests.attacks.utils import backend_test_classifier_type_check_fail
from tests.utils import ARTTestException
logger = logging.getLogger(__name__)
@pytest.fixture()
def fix_get_mnist_subset(get_mnist_dataset):
(x_train_mnist, y_train_mnist), (x_test_mnist, y_test_mnist) = get_mnist_dataset
n_train = 10
n_test = 10
yield x_train_mnist[:n_train], y_train_mnist[:n_train], x_test_mnist[:n_test], y_test_mnist[:n_test]
@pytest.mark.skip_framework("keras", "scikitlearn", "mxnet", "kerastf")
def test_generate(art_warning, fix_get_mnist_subset, fix_get_rcnn, framework):
try:
(_, _, x_test_mnist, y_test_mnist) = fix_get_mnist_subset
if framework == "pytorch":
x_test_mnist = np.transpose(x_test_mnist, (0, 2, 3, 1))
frcnn = fix_get_rcnn
attack = RobustDPatch(
frcnn,
patch_shape=(4, 4, 1),
patch_location=(2, 2),
crop_range=(0, 0),
brightness_range=(1.0, 1.0),
rotation_weights=(0.25, 0.25, 0.25, 0.25),
sample_size=1,
learning_rate=1.0,
max_iter=1,
batch_size=1,
verbose=False,
)
patch = attack.generate(x=x_test_mnist)
assert patch.shape == (4, 4, 1)
with pytest.raises(ValueError):
_ = attack.generate(x=np.repeat(x_test_mnist, axis=3, repeats=2))
with pytest.raises(ValueError):
_ = attack.generate(x=x_test_mnist, y=y_test_mnist)
except ARTTestException as e:
art_warning(e)
@pytest.mark.skip_framework("keras", "scikitlearn", "mxnet", "kerastf")
def test_generate_targeted(art_warning, fix_get_mnist_subset, fix_get_rcnn, framework):
try:
(_, _, x_test_mnist, _) = fix_get_mnist_subset
if framework == "pytorch":
x_test_mnist = np.transpose(x_test_mnist, (0, 2, 3, 1))
frcnn = fix_get_rcnn
attack = RobustDPatch(
frcnn,
patch_shape=(4, 4, 1),
patch_location=(2, 2),
crop_range=(0, 0),
brightness_range=(1.0, 1.0),
rotation_weights=(1, 0, 0, 0),
sample_size=1,
learning_rate=1.0,
max_iter=1,
batch_size=1,
targeted=True,
verbose=False,
)
y = frcnn.predict(x_test_mnist)
patch = attack.generate(x=x_test_mnist, y=y)
assert patch.shape == (4, 4, 1)
with pytest.raises(ValueError):
_ = attack.generate(x=x_test_mnist, y=None)
except ARTTestException as e:
art_warning(e)
@pytest.mark.parametrize("image_format", ["NHWC", "NCHW"])
@pytest.mark.skip_framework("keras", "scikitlearn", "mxnet", "kerastf")
def test_augment_images_with_patch(art_warning, image_format, fix_get_rcnn):
try:
frcnn = fix_get_rcnn
attack = RobustDPatch(
frcnn,
patch_shape=(4, 4, 1),
patch_location=(2, 2),
crop_range=(0, 0),
brightness_range=(1.0, 1.0),
rotation_weights=(1, 0, 0, 0),
sample_size=1,
learning_rate=1.0,
max_iter=1,
batch_size=1,
verbose=False,
)
if image_format == "NHWC":
patch = np.ones(shape=(4, 4, 1))
x = np.zeros(shape=(1, 10, 10, 1))
channels_first = False
elif image_format == "NCHW":
patch = np.ones(shape=(1, 4, 4))
x = np.zeros(shape=(1, 1, 10, 10))
channels_first = True
patched_images, _, transformations = attack._augment_images_with_patch(
x=x, y=None, patch=patch, channels_first=channels_first
)
transformation_expected = {"crop_x": 0, "crop_y": 0, "rot90": 0, "brightness": 1.0}
patch_sum_expected = 16.0
complement_sum_expected = 0.0
if image_format == "NHWC":
patch_sum = np.sum(patched_images[0, 2:7, 2:7, :])
elif image_format == "NCHW":
patch_sum = np.sum(patched_images[0, :, 2:7, 2:7])
complement_sum = np.sum(patched_images[0]) - patch_sum
assert transformations == transformation_expected
assert patch_sum == patch_sum_expected
assert complement_sum == complement_sum_expected
except ARTTestException as e:
art_warning(e)
@pytest.mark.skip_framework("keras", "scikitlearn", "mxnet", "kerastf")
def test_apply_patch(art_warning, fix_get_rcnn):
try:
frcnn = fix_get_rcnn
attack = RobustDPatch(
frcnn,
patch_shape=(4, 4, 1),
patch_location=(2, 2),
crop_range=(0, 0),
brightness_range=(1.0, 1.0),
rotation_weights=(1, 0, 0, 0),
sample_size=1,
learning_rate=1.0,
max_iter=1,
batch_size=1,
verbose=False,
)
patch = np.ones(shape=(4, 4, 1))
x = np.zeros(shape=(1, 10, 10, 1))
patched_images = attack.apply_patch(x=x, patch_external=patch)
patch_sum_expected = 16.0
complement_sum_expected = 0.0
patch_sum = np.sum(patched_images[0, 2:7, 2:7, :])
complement_sum = np.sum(patched_images[0]) - patch_sum
assert patch_sum == patch_sum_expected
assert complement_sum == complement_sum_expected
except ARTTestException as e:
art_warning(e)
@pytest.mark.skip_framework("keras", "scikitlearn", "mxnet", "kerastf")
def test_untransform_gradients(art_warning, fix_get_rcnn):
try:
frcnn = fix_get_rcnn
attack = RobustDPatch(
frcnn,
patch_shape=(4, 4, 1),
patch_location=(2, 2),
crop_range=(0, 0),
brightness_range=(1.0, 1.0),
rotation_weights=(1, 0, 0, 0),
sample_size=1,
learning_rate=1.0,
max_iter=1,
batch_size=1,
verbose=False,
)
gradients = np.zeros(shape=(1, 10, 10, 1))
gradients[:, 2:7, 2:7, :] = 1
crop_x = 1
crop_y = 1
rot90 = 3
brightness = 1.0
gradients = gradients[:, crop_x : 10 - crop_x, crop_y : 10 - crop_y, :]
gradients = np.rot90(gradients, rot90, (1, 2))
transforms = {"crop_x": crop_x, "crop_y": crop_y, "rot90": rot90, "brightness": brightness}
gradients = attack._untransform_gradients(gradients=gradients, transforms=transforms, channels_first=False)
gradients_sum = np.sum(gradients[0])
gradients_sum_expected = 16.0
assert gradients_sum == gradients_sum_expected
except ARTTestException as e:
art_warning(e)
@pytest.mark.framework_agnostic
def test_check_params(art_warning, fix_get_rcnn):
try:
frcnn = fix_get_rcnn
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, learning_rate=1)
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, learning_rate=-1.0)
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, max_iter=1.0)
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, max_iter=-1)
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, batch_size=1.0)
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, batch_size=-1)
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, verbose="true")
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, patch_location="true")
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, patch_location=(1, 2, 3))
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, crop_range="true")
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, crop_range=(1, 2, 3))
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, crop_range=(2, 1))
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, patch_location=(0, 1), crop_range=(1, 2))
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, brightness_range=(1, 2, 3))
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, brightness_range=(1.0, 2.0, 3.0))
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, brightness_range=(-1.0, 1.0))
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, brightness_range=(2.0, 1.0))
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, rotation_weights=("1", "2", "3"))
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, rotation_weights=(1, 2, 3))
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, rotation_weights=(-1, -2, -3, -4))
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, rotation_weights=(0.0, 0.0, 0.0, 0.0))
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, sample_size=1.0)
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, sample_size=-1)
with pytest.raises(ValueError):
_ = RobustDPatch(frcnn, targeted="true")
except ARTTestException as e:
art_warning(e)
@pytest.mark.framework_agnostic
def test_classifier_type_check_fail(art_warning):
try:
backend_test_classifier_type_check_fail(RobustDPatch, [BaseEstimator, LossGradientsMixin, ObjectDetectorMixin])
except ARTTestException as e:
art_warning(e)
| true | true |
f7f49e7f3d8753122ece7ea1d03f1e16f8545ebd | 3,120 | py | Python | dataset.py | bklppr/yolo2_onnx | fcb85bd94e22c1c47f20fc13bb6ae3ac1ccd10f4 | [
"MIT"
] | null | null | null | dataset.py | bklppr/yolo2_onnx | fcb85bd94e22c1c47f20fc13bb6ae3ac1ccd10f4 | [
"MIT"
] | null | null | null | dataset.py | bklppr/yolo2_onnx | fcb85bd94e22c1c47f20fc13bb6ae3ac1ccd10f4 | [
"MIT"
] | 1 | 2018-07-11T22:44:11.000Z | 2018-07-11T22:44:11.000Z | #!/usr/bin/python
# encoding: utf-8
import os
import random
import torch
import numpy as np
from torch.utils.data import Dataset
from PIL import Image
from utils import read_truths_args, read_truths
from image import *
class listDataset(Dataset):
def __init__(self, root, shape=None, shuffle=True, transform=None, target_transform=None, train=False, seen=0, batch_size=8, num_workers=2):
with open(root, 'r') as file:
self.lines = file.readlines()
if shuffle:
random.shuffle(self.lines)
self.nSamples = len(self.lines)
self.transform = transform
self.target_transform = target_transform
self.train = train
self.shape = shape
self.seen = seen
self.batch_size = batch_size
self.num_workers = num_workers
def __len__(self):
return self.nSamples
def __getitem__(self, index):
assert index <= len(self), 'index range error'
imgpath = self.lines[index].rstrip()
if self.train and index % 64== 0:
if self.seen < 4000*64:
width = 13*32
self.shape = (width, width)
elif self.seen < 8000*64:
width = (random.randint(0,3) + 13)*32
self.shape = (width, width)
elif self.seen < 12000*64:
width = (random.randint(0,5) + 12)*32
self.shape = (width, width)
elif self.seen < 16000*64:
width = (random.randint(0,7) + 11)*32
self.shape = (width, width)
else: # self.seen < 20000*64:
width = (random.randint(0,9) + 10)*32
self.shape = (width, width)
if self.train:
jitter = 0.2
hue = 0.1
saturation = 1.5
exposure = 1.5
img, label = load_data_detection(imgpath, self.shape, jitter, hue, saturation, exposure)
label = torch.from_numpy(label)
else:
img = Image.open(imgpath).convert('RGB')
if self.shape:
img = img.resize(self.shape)
labpath = imgpath.replace('images', 'labels').replace('JPEGImages', 'labels').replace('.jpg', '.txt').replace('.png','.txt')
label = torch.zeros(50*5)
#if os.path.getsize(labpath):
#tmp = torch.from_numpy(np.loadtxt(labpath))
try:
tmp = torch.from_numpy(read_truths_args(labpath, 8.0/img.width).astype('float32'))
except Exception:
tmp = torch.zeros(1,5)
#tmp = torch.from_numpy(read_truths(labpath))
tmp = tmp.view(-1)
tsz = tmp.numel()
#print('labpath = %s , tsz = %d' % (labpath, tsz))
if tsz > 50*5:
label = tmp[0:50*5]
elif tsz > 0:
label[0:tsz] = tmp
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
label = self.target_transform(label)
self.seen = self.seen + self.num_workers
return (img, label)
| 33.548387 | 144 | 0.549359 |
import os
import random
import torch
import numpy as np
from torch.utils.data import Dataset
from PIL import Image
from utils import read_truths_args, read_truths
from image import *
class listDataset(Dataset):
def __init__(self, root, shape=None, shuffle=True, transform=None, target_transform=None, train=False, seen=0, batch_size=8, num_workers=2):
with open(root, 'r') as file:
self.lines = file.readlines()
if shuffle:
random.shuffle(self.lines)
self.nSamples = len(self.lines)
self.transform = transform
self.target_transform = target_transform
self.train = train
self.shape = shape
self.seen = seen
self.batch_size = batch_size
self.num_workers = num_workers
def __len__(self):
return self.nSamples
def __getitem__(self, index):
assert index <= len(self), 'index range error'
imgpath = self.lines[index].rstrip()
if self.train and index % 64== 0:
if self.seen < 4000*64:
width = 13*32
self.shape = (width, width)
elif self.seen < 8000*64:
width = (random.randint(0,3) + 13)*32
self.shape = (width, width)
elif self.seen < 12000*64:
width = (random.randint(0,5) + 12)*32
self.shape = (width, width)
elif self.seen < 16000*64:
width = (random.randint(0,7) + 11)*32
self.shape = (width, width)
else:
width = (random.randint(0,9) + 10)*32
self.shape = (width, width)
if self.train:
jitter = 0.2
hue = 0.1
saturation = 1.5
exposure = 1.5
img, label = load_data_detection(imgpath, self.shape, jitter, hue, saturation, exposure)
label = torch.from_numpy(label)
else:
img = Image.open(imgpath).convert('RGB')
if self.shape:
img = img.resize(self.shape)
labpath = imgpath.replace('images', 'labels').replace('JPEGImages', 'labels').replace('.jpg', '.txt').replace('.png','.txt')
label = torch.zeros(50*5)
try:
tmp = torch.from_numpy(read_truths_args(labpath, 8.0/img.width).astype('float32'))
except Exception:
tmp = torch.zeros(1,5)
tmp = tmp.view(-1)
tsz = tmp.numel()
if tsz > 50*5:
label = tmp[0:50*5]
elif tsz > 0:
label[0:tsz] = tmp
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
label = self.target_transform(label)
self.seen = self.seen + self.num_workers
return (img, label)
| true | true |
f7f4a0858fe848911aa0c1a54269a299d7536d5a | 2,021 | py | Python | tests/backends/django/testapp/migrations/0001_initial.py | bitner/pygeofilter | 140aee2f3197044cc18dd111c71e2fcdc516a200 | [
"MIT"
] | 19 | 2021-03-30T18:18:10.000Z | 2022-03-23T13:53:55.000Z | tests/backends/django/testapp/migrations/0001_initial.py | bitner/pygeofilter | 140aee2f3197044cc18dd111c71e2fcdc516a200 | [
"MIT"
] | 20 | 2021-04-25T10:32:41.000Z | 2022-01-21T10:48:30.000Z | tests/backends/django/testapp/migrations/0001_initial.py | bitner/pygeofilter | 140aee2f3197044cc18dd111c71e2fcdc516a200 | [
"MIT"
] | 6 | 2021-06-09T01:07:17.000Z | 2022-02-27T16:29:55.000Z | # flake8: noqa
# Generated by Django 2.2.5 on 2019-09-09 07:18
import django.contrib.gis.db.models.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Record',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('identifier', models.CharField(max_length=256, unique=True)),
('geometry', django.contrib.gis.db.models.fields.GeometryField(srid=4326)),
('float_attribute', models.FloatField(blank=True, null=True)),
('int_attribute', models.IntegerField(blank=True, null=True)),
('str_attribute', models.CharField(blank=True, max_length=256, null=True)),
('datetime_attribute', models.DateTimeField(blank=True, null=True)),
('choice_attribute', models.PositiveSmallIntegerField(blank=True, choices=[(1, 'A'), (2, 'B'), (3, 'C')], null=True)),
],
),
migrations.CreateModel(
name='RecordMeta',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('float_meta_attribute', models.FloatField(blank=True, null=True)),
('int_meta_attribute', models.IntegerField(blank=True, null=True)),
('str_meta_attribute', models.CharField(blank=True, max_length=256, null=True)),
('datetime_meta_attribute', models.DateTimeField(blank=True, null=True)),
('choice_meta_attribute', models.PositiveSmallIntegerField(blank=True, choices=[(1, 'X'), (2, 'Y'), (3, 'Z')], null=True)),
('record', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='record_metas', to='testapp.Record')),
],
),
]
| 45.931818 | 141 | 0.613063 |
import django.contrib.gis.db.models.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Record',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('identifier', models.CharField(max_length=256, unique=True)),
('geometry', django.contrib.gis.db.models.fields.GeometryField(srid=4326)),
('float_attribute', models.FloatField(blank=True, null=True)),
('int_attribute', models.IntegerField(blank=True, null=True)),
('str_attribute', models.CharField(blank=True, max_length=256, null=True)),
('datetime_attribute', models.DateTimeField(blank=True, null=True)),
('choice_attribute', models.PositiveSmallIntegerField(blank=True, choices=[(1, 'A'), (2, 'B'), (3, 'C')], null=True)),
],
),
migrations.CreateModel(
name='RecordMeta',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('float_meta_attribute', models.FloatField(blank=True, null=True)),
('int_meta_attribute', models.IntegerField(blank=True, null=True)),
('str_meta_attribute', models.CharField(blank=True, max_length=256, null=True)),
('datetime_meta_attribute', models.DateTimeField(blank=True, null=True)),
('choice_meta_attribute', models.PositiveSmallIntegerField(blank=True, choices=[(1, 'X'), (2, 'Y'), (3, 'Z')], null=True)),
('record', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='record_metas', to='testapp.Record')),
],
),
]
| true | true |
f7f4a1884584bcd3f47bbe36d93f495b8cdc7a6f | 75,507 | py | Python | neurora/rsa_plot.py | ZitongLu1996/NeuroRA | 4e72f5b37ff308a4a068107b35f7555df6b7df0d | [
"MIT"
] | 110 | 2019-04-30T03:52:48.000Z | 2022-03-19T08:23:38.000Z | neurora/rsa_plot.py | ZitongLu1996/NeuroRA | 4e72f5b37ff308a4a068107b35f7555df6b7df0d | [
"MIT"
] | 2 | 2020-07-23T14:31:30.000Z | 2022-01-14T08:30:00.000Z | neurora/rsa_plot.py | ZitongLu1996/NeuroRA | 4e72f5b37ff308a4a068107b35f7555df6b7df0d | [
"MIT"
] | 20 | 2020-03-02T11:58:30.000Z | 2021-12-31T08:29:53.000Z | # -*- coding: utf-8 -*-
' a module for plotting the NeuroRA results '
__author__ = 'Zitong Lu'
import numpy as np
import copy
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from scipy import signal
from scipy.stats import ttest_1samp, ttest_rel
from nilearn import plotting, datasets, surface
import nibabel as nib
from neurora.stuff import get_affine, get_bg_ch2, get_bg_ch2bet, correct_by_threshold, \
clusterbased_permutation_1d_1samp_1sided, clusterbased_permutation_2d_1samp_1sided, \
clusterbased_permutation_1d_1samp_2sided, clusterbased_permutation_2d_2sided, smooth_1d
from decimal import Decimal
' a function for plotting the RDM '
def plot_rdm(rdm, percentile=False, rescale=False, lim=[0, 1], conditions=None, con_fontsize=12, cmap=None):

    """
    Plot the RDM

    Parameters
    ----------
    rdm : array or list [n_cons, n_cons]
        A representational dissimilarity matrix.
    percentile : bool True or False. Default is False.
        Rescale the values in RDM or not by displaying the percentile.
    rescale : bool True or False. Default is False.
        Rescale the values in RDM or not.
        Here, the maximum-minimum method is used to rescale the values except for the
        values on the diagonal.
    lim : array or list [min, max]. Default is [0, 1].
        The corrs view lims.
    conditions : string-array or string-list. Default is None.
        The labels of the conditions for plotting.
        conditions should contain n_cons strings. If conditions=None, the labels of conditions will be invisible.
    con_fontsize : int or float. Default is 12.
        The fontsize of the labels of the conditions for plotting.
    cmap : matplotlib colormap. Default is None.
        The colormap for RDM.
        If cmap=None, the colormap will be 'jet'.

    Returns
    -------
    0 on success, "Invalid input!" for a non-square input, None for a 2*2 RDM.
    """

    if len(np.shape(rdm)) != 2 or np.shape(rdm)[0] != np.shape(rdm)[1]:

        return "Invalid input!"

    # get the number of conditions
    cons = rdm.shape[0]

    # work on a copy so the caller's RDM is never modified
    crdm = copy.deepcopy(rdm)

    # if cons=2, the RDM cannot be plotted.
    if cons == 2:
        print("The shape of RDM cannot be 2*2. Here NeuroRA cannot plot this RDM.")

        return None

    # defensive square check (kept for list inputs)
    a, b = np.shape(crdm)
    if a != b:

        return None

    if percentile is True:

        # replace each cell by its percentile rank among all cell values
        # NOTE: np.float was removed in NumPy 1.24 -> use the builtin float
        v = np.zeros([cons * cons, 2], dtype=float)
        for i in range(cons):
            for j in range(cons):
                v[i * cons + j, 0] = crdm[i, j]

        # rank the flattened values; ties share the same rank
        index = np.argsort(v[:, 0])
        m = 0
        for i in range(cons * cons):
            if i > 0 and v[index[i], 0] > v[index[i - 1], 0]:
                m = m + 1
            v[index[i], 1] = m

        # convert ranks to percentiles (0-100)
        v[:, 0] = v[:, 1] * 100 / m

        for i in range(cons):
            for j in range(cons):
                crdm[i, j] = v[i * cons + j, 0]

        if cmap is None:
            plt.imshow(crdm, extent=(0, 1, 0, 1), cmap=plt.cm.jet, clim=(0, 100))
        else:
            plt.imshow(crdm, extent=(0, 1, 0, 1), cmap=cmap, clim=(0, 100))

    else:

        if rescale is True:

            # flatten the RDM and collect the distinct values
            vrdm = np.reshape(rdm, [cons * cons])
            lvrdm = list(set(vrdm))
            lvrdm.sort()

            # max & the smallest off-diagonal value (lvrdm[0] is the diagonal 0)
            maxvalue = lvrdm[-1]
            minvalue = lvrdm[1]

            # min-max rescale every off-diagonal cell
            if maxvalue != minvalue:
                for i in range(cons):
                    for j in range(cons):
                        if i != j:
                            crdm[i, j] = float((crdm[i, j] - minvalue) / (maxvalue - minvalue))

        # plot the RDM (avoid shadowing the builtins min/max)
        cmin = lim[0]
        cmax = lim[1]
        if cmap is None:
            plt.imshow(crdm, extent=(0, 1, 0, 1), cmap=plt.cm.jet, clim=(cmin, cmax))
        else:
            plt.imshow(crdm, extent=(0, 1, 0, 1), cmap=cmap, clim=(cmin, cmax))

    cb = plt.colorbar()
    cb.ax.tick_params(labelsize=16)
    font = {'size': 18}

    if percentile is True:
        cb.set_label("Dissimilarity (percentile)", fontdict=font)
    elif rescale is True:
        cb.set_label("Dissimilarity (Rescaling)", fontdict=font)
    else:
        cb.set_label("Dissimilarity", fontdict=font)

    if conditions is not None:
        # put the condition labels at the centre of each row/column
        step = float(1 / cons)
        x = np.arange(0.5 * step, 1 + 0.5 * step, step)
        y = np.arange(1 - 0.5 * step, -0.5 * step, -step)
        plt.xticks(x, conditions, fontsize=con_fontsize, rotation=30, ha="right")
        plt.yticks(y, conditions, fontsize=con_fontsize)
    else:
        plt.axis("off")

    plt.show()

    return 0
' a function for plotting the RDM with values '
def plot_rdm_withvalue(rdm, lim=[0, 1], value_fontsize=10, conditions=None, con_fontsize=12, cmap=None):

    """
    Plot the RDM with values

    Parameters
    ----------
    rdm : array or list [n_cons, n_cons]
        A representational dissimilarity matrix.
    lim : array or list [min, max]. Default is [0, 1].
        The corrs view lims.
    value_fontsize : int or float. Default is 10.
        The fontsize of the values on the RDM.
    conditions : string-array or string-list or None. Default is None.
        The labels of the conditions for plotting.
        conditions should contain n_cons strings. If conditions=None, the labels of conditions will be invisible.
    con_fontsize : int or float. Default is 12.
        The fontsize of the labels of the conditions for plotting.
    cmap : matplotlib colormap or None. Default is None.
        The colormap for RDM.
        If cmap=None, the colormap will be 'Greens'.

    Returns
    -------
    0 on success, "Invalid input!" for a non-square input, None for a 2*2 RDM.
    """

    if len(np.shape(rdm)) != 2 or np.shape(rdm)[0] != np.shape(rdm)[1]:

        return "Invalid input!"

    # get the number of conditions
    cons = rdm.shape[0]

    # if cons=2, the RDM cannot be plotted.
    if cons == 2:
        print("The shape of RDM cannot be 2*2. Here NeuroRA cannot plot this RDM.")

        return None

    # work on a copy so the caller's RDM is never modified
    crdm = copy.deepcopy(rdm)

    # defensive square check (kept for list inputs)
    a, b = np.shape(crdm)
    if a != b:

        return None

    # plot the RDM (avoid shadowing the builtins min/max)
    cmin = lim[0]
    cmax = lim[1]
    if cmap is None:
        plt.imshow(crdm, extent=(0, 1, 0, 1), cmap=plt.cm.Greens, clim=(cmin, cmax))
    else:
        plt.imshow(crdm, extent=(0, 1, 0, 1), cmap=cmap, clim=(cmin, cmax))

    cb = plt.colorbar()
    cb.ax.tick_params(labelsize=16)
    font = {'size': 18}
    cb.set_label("Dissimilarity", fontdict=font)

    # print the dissimilarity value at the centre of each cell
    # (leftover debug prints removed)
    step = float(1 / cons)
    for i in range(cons):
        for j in range(cons):
            plt.text(i * step + 0.5 * step, 1 - j * step - 0.5 * step, float('%.4f' % rdm[i, j]),
                     ha="center", va="center", color="blue", fontsize=value_fontsize)

    if conditions is not None:
        # put the condition labels at the centre of each row/column
        x = np.arange(0.5 * step, 1 + 0.5 * step, step)
        y = np.arange(1 - 0.5 * step, -0.5 * step, -step)
        plt.xticks(x, conditions, fontsize=con_fontsize, rotation=30, ha="right")
        plt.yticks(y, conditions, fontsize=con_fontsize)
    else:
        plt.axis("off")

    plt.show()

    return 0
' a function for plotting the correlation coefficients by time sequence '
def plot_corrs_by_time(corrs, labels=None, time_unit=[0, 0.1]):

    """
    Plot the correlation coefficients by time sequence

    Parameters
    ----------
    corrs : array
        The correlation coefficients time-by-time.
        The shape of corrs must be [n, ts, 2] or [n, ts]. n represents the number of curves, ts the number of
        time-points. If the shape is [n, ts, 2], each time-point holds a r-value and a p-value and only the
        r-values are plotted; if the shape is [n, ts], the values are r-values.
    labels : string-array or string-list or None. Default is None.
        The label for each curve. If labels=None, no legend is shown.
    time_unit : array or list [start_t, t_step]. Default is [0, 0.1]
        The time information of corrs for plotting: start_t is the onset time and t_step the interval between
        two adjacent time-points.
    """

    ndim = len(np.shape(corrs))
    if ndim < 2 or ndim > 3:

        return "Invalid input!"

    # number of curves and of time-points
    ncurves = corrs.shape[0]
    ntps = corrs.shape[1]

    # time axis of the raw data
    onset = time_unit[0]
    step = time_unit[1]
    offset = onset + ntps * step
    coarse_x = np.arange(onset, offset, step)

    # dense axis: 50 interpolated samples per original time-point,
    # cubic-spline interpolation to smooth each curve
    ndense = ntps * 50
    dense_x = np.linspace(coarse_x.min(), coarse_x.max(), ndense)
    dense_y = np.zeros([ncurves, ndense])
    for k in range(ncurves):
        raw = corrs[k, :, 0] if ndim == 3 else corrs[k, :]
        dense_y[k] = interp1d(coarse_x, raw, kind='cubic')(dense_x)

    # choose y-limits with 10% headroom, clamped to [-1, 1]
    peak = np.max(dense_y)
    trough = np.min(dense_y)
    ymax = peak * 1.1 if peak <= 1 / 1.1 else 1
    if trough >= 0:
        ymin = -0.1
    elif trough > -1 / 1.1:
        ymin = trough * 1.1
    else:
        ymin = -1

    fig, ax = plt.subplots()
    for k in range(ncurves):
        if labels:
            plt.plot(dense_x, dense_y[k], linewidth=3, label=labels[k])
        else:
            plt.plot(dense_x, dense_y[k], linewidth=3)

    plt.ylim(ymin, ymax)
    plt.ylabel("Similarity", fontsize=20)
    plt.xlabel("Time (s)", fontsize=20)
    plt.tick_params(labelsize=18)
    if labels:
        plt.legend()
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    plt.show()

    return 0
' a function for plotting the time-by-time Similarities with statistical results'
def plot_tbytsim_withstats(similarities, start_time=0, end_time=1, time_interval=0.01, smooth=True, p=0.05, cbpt=True,
                           clusterp=0.05, stats_time=[0, 1], color='r', xlim=[0, 1], ylim=[-0.1, 0.8],
                           xlabel='Time (s)', ylabel='Representational Similarity', figsize=[6.4, 3.6], x0=0,
                           ticksize=12, fontsize=16, markersize=2, avgshow=False):

    """
    Plot the time-by-time Similarities with statistical results

    Parameters
    ----------
    similarities : array
        The Similarities.
        The size of similarities should be [n_subs, n_ts] or [n_subs, n_ts, 2]. n_subs, n_ts represent the number of
        subjects and number of time-points. 2 represents the similarity and a p-value.
    start_time : int or float. Default is 0.
        The start time.
    end_time : int or float. Default is 1.
        The end time.
    time_interval : float. Default is 0.01.
        The time interval between two time samples.
    smooth : bool True or False. Default is True.
        Smooth the results or not.
    p : float. Default is 0.05.
        The threshold of p-values.
    cbpt : bool True or False. Default is True.
        Conduct cluster-based permutation test or not.
    clusterp : float. Default is 0.05.
        The threshold of cluster-defining p-values.
    stats_time : array or list [stats_time1, stats_time2]. Default is [0, 1].
        Time period for statistical analysis.
    color : matplotlib color or None. Default is 'r'.
        The color for the curve.
    xlim : array or list [xmin, xmax]. Default is [0, 1].
        The x-axis (time) view lims.
    ylim : array or list [ymin, ymax]. Default is [-0.1, 0.8].
        The y-axis (similarity) view lims.
    xlabel : string. Default is 'Time (s)'.
        The label of x-axis.
    ylabel : string. Default is 'Representational Similarity'.
        The label of y-axis.
    figsize : array or list, [size_X, size_Y]. Default is [6.4, 3.6].
        The size of the figure.
    x0 : float. Default is 0.
        The Y-axis is at x=x0.
    ticksize : int or float. Default is 12.
        The size of the ticks.
    fontsize : int or float. Default is 16.
        The fontsize of the labels.
    markersize : int or float. Default is 2.
        The size of significant marker.
    avgshow : bool True or False. Default is False.
        Show the averaging similarities curve or not.
    """

    if len(np.shape(similarities)) < 2 or len(np.shape(similarities)) > 3:

        return "Invalid input!"

    n = len(np.shape(similarities))
    yminlim = ylim[0]
    ymaxlim = ylim[1]

    # keep only the similarity values if p-values are attached
    if n == 3:
        similarities = similarities[:, :, 0]

    # work on a copy so the caller's data is never modified
    csimilarities = copy.deepcopy(similarities)
    nsubs, nts = np.shape(csimilarities)

    # the implied sampling step must equal the declared time_interval
    tstep = float(Decimal((end_time - start_time) / nts).quantize(Decimal(str(time_interval))))
    if tstep != time_interval:

        return "Invalid input!"

    # convert the stats_time window (in seconds) to sample indices,
    # rounding up when the window edge falls between samples
    delta1 = (stats_time[0] - start_time) / tstep - int((stats_time[0] - start_time) / tstep)
    delta2 = (stats_time[1] - start_time) / tstep - int((stats_time[1] - start_time) / tstep)
    if delta1 == 0:
        stats_time1 = int((stats_time[0] - start_time) / tstep)
    else:
        stats_time1 = int((stats_time[0] - start_time) / tstep) + 1
    if delta2 == 0:
        stats_time2 = int((stats_time[1] - start_time) / tstep)
    else:
        stats_time2 = int((stats_time[1] - start_time) / tstep) + 1

    # 5-point moving average (shrunk at the edges)
    if smooth is True:
        for sub in range(nsubs):
            for t in range(nts):
                if t <= 1:
                    csimilarities[sub, t] = np.average(csimilarities[sub, :t+3])
                if t > 1 and t < (nts-2):
                    csimilarities[sub, t] = np.average(csimilarities[sub, t-2:t+3])
                if t >= (nts-2):
                    csimilarities[sub, t] = np.average(csimilarities[sub, t-2:])

    # mean and standard error across subjects
    # NOTE: np.float was removed in NumPy 1.24 -> use the default float dtype
    avg = np.average(csimilarities, axis=0)
    err = np.zeros([nts])
    for t in range(nts):
        err[t] = np.std(csimilarities[:, t], ddof=1)/np.sqrt(nsubs)

    if cbpt is True:
        # cluster-based permutation test against 0 (1-sided) in the stats window
        ps_stats = clusterbased_permutation_1d_1samp_1sided(csimilarities[:, stats_time1:stats_time2], level=0,
                                                            p_threshold=p, clusterp_threshold=clusterp)
        ps = np.zeros([nts])
        ps[stats_time1:stats_time2] = ps_stats
    else:
        # uncorrected point-by-point t-tests, restricted to the stats window
        # (previously the whole epoch was tested, ignoring stats_time -
        #  now consistent with plot_tbyt_decoding_acc)
        ps = np.zeros([nts])
        for t in range(nts):
            if t >= stats_time1 and t < stats_time2:
                ps[t] = ttest_1samp(csimilarities[:, t], 0, alternative="greater")[1]
                if ps[t] < p:
                    ps[t] = 1
                else:
                    ps[t] = 0

    # mark significant time-points and shade between 0 and the error band
    for t in range(nts):
        if ps[t] == 1:
            plt.plot(t*tstep+start_time+0.5*tstep, (ymaxlim-yminlim)*0.95+yminlim, 's',
                     color=color, alpha=0.8, markersize=markersize)
            xi = [t*tstep+start_time, t*tstep+tstep+start_time]
            ymin = [0]
            ymax = [avg[t]-err[t]]
            plt.fill_between(xi, ymax, ymin, facecolor=color, alpha=0.1)

    fig = plt.gcf()
    fig.set_size_inches(figsize[0], figsize[1])
    ax = plt.gca()
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.spines["left"].set_linewidth(3)
    ax.spines["left"].set_position(("data", x0))
    ax.spines["bottom"].set_linewidth(3)
    ax.spines['bottom'].set_position(('data', 0))

    # curve +/- standard error, sample-centred on the time axis
    x = np.arange(start_time+0.5*tstep, end_time+0.5*tstep, tstep)
    if avgshow is True:
        plt.plot(x, avg, color=color, alpha=0.9)
    plt.fill_between(x, avg + err, avg - err, facecolor=color, alpha=0.8)
    plt.ylim(yminlim, ymaxlim)
    plt.xlim(xlim[0], xlim[1])
    plt.tick_params(labelsize=ticksize)
    plt.xlabel(xlabel, fontsize=fontsize)
    plt.ylabel(ylabel, fontsize=fontsize)
    plt.show()

    return 0
' a function for plotting the time-by-time decoding accuracies '
def plot_tbyt_decoding_acc(acc, start_time=0, end_time=1, time_interval=0.01, chance=0.5, p=0.05, cbpt=True,
                           clusterp=0.05, stats_time=[0, 1], color='r', xlim=[0, 1], ylim=[0.4, 0.8],
                           xlabel='Time (s)', ylabel='Decoding Accuracy', figsize=[6.4, 3.6], x0=0, ticksize=12,
                           fontsize=16, markersize=2, avgshow=False):

    """
    Plot the time-by-time decoding accuracies

    Parameters
    ----------
    acc : array
        The decoding accuracies.
        The size of acc should be [n_subs, n_ts]. n_subs, n_ts represent the number of subjects and number of
        time-points.
    start_time : int or float. Default is 0.
        The start time.
    end_time : int or float. Default is 1.
        The end time.
    time_interval : float. Default is 0.01.
        The time interval between two time samples.
    chance : float. Default is 0.5.
        The chance level.
    p : float. Default is 0.05.
        The threshold of p-values.
    cbpt : bool True or False. Default is True.
        Conduct cluster-based permutation test or not.
    clusterp : float. Default is 0.05.
        The threshold of cluster-defining p-values.
    stats_time : array or list [stats_time1, stats_time2]. Default is [0, 1].
        Time period for statistical analysis.
    color : matplotlib color or None. Default is 'r'.
        The color for the curve.
    xlim : array or list [xmin, xmax]. Default is [0, 1].
        The x-axis (time) view lims.
    ylim : array or list [ymin, ymax]. Default is [0.4, 0.8].
        The y-axis (decoding accuracy) view lims.
    xlabel : string. Default is 'Time (s)'.
        The label of x-axis.
    ylabel : string. Default is 'Decoding Accuracy'.
        The label of y-axis.
    figsize : array or list, [size_X, size_Y]. Default is [6.4, 3.6].
        The size of the figure.
    x0 : float. Default is 0.
        The Y-axis is at x=x0.
    ticksize : int or float. Default is 12.
        The size of the ticks.
    fontsize : int or float. Default is 16.
        The fontsize of the labels.
    markersize : int or float. Default is 2.
        The size of significant marker.
    avgshow : bool True or False. Default is False.
        Show the averaging decoding accuracies or not.
    """

    if len(np.shape(acc)) != 2:

        return "Invalid input!"

    nsubs, nts = np.shape(acc)
    # the implied sampling step must equal the declared time_interval exactly
    tstep = float(Decimal((end_time - start_time) / nts).quantize(Decimal(str(time_interval))))

    if tstep != time_interval:

        return "Invalid input!"

    # convert the stats_time window (in seconds) to sample indices,
    # rounding up when a window edge falls between samples
    delta1 = (stats_time[0] - start_time) / tstep - int((stats_time[0] - start_time) / tstep)
    delta2 = (stats_time[1] - start_time) / tstep - int((stats_time[1] - start_time) / tstep)

    if delta1 == 0:
        stats_time1 = int((stats_time[0] - start_time) / tstep)
    else:
        stats_time1 = int((stats_time[0] - start_time) / tstep) + 1
    if delta2 == 0:
        stats_time2 = int((stats_time[1] - start_time) / tstep)
    else:
        stats_time2 = int((stats_time[1] - start_time) / tstep) + 1

    yminlim = ylim[0]
    ymaxlim = ylim[1]

    # mean and standard error of the accuracies across subjects
    avg = np.average(acc, axis=0)
    err = np.zeros([nts])
    for t in range(nts):
        err[t] = np.std(acc[:, t], ddof=1) / np.sqrt(nsubs)

    if cbpt == True:

        # cluster-based permutation test against chance (1-sided), stats window only
        ps_stats = clusterbased_permutation_1d_1samp_1sided(acc[:, stats_time1:stats_time2], level=chance,
                                                            p_threshold=p, clusterp_threshold=clusterp, iter=1000)
        ps = np.zeros([nts])
        ps[stats_time1:stats_time2] = ps_stats

    else:

        # uncorrected point-by-point one-sample t-tests against chance;
        # ps[t] becomes a 0/1 significance flag
        ps = np.zeros([nts])
        for t in range(nts):
            if t >= stats_time1 and t< stats_time2:
                ps[t] = ttest_1samp(acc[:, t], chance, alternative="greater")[1]
                if ps[t] < p:
                    ps[t] = 1
                else:
                    ps[t] = 0

    # mark significant time-points near the top of the axes and
    # shade between chance and the lower edge of the error band
    for t in range(nts):
        if ps[t] == 1:
            plt.plot(t*tstep+start_time+0.5*tstep, (ymaxlim-yminlim)*0.95+yminlim, 's', color=color, alpha=0.8,
                     markersize=markersize)
            xi = [t*tstep+start_time, t*tstep+tstep+start_time]
            ymin = [chance]
            ymax = [avg[t] - err[t]]
            plt.fill_between(xi, ymax, ymin, facecolor=color, alpha=0.2)

    # figure styling: hide top/right spines, put the x-axis at chance level
    fig = plt.gcf()
    fig.set_size_inches(figsize[0], figsize[1])
    ax = plt.gca()
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.spines["left"].set_linewidth(3)
    ax.spines["left"].set_position(("data", x0))
    ax.spines["bottom"].set_linewidth(3)
    ax.spines["bottom"].set_position(("data", chance))

    # curve +/- standard error, sample-centred on the time axis
    x = np.arange(start_time+0.5*tstep, end_time+0.5*tstep, tstep)
    if avgshow is True:
        plt.plot(x, avg, color=color, alpha=0.9)
    plt.fill_between(x, avg+err, avg-err, facecolor=color, alpha=0.8)
    plt.ylim(yminlim, ymaxlim)
    plt.xlim(xlim[0], xlim[1])
    plt.tick_params(labelsize=ticksize)
    plt.xlabel(xlabel, fontsize=fontsize)
    plt.ylabel(ylabel, fontsize=fontsize)
    plt.show()
' a function for plotting the differences of time-by-time decoding accuracies between two conditions '
def plot_tbyt_diff_decoding_acc(acc1, acc2, start_time=0, end_time=1, time_interval=0.01, chance=0.5, p=0.05, cbpt=True,
                                clusterp=0.05, stats_time=[0, 1], color1='r', color2='b', xlim=[0, 1], ylim=[0.4, 0.8],
                                xlabel='Time (s)', ylabel='Decoding Accuracy', figsize=[6.4, 3.6], x0=0, ticksize=12,
                                fontsize=16, markersize=2, avgshow=False):

    """
    Plot the differences of time-by-time decoding accuracies between two conditions

    Parameters
    ----------
    acc1 : array
        The decoding accuracies under condition1.
        The size of acc1 should be [n_subs, n_ts]. n_subs, n_ts represent the number of subjects and number of
        time-points.
    acc2 : array
        The decoding accuracies under condition2.
        The size of acc2 should be [n_subs, n_ts]. n_subs, n_ts represent the number of subjects and number of
        time-points.
    start_time : int or float. Default is 0.
        The start time.
    end_time : int or float. Default is 1.
        The end time.
    time_interval : float. Default is 0.01.
        The time interval between two time samples.
    chance : float. Default is 0.5.
        The chance level.
    p : float. Default is 0.05.
        The threshold of p-values.
    cbpt : bool True or False. Default is True.
        Conduct cluster-based permutation test or not.
    clusterp : float. Default is 0.05.
        The threshold of cluster-defining p-values.
    stats_time : array or list [stats_time1, stats_time2]. Default is [0, 1].
        Time period for statistical analysis.
    color1 : matplotlib color or None. Default is 'r'.
        The color for the curve under condition1.
    color2 : matplotlib color or None. Default is 'b'.
        The color for the curve under condition2.
    xlim : array or list [xmin, xmax]. Default is [0, 1].
        The x-axis (time) view lims.
    ylim : array or list [ymin, ymax]. Default is [0.4, 0.8].
        The y-axis (decoding accuracy) view lims.
    xlabel : string. Default is 'Time (s)'.
        The label of x-axis.
    ylabel : string. Default is 'Decoding Accuracy'.
        The label of y-axis.
    figsize : array or list, [size_X, size_Y]. Default is [6.4, 3.6].
        The size of the figure.
    x0 : float. Default is 0.
        The Y-axis is at x=x0.
    ticksize : int or float. Default is 12.
        The size of the ticks.
    fontsize : int or float. Default is 16.
        The fontsize of the labels.
    markersize : int or float. Default is 2.
        The size of significant marker.
    avgshow : bool True or False. Default is False.
        Show the averaging decoding accuracies or not.
    """

    if len(np.shape(acc1)) != 2 or len(np.shape(acc2)) != 2:

        return "Invalid input!"

    nsubs, nts = np.shape(acc1)
    # the implied sampling step must equal the declared time_interval exactly
    tstep = float(Decimal((end_time - start_time) / nts).quantize(Decimal(str(time_interval))))

    if tstep != time_interval:

        return "Invalid input!"

    # convert the stats_time window (in seconds) to sample indices,
    # rounding up when a window edge falls between samples
    delta1 = (stats_time[0] - start_time) / tstep - int((stats_time[0] - start_time) / tstep)
    delta2 = (stats_time[1] - start_time) / tstep - int((stats_time[1] - start_time) / tstep)
    if delta1 == 0:
        stats_time1 = int((stats_time[0] - start_time) / tstep)
    else:
        stats_time1 = int((stats_time[0] - start_time) / tstep) + 1
    if delta2 == 0:
        stats_time2 = int((stats_time[1] - start_time) / tstep)
    else:
        stats_time2 = int((stats_time[1] - start_time) / tstep) + 1

    yminlim = ylim[0]
    ymaxlim = ylim[1]

    # per-condition mean and standard error across subjects
    avg1 = np.average(acc1, axis=0)
    err1 = np.zeros([nts])
    for t in range(nts):
        err1[t] = np.std(acc1[:, t], ddof=1) / np.sqrt(nsubs)
    avg2 = np.average(acc2, axis=0)
    err2 = np.zeros([nts])
    for t in range(nts):
        err2[t] = np.std(acc2[:, t], ddof=1) / np.sqrt(nsubs)

    if cbpt is True:

        # cluster-based permutation tests: each condition vs chance (1-sided)
        # and condition1 vs condition2 (2-sided, on the difference)
        ps1_stats = clusterbased_permutation_1d_1samp_1sided(acc1[:, stats_time1:stats_time2], level=chance,
                                                             p_threshold=p, clusterp_threshold=clusterp, iter=1000)
        ps1 = np.zeros([nts])
        ps1[stats_time1:stats_time2] = ps1_stats
        ps2_stats = clusterbased_permutation_1d_1samp_1sided(acc2[:, stats_time1:stats_time2], level=chance,
                                                             p_threshold=p, clusterp_threshold=clusterp, iter=1000)
        ps2 = np.zeros([nts])
        ps2[stats_time1:stats_time2] = ps2_stats
        ps_stats = clusterbased_permutation_1d_1samp_2sided(acc1[:, stats_time1:stats_time2]-
                                                            acc2[:, stats_time1:stats_time2], level=0, p_threshold=p,
                                                            clusterp_threshold=clusterp, iter=1000)
        ps = np.zeros([nts])
        ps[stats_time1:stats_time2] = ps_stats

    else:

        # uncorrected point-by-point t-tests within the stats window
        ps1 = np.zeros([nts])
        ps2 = np.zeros([nts])
        ps = np.zeros([nts])
        for t in range(nts):
            if t >= stats_time1 and t < stats_time2:
                ps1[t] = ttest_1samp(acc1[:, t], chance, alternative="greater")[1]
                ps2[t] = ttest_1samp(acc2[:, t], chance, alternative="greater")[1]
                if ps1[t] < p:
                    ps1[t] = 1
                else:
                    ps1[t] = 0
                if ps2[t] < p:
                    ps2[t] = 1
                else:
                    ps2[t] = 0
                # BUGFIX: the paired tests previously compared acc1 with itself
                # (ttest_rel(acc1[:, t], acc1[:, t])), so between-condition
                # differences could never be detected; compare acc1 vs acc2.
                if ttest_rel(acc1[:, t], acc2[:, t], alternative="greater")[1] < p:
                    ps[t] = 1
                elif ttest_rel(acc1[:, t], acc2[:, t], alternative="less")[1] < p:
                    ps[t] = -1
                else:
                    ps[t] = 0

    # markers for each condition's significance; grey shading between the two
    # error bands where the conditions differ significantly (either direction)
    for t in range(nts):
        if ps1[t] == 1:
            plt.plot(t*tstep+start_time+0.5*tstep, (ymaxlim-yminlim)*0.95+yminlim, 's', color=color1, alpha=0.8,
                     markersize=markersize)
        if ps2[t] == 1:
            plt.plot(t*tstep+start_time+0.5*tstep, (ymaxlim-yminlim)*0.91+yminlim, 's', color=color2, alpha=0.8,
                     markersize=markersize)
        if ps[t] == 1:
            xi = [t*tstep+start_time, t*tstep+tstep+start_time]
            ymin = [avg2[t] + err2[t]]
            ymax = [avg1[t] - err1[t]]
            plt.fill_between(xi, ymax, ymin, facecolor="grey", alpha=0.2)
        if ps[t] == -1:
            xi = [t*tstep+start_time, t*tstep+tstep+start_time]
            ymin = [avg1[t] + err1[t]]
            ymax = [avg2[t] - err2[t]]
            plt.fill_between(xi, ymax, ymin, facecolor="grey", alpha=0.2)

    # figure styling: hide top/right spines, put the x-axis at chance level
    fig = plt.gcf()
    fig.set_size_inches(figsize[0], figsize[1])
    ax = plt.gca()
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.spines["left"].set_linewidth(3)
    ax.spines["left"].set_position(("data", x0))
    ax.spines["bottom"].set_linewidth(3)
    ax.spines["bottom"].set_position(("data", chance))

    # both curves +/- standard error, sample-centred on the time axis
    x = np.arange(start_time+0.5*tstep, end_time+0.5*tstep, tstep)
    if avgshow is True:
        plt.plot(x, avg1, color=color1, alpha=0.9)
        plt.plot(x, avg2, color=color2, alpha=0.9)
    plt.fill_between(x, avg1+err1, avg1-err1, facecolor=color1, alpha=0.8)
    plt.fill_between(x, avg2+err2, avg2-err2, facecolor=color2, alpha=0.8)
    plt.ylim(yminlim, ymaxlim)
    plt.xlim(xlim[0], xlim[1])
    plt.tick_params(labelsize=ticksize)
    plt.xlabel(xlabel, fontsize=fontsize)
    plt.ylabel(ylabel, fontsize=fontsize)
    plt.show()
' a function for plotting cross-temporal decoding accuracies '
def plot_ct_decoding_acc(acc, start_timex=0, end_timex=1, start_timey=0, end_timey=1, time_intervalx=0.01,
                         time_intervaly=0.01, chance=0.5, p=0.05, cbpt=True, clusterp=0.05, stats_timex=[0, 1],
                         stats_timey=[0, 1], xlim=[0, 1], ylim=[0, 1], clim=[0.4, 0.8], xlabel='Training Time (s)',
                         ylabel='Test Time (s)', clabel='Decoding Accuracy', figsize=[6.4, 4.8], cmap="viridis",
                         ticksize=12, fontsize=16):

    """
    Plot the cross-temporal decoding accuracies

    Parameters
    ----------
    acc : array
        The decoding accuracies.
        The size of acc should be [n_subs, n_tsx, n_tsy]. n_subs, n_tsx and n_tsy represent the number of subjects,
        the number of training time-points and the number of test time-points.
    start_timex : int or float. Default is 0.
        The training start time.
    end_timex : int or float. Default is 1.
        The training end time.
    start_timey : int or float. Default is 0.
        The test start time.
    end_timey : int or float. Default is 1.
        The test end time.
    time_intervalx : float. Default is 0.01.
        The training time interval between two time samples.
    time_intervaly : float. Default is 0.01.
        The test time interval between two time samples.
    chance : float. Default is 0.5.
        The chance level.
    p : float. Default is 0.05.
        The threshold of p-values.
    cbpt : bool True or False. Default is True.
        Conduct cluster-based permutation test or not.
    clusterp : float. Default is 0.05.
        The threshold of cluster-defining p-values.
    stats_timex : array or list [stats_timex1, stats_timex2]. Default is [0, 1].
        Training time period for statistical analysis.
    stats_timey : array or list [stats_timey1, stats_timey2]. Default is [0, 1].
        Test time period for statistical analysis.
    xlim : array or list [xmin, xmax]. Default is [0, 1].
        The x-axis (training time) view lims.
    ylim : array or list [ymin, ymax]. Default is [0, 1].
        The y-axis (test time) view lims.
    clim : array or list [cmin, cmax]. Default is [0.4, 0.8].
        The color-bar (decoding accuracy) view lims.
    xlabel : string. Default is 'Training Time (s)'.
        The label of x-axis.
    ylabel : string. Default is 'Test Time (s)'.
        The label of y-axis.
    clabel : string. Default is 'Decoding Accuracy'.
        The label of color-bar.
    figsize : array or list, [size_X, size_Y]. Default is [6.4, 4.8].
        The size of the figure.
    cmap : matplotlib colormap. Default is "viridis".
        The colormap for the figure.
    ticksize : int or float. Default is 12.
        The size of the ticks.
    fontsize : int or float. Default is 16.
        The fontsize of the labels.
    """

    nsubs, nx, ny = np.shape(acc)
    cminlim = clim[0]
    cmaxlim = clim[1]
    # the implied sampling steps must equal the declared time intervals exactly
    tstepx = float(Decimal((end_timex - start_timex) / nx).quantize(Decimal(str(time_intervalx))))
    tstepy = float(Decimal((end_timey - start_timey) / ny).quantize(Decimal(str(time_intervaly))))
    if tstepx != time_intervalx or tstepy != time_intervaly:
        return "Invalid input!"
    # convert the training-time stats window (in seconds) to sample indices,
    # rounding up when a window edge falls between samples
    deltax1 = (stats_timex[0] - start_timex) / tstepx - int((stats_timex[0] - start_timex) / tstepx)
    deltax2 = (stats_timex[1] - start_timex) / tstepx - int((stats_timex[1] - start_timex) / tstepx)
    if deltax1 == 0:
        stats_timex1 = int((stats_timex[0] - start_timex) / tstepx)
    else:
        stats_timex1 = int((stats_timex[0] - start_timex) / tstepx) + 1
    if deltax2 == 0:
        stats_timex2 = int((stats_timex[1] - start_timex) / tstepx)
    else:
        stats_timex2 = int((stats_timex[1] - start_timex) / tstepx) + 1
    # same conversion for the test-time stats window
    deltay1 = (stats_timey[0] - start_timey) / tstepy - int((stats_timey[0] - start_timey) / tstepy)
    deltay2 = (stats_timey[1] - start_timey) / tstepy - int((stats_timey[1] - start_timey) / tstepy)
    if deltay1 == 0:
        stats_timey1 = int((stats_timey[0] - start_timey) / tstepy)
    else:
        stats_timey1 = int((stats_timey[0] - start_timey) / tstepy) + 1
    if deltay2 == 0:
        stats_timey2 = int((stats_timey[1] - start_timey) / tstepy)
    else:
        stats_timey2 = int((stats_timey[1] - start_timey) / tstepy) + 1
    if cbpt is True:
        # 2D cluster-based permutation test against chance (1-sided),
        # restricted to the stats window
        ps_stats = clusterbased_permutation_2d_1samp_1sided(
            acc[:, stats_timex1:stats_timex2, stats_timey1:stats_timey2], level=chance, p_threshold=p,
            clusterp_threshold=clusterp, iter=1000)
        ps = np.zeros([nx, ny])
        ps[stats_timex1:stats_timex2, stats_timey1:stats_timey2] = ps_stats
    else:
        # uncorrected point-by-point one-sample t-tests against chance;
        # ps[t1, t2] becomes a 0/1 significance flag
        ps = np.zeros([nx, ny])
        for t1 in range(nx):
            for t2 in range(ny):
                if t1 >= stats_timex1 and t1 < stats_timex2 and t2 >= stats_timey1 and t2 < stats_timey2:
                    ps[t1, t2] = ttest_1samp(acc[:, t1, t2], chance, alternative="greater")[1]
                    if ps[t1, t2] < p:
                        ps[t1, t2] = 1
                    else:
                        ps[t1, t2] = 0
    # pad the significance map with a zero border so that contours of clusters
    # touching the edge of the map are still closed
    newps = np.zeros([nx + 2, ny + 2])
    newps[1:nx + 1, 1:ny + 1] = ps
    x = np.linspace(start_timex - 0.5 * tstepx, end_timex + 0.5 * tstepx, nx + 2)
    y = np.linspace(start_timey - 0.5 * tstepy, end_timey + 0.5 * tstepy, ny + 2)
    X, Y = np.meshgrid(x, y)
    # dashed silver outline around the significant region
    plt.contour(X, Y, np.transpose(newps, (1, 0)), [0, 1], colors="silver", alpha=0.9, linewidths=3,
                linestyles="dashed")
    fig = plt.gcf()
    fig.set_size_inches(figsize[0], figsize[1])
    ax = plt.gca()
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.spines["left"].set_linewidth(2)
    ax.spines["bottom"].set_linewidth(2)
    # subject-averaged accuracy map; transpose so training time is on the x-axis
    avg = np.average(acc, axis=0)
    avg = np.transpose(avg, (1, 0))
    plt.imshow(avg, extent=(start_timex, end_timex, start_timey, end_timey), cmap=cmap, origin="lower",
               clim=(cminlim, cmaxlim))
    cb = plt.colorbar()
    cb.ax.tick_params(labelsize=ticksize)
    font = {'size': ticksize+2}
    cb.set_label(clabel, fontdict=font)
    plt.xlim(xlim[0], xlim[1])
    plt.ylim(ylim[0], ylim[1])
    plt.tick_params(labelsize=ticksize)
    plt.xlabel(xlabel, fontsize=fontsize)
    plt.ylabel(ylabel, fontsize=fontsize)
    plt.show()
' a function for plotting the differences of cross-temporal decoding accuracies between two conditions '
def plot_ct_diff_decoding_acc(acc1, acc2, start_timex=0, end_timex=1, start_timey=0, end_timey=1, time_intervalx=0.01,
                              time_intervaly=0.01, p=0.05, cbpt=True, clusterp=0.05, stats_timex=[0, 1],
                              stats_timey=[0, 1], xlim=[0, 1], ylim=[0, 1], clim=[0.4, 0.8], xlabel='Training Time (s)',
                              ylabel='Test Time (s)', clabel='Differences of Decoding Accuracies', figsize=[6.4, 4.8],
                              cmap="viridis", ticksize=12, fontsize=16):

    """
    Plot the differences of cross-temporal decoding accuracies between two conditions

    Parameters
    ----------
    acc1 : array
        The decoding accuracies under condition1.
        The size of acc should be [n_subs, n_tsx, n_tsy]. n_subs, n_tsx and n_tsy represent the number of subjects,
        the number of training time-points and the number of test time-points.
    acc2 : array
        The decoding accuracies under condition2.
        The size of acc should be [n_subs, n_tsx, n_tsy]. n_subs, n_tsx and n_tsy represent the number of subjects,
        the number of training time-points and the number of test time-points.
    start_timex : int or float. Default is 0.
        The training start time.
    end_timex : int or float. Default is 1.
        The training end time.
    start_timey : int or float. Default is 0.
        The test start time.
    end_timey : int or float. Default is 1.
        The test end time.
    time_intervalx : float. Default is 0.01.
        The training time interval between two time samples.
    time_intervaly : float. Default is 0.01.
        The test time interval between two time samples.
    p : float. Default is 0.05.
        The threshold of p-values.
    cbpt : bool True or False. Default is True.
        Conduct cluster-based permutation test or not.
    clusterp : float. Default is 0.05.
        The threshold of cluster-defining p-values.
    stats_timex : array or list [stats_timex1, stats_timex2]. Default is [0, 1].
        Training time period for statistical analysis.
    stats_timey : array or list [stats_timey1, stats_timey2]. Default is [0, 1].
        Test time period for statistical analysis.
    xlim : array or list [xmin, xmax]. Default is [0, 1].
        The x-axis (training time) view lims.
    ylim : array or list [ymin, ymax]. Default is [0, 1].
        The y-axis (test time) view lims.
    clim : array or list [cmin, cmax]. Default is [0.4, 0.8].
        The color-bar (decoding accuracy) view lims.
    xlabel : string. Default is 'Training Time (s)'.
        The label of x-axis.
    ylabel : string. Default is 'Test Time (s)'.
        The label of y-axis.
    clabel : string. Default is 'Differences of Decoding Accuracies'.
        The label of color-bar.
    figsize : array or list, [size_X, size_Y]. Default is [6.4, 4.8].
        The size of the figure.
    cmap : matplotlib colormap. Default is "viridis".
        The colormap for the figure.
    ticksize : int or float. Default is 12.
        The size of the ticks.
    fontsize : int or float. Default is 16.
        The fontsize of the labels.
    """

    # condition difference, per subject and per (training, test) time pair
    acc = acc1 - acc2
    nsubs, nx, ny = np.shape(acc)
    cminlim = clim[0]
    cmaxlim = clim[1]
    # the implied sampling steps must equal the declared time intervals exactly
    tstepx = float(Decimal((end_timex - start_timex) / nx).quantize(Decimal(str(time_intervalx))))
    tstepy = float(Decimal((end_timey - start_timey) / ny).quantize(Decimal(str(time_intervaly))))
    if tstepx != time_intervalx or tstepy != time_intervaly:
        return "Invalid input!"
    # convert the training-time stats window (in seconds) to sample indices,
    # rounding up when a window edge falls between samples
    deltax1 = (stats_timex[0] - start_timex) / tstepx - int((stats_timex[0] - start_timex) / tstepx)
    deltax2 = (stats_timex[1] - start_timex) / tstepx - int((stats_timex[1] - start_timex) / tstepx)
    if deltax1 == 0:
        stats_timex1 = int((stats_timex[0] - start_timex) / tstepx)
    else:
        stats_timex1 = int((stats_timex[0] - start_timex) / tstepx) + 1
    if deltax2 == 0:
        stats_timex2 = int((stats_timex[1] - start_timex) / tstepx)
    else:
        stats_timex2 = int((stats_timex[1] - start_timex) / tstepx) + 1
    # same conversion for the test-time stats window
    deltay1 = (stats_timey[0] - start_timey) / tstepy - int((stats_timey[0] - start_timey) / tstepy)
    deltay2 = (stats_timey[1] - start_timey) / tstepy - int((stats_timey[1] - start_timey) / tstepy)
    if deltay1 == 0:
        stats_timey1 = int((stats_timey[0] - start_timey) / tstepy)
    else:
        stats_timey1 = int((stats_timey[0] - start_timey) / tstepy) + 1
    if deltay2 == 0:
        stats_timey2 = int((stats_timey[1] - start_timey) / tstepy)
    else:
        stats_timey2 = int((stats_timey[1] - start_timey) / tstepy) + 1
    if cbpt is True:
        # 2D cluster-based permutation test, condition1 vs condition2 (2-sided),
        # restricted to the stats window; ps holds -1/0/1
        ps_stats = clusterbased_permutation_2d_2sided(acc1[:, stats_timex1:stats_timex2, stats_timey1:stats_timey2],
                                                      acc2[:, stats_timex1:stats_timex2, stats_timey1:stats_timey2],
                                                      p_threshold=p, clusterp_threshold=clusterp, iter=1000)
        ps = np.zeros([nx, ny])
        ps[stats_timex1:stats_timex2, stats_timey1:stats_timey2] = ps_stats
    else:
        # uncorrected point-by-point t-tests on the difference against 0;
        # 1 = condition1 > condition2, -1 = condition1 < condition2
        ps = np.zeros([nx, ny])
        for t1 in range(nx):
            for t2 in range(ny):
                if t1 >= stats_timex1 and t1 < stats_timex2 and t2 >= stats_timey1 and t2 < stats_timey2:
                    if ttest_1samp(acc[:, t1, t2], 0, alternative="greater")[1] < p:
                        ps[t1, t2] = 1
                    elif ttest_1samp(acc[:, t1, t2], 0, alternative="less")[1] < p:
                        ps[t1, t2] = -1
                    else:
                        ps[t1, t2] = 0
    # pad the significance map with a zero border so that contours of clusters
    # touching the edge of the map are still closed
    newps = np.zeros([nx + 2, ny + 2])
    newps[1:nx + 1, 1:ny + 1] = ps
    x = np.linspace(start_timex - 0.5 * tstepx, end_timex + 0.5 * tstepx, nx + 2)
    y = np.linspace(start_timey - 0.5 * tstepy, end_timey + 0.5 * tstepy, ny + 2)
    X, Y = np.meshgrid(x, y)
    # dashed silver outlines around both positive and negative regions
    # (levels at -0.5 and 0.5 separate -1/0/1)
    plt.contour(X, Y, np.transpose(newps, (1, 0)), (-0.5, 0.5), colors="silver", alpha=0.9, linewidths=3,
                linestyles="dashed")
    fig = plt.gcf()
    fig.set_size_inches(figsize[0], figsize[1])
    ax = plt.gca()
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.spines["left"].set_linewidth(2)
    ax.spines["bottom"].set_linewidth(2)
    # subject-averaged difference map; transpose so training time is on the x-axis
    avg = np.average(acc, axis=0)
    avg = np.transpose(avg, (1, 0))
    plt.imshow(avg, extent=(start_timex, end_timex, start_timey, end_timey), cmap=cmap, origin="lower",
               clim=(cminlim, cmaxlim))
    cb = plt.colorbar()
    cb.ax.tick_params(labelsize=ticksize)
    font = {'size': ticksize+2}
    cb.set_label(clabel, fontdict=font)
    plt.xlim(xlim[0], xlim[1])
    plt.ylim(ylim[0], ylim[1])
    plt.tick_params(labelsize=ticksize)
    plt.xlabel(xlabel, fontsize=fontsize)
    plt.ylabel(ylabel, fontsize=fontsize)
    plt.show()
' a function for plotting the hotmap of correlations coefficients for channels/regions by time sequence '
def plot_corrs_hotmap(corrs, chllabels=None, time_unit=[0, 0.1], lim=[0, 1], smooth=False, figsize=None, cmap=None):
    """
    plot the hotmap of correlation coefficients for channels/regions by time sequence

    corrs : array
        The correlation coefficients time-by-time.
        The shape of corrs must be [n_chls, ts, 2] or [n_chls, ts]. n_chls represents the number of channels or
        regions. ts represents the number of time-points. If shape of corrs is [n_chls, ts 2], each time-point
        of each channel/region contains a r-value and a p-value. If shape is [n_chls, ts], only r-values.
    chllabel : string-array or string-list or None. Default is None.
        The label for channels/regions.
        If label=None, the labels will be '1st', '2nd', '3th', '4th', ... automatically.
    time_unit : array or list [start_t, t_step]. Default is [0, 0.1]
        The time information of corrs for plotting
        start_t represents the start time and t_step represents the time between two adjacent time-points. Default
        time_unit=[0, 0.1], which means the start time of corrs is 0 sec and the time step is 0.1 sec.
    lim : array or list [min, max]. Default is [0, 1].
        The corrs view lims.
    smooth : bool True or False. Default is False.
        Smooth the results or not.
    figsize : array or list, [size_X, size_Y]
        The size of the figure.
        If figsize=None, the size of the figure will be ajusted automatically.
    cmap : matplotlib colormap or None. Default is None.
        The colormap for the figure.
        If cmap=None, the colormap will be 'inferno'.

    Returns
    -------
    0 on success; the string "Invalid input!" if corrs is not 2-D or 3-D.
    """
    # corrs must have 2 dims ([n_chls, ts]) or 3 dims ([n_chls, ts, 2])
    if len(np.shape(corrs)) < 2 or len(np.shape(corrs)) > 3:
        return "Invalid input!"
    # get the number of channels
    nchls = corrs.shape[0]
    # get the number of time-points
    ts = corrs.shape[1]
    # get the start time and the time step
    start_t = time_unit[0]
    tstep = time_unit[1]
    # calculate the end time
    end_t = start_t + ts * tstep
    # initialize the x
    x = np.arange(start_t, end_t, tstep)
    # set labels of the channels
    if chllabels == None:
        chllabels = []
        for i in range(nchls):
            # ordinal suffix for the (i+1)-th channel; only 11/12/13 are
            # special-cased, so labels past 110 (e.g. "111st") would be wrong
            if i % 10 == 0 and i != 10:
                newlabel = str(i+1) + "st"
            elif i % 10 == 1 and i != 11:
                newlabel = str(i+1) + "nd"
            elif i % 10 == 2 and i != 12:
                newlabel = str(i+1) + "rd"
            else:
                newlabel = str(i+1) + "th"
            chllabels.append(newlabel)
    # smooth the results
    if smooth == True:
        # upsample 50x with cubic interpolation, then low-pass filter the curve
        t = ts * 50
        x_soft = np.linspace(x.min(), x.max(), t)
        y_soft = np.zeros([nchls, t])
        samplerate = int(1 / tstep) * 50
        # 4th-order Butterworth low-pass at 30 Hz of the upsampled rate
        b, a = signal.butter(4, 2*30/samplerate, 'lowpass')
        for i in range(nchls):
            if len(corrs.shape) == 3:
                # use only the r-values (index 0 of the last axis)
                f = interp1d(x, corrs[i, :, 0], kind='cubic')
                y_soft[i] = f(x_soft)
            elif len(corrs.shape) == 2:
                f = interp1d(x, corrs[i, :], kind='cubic')
                y_soft[i] = f(x_soft)
            y_soft[i] = signal.filtfilt(b, a, y_soft[i])
        rlts = y_soft
    if smooth == False:
        if len(corrs.shape) == 3:
            rlts = corrs[:, :, 0]
        elif len(corrs.shape) == 2:
            rlts = corrs
    fig = plt.gcf()
    size = fig.get_size_inches()
    # auto figure size scales with the time span (x) and channel count (y)
    if figsize == None:
        size_x = ts * tstep * (size[0] - 2) + 2
        size_y = nchls * 0.2 * (size[1] - 1.5) + 1.5
    else:
        size_x = figsize[0]
        size_y = figsize[1]
    fig.set_size_inches(size_x, size_y)
    # aspect correction factor used for the vertical extent of the image
    delta = (size_y * 3) / (size_x * 4)
    # get min of lims & max of lims
    limmin = lim[0]
    limmax = lim[1]
    if cmap == None:
        plt.imshow(rlts, extent=(start_t, end_t, 0, nchls*0.16*delta), clim=(limmin, limmax), origin='lower', cmap='inferno')
    else:
        plt.imshow(rlts, extent=(start_t, end_t, 0, nchls * 0.16*delta), clim=(limmin, limmax), origin='lower', cmap=cmap)
    cb = plt.colorbar()
    cb.ax.tick_params(labelsize=16)
    font = {'size': 18}
    cb.set_label("Similarity", fontdict=font)
    # place one y-tick at the vertical center of each channel row
    xi = []
    for i in range(nchls):
        xi.append(0.16*delta*i + 0.08*delta)
    yi = chllabels
    plt.tick_params(labelsize=18)
    plt.yticks(xi, yi, fontsize=18)
    plt.ylabel("Channel", fontsize=20)
    plt.xlabel("Time (s)", fontsize=20)
    plt.show()
    return 0
' a function for plotting the hotmap of correlations coefficients for channels/regions by time sequence with the significant outline '
def plot_corrs_hotmap_withstats(corrs, chllabels=None, time_unit=[0, 0.1], lim=[0, 1], p=0.05, cbpt=False,
                                clusterp=0.05, stats_time=[0, 1], smooth=False, xlabel='Time (s)', ylabel='Channel',
                                clabel='Similarity', ticksize=18, figsize=None, cmap=None):
    """
    plot the hotmap of correlation coefficients for channels/regions by time sequence with the significant outline

    corrs : array
        The correlation coefficients time-by-time.
        The shape of corrs must be [n_subs, n_chls, ts, 2] or [n_subs, n_chls, ts]. n_subs represents the number of
        subjects. n_chls represents the number of channels or regions. ts represents the number of time-points. If shape
        of corrs is [n_chls, ts 2], each time-point of each channel/region contains a r-value and a p-value. If shape is
        [n_chls, ts], only r-values.
    chllabels : string-array or string-list or None. Default is None.
        The label for channels/regions.
        If label=None, the labels will be '1st', '2nd', '3th', '4th', ... automatically.
    time_unit : array or list [start_t, t_step]. Default is [0, 0.1]
        The time information of corrs for plotting
        start_t represents the start time and t_step represents the time between two adjacent time-points. Default
        time_unit=[0, 0.1], which means the start time of corrs is 0 sec and the time step is 0.1 sec.
    lim : array or list [min, max]. Default is [0, 1].
        The corrs view lims.
    p: float. Default is 0.05.
        The p threshold for outline.
    cbpt : bool True or False. Default is True.
        Conduct cluster-based permutation test or not.
    clusterp : float. Default is 0.05.
        The threshold of cluster-defining p-values.
    stats_time : array or list [stats_time1, stats_time2]. Default is [0, 1].
        The time period for statistical analysis.
    smooth : bool True or False. Default is False.
        Smooth the results or not.
    xlabel : string. Default is 'Time (s)'.
        The label of x-axis.
    ylabel : string. Default is 'Channel'.
        The label of y-axis.
    clabel : string. Default is 'Similarity'.
        The label of color-bar.
    ticksize : int or float. Default is 18.
        The size of the ticks.
    figsize : array or list, [size_X, size_Y]
        The size of the figure.
        If figsize=None, the size of the figure will be ajusted automatically.
    cmap : matplotlib colormap or None. Default is None.
        The colormap for the figure.
        If cmap=None, the colormap will be 'inferno'.

    Returns
    -------
    0 on success; the string "Invalid input!" if corrs is not 3-D or 4-D.
    """
    if len(np.shape(corrs)) < 3 or len(np.shape(corrs)) > 4:
        return "Invalid input!"
    # get the number of channels
    nchls = corrs.shape[1]
    # get the number of time-points
    nts = corrs.shape[2]
    # get the start time and the time step
    start_time = time_unit[0]
    tstep = time_unit[1]
    # calculate the end time
    end_time = start_time + nts * tstep
    # convert stats_time to sample indices (round up when the requested time
    # does not fall exactly on a sample)
    delta1 = (stats_time[0] - start_time) / tstep - int((stats_time[0] - start_time) / tstep)
    delta2 = (stats_time[1] - start_time) / tstep - int((stats_time[1] - start_time) / tstep)
    if delta1 == 0:
        stats_time1 = int((stats_time[0] - start_time) / tstep)
    else:
        stats_time1 = int((stats_time[0] - start_time) / tstep) + 1
    if delta2 == 0:
        stats_time2 = int((stats_time[1] - start_time) / tstep)
    else:
        stats_time2 = int((stats_time[1] - start_time) / tstep) + 1
    # set labels of the channels
    if chllabels is None:
        chllabels = []
        for i in range(nchls):
            # ordinal suffix for the (i+1)-th channel; only 11/12/13 are
            # special-cased, so labels past 110 (e.g. "111st") would be wrong
            if i % 10 == 0 and i != 10:
                newlabel = str(i+1) + "st"
            elif i % 10 == 1 and i != 11:
                newlabel = str(i+1) + "nd"
            elif i % 10 == 2 and i != 12:
                newlabel = str(i+1) + "rd"
            else:
                newlabel = str(i+1) + "th"
            chllabels.append(newlabel)
    # keep only the r-values when p-values are included
    if len(corrs.shape) == 4:
        rlts = corrs[:, :, :, 0]
    elif len(corrs.shape) == 3:
        rlts = corrs
    # smooth the results
    if smooth:
        for chl in range(nchls):
            rlts[:, chl] = smooth_1d(rlts[:, chl])
    fig = plt.gcf()
    size = fig.get_size_inches()
    # auto figure size scales with the time span (x) and channel count (y)
    if figsize is None:
        size_x = nts * tstep * (size[0] - 2) + 2
        size_y = nchls * 0.2 * (size[1] - 1.5) + 1.5
    else:
        size_x = figsize[0]
        size_y = figsize[1]
    fig.set_size_inches(size_x, size_y)
    # aspect correction factor used for the vertical extent of the image
    delta = (size_y * 3) / (size_x * 4)
    avg = np.average(rlts, axis=0)
    # ps: +1 for significant positive, -1 for significant negative, 0 otherwise
    ps = np.zeros([nchls, nts])
    if cbpt:
        for chl in range(nchls):
            ps_stats = clusterbased_permutation_1d_1samp_2sided(rlts[:, chl, stats_time1:stats_time2], 0, p_threshold=p,
                                                                clusterp_threshold=clusterp, iter=1000)
            ps[chl, stats_time1:stats_time2] = ps_stats
    else:
        for chl in range(nchls):
            for t in range(nts):
                if t >= stats_time1 and t < stats_time2:
                    ps[chl, t] = ttest_1samp(rlts[:, chl, t], 0)[1]
                    if ps[chl, t] < p and avg[chl, t] > 0:
                        ps[chl, t] = 1
                    elif ps[chl, t] < p and avg[chl, t] < 0:
                        ps[chl, t] = -1
                    else:
                        ps[chl, t] = 0
    # zero-pad the significance map so the contour lines close at the borders.
    # Bug fix: np.float was removed in NumPy 1.24 - plain float (float64) is equivalent.
    newps = np.zeros([nchls + 2, nts + 2], dtype=float)
    newps[1:nchls + 1, 1:nts + 1] = ps
    x = np.linspace(start_time - 0.5 * tstep, end_time + 0.5 * tstep, nts + 2)
    y = np.linspace(-0.08*delta, 0.16*delta * nchls + 0.08*delta, nchls + 2)
    X, Y = np.meshgrid(x, y)
    plt.contour(X, Y, newps, [0.5], linewidths=2, linestyles="dashed")
    plt.contour(X, Y, newps, [-0.5], linewidths=2, linestyles="dashed")
    # get min of lims & max of lims
    limmin = lim[0]
    limmax = lim[1]
    if cmap is None:
        plt.imshow(avg, extent=(start_time, end_time, 0, nchls*delta*0.16), clim=(limmin, limmax), origin='lower', cmap='inferno')
    else:
        plt.imshow(avg, extent=(start_time, end_time, 0, nchls*delta * 0.16), clim=(limmin, limmax), origin='lower', cmap=cmap)
    cb = plt.colorbar()
    cb.ax.tick_params(labelsize=ticksize-2)
    font = {'size': ticksize}
    cb.set_label(clabel, fontdict=font)
    # place one y-tick at the vertical center of each channel row
    xi = []
    for i in range(nchls):
        xi.append(0.16*delta*i + 0.08*delta)
    yi = chllabels
    plt.tick_params(labelsize=ticksize)
    plt.yticks(xi, yi, fontsize=ticksize)
    plt.ylabel(ylabel, fontsize=20)
    plt.xlabel(xlabel, fontsize=20)
    plt.show()
    return 0
' a function for plotting the hotmap of neural pattern similarities for channels/regions by time sequence '
def plot_nps_hotmap(similarities, chllabels=None, time_unit=[0, 0.1], lim=[0, 1], abs=False, smooth=False, figsize=None,
                    cmap=None):
    """
    plot the hotmap of neural pattern similarities for channels/regions by time sequence

    similarities : array
        The neural pattern similarities time-by-time.
        The shape of similarities must be [n_chls, ts]. n_chls represents the number of channels or regions.
        ts represents the number of time-points.
    chllabel : string-array or string-list or None. Default is None.
        The label for channels/regions.
        If label=None, the labels will be '1st', '2nd', '3th', '4th', ... automatically.
    time_unit : array or list [start_t, t_step]. Default is [0, 0.1]
        The time information of corrs for plotting
        start_t represents the start time and t_step represents the time between two adjacent time-points. Default
        time_unit=[0, 0.1], which means the start time of corrs is 0 sec and the time step is 0.1 sec.
    lim : array or list [min, max]. Default is [0, 1].
        The corrs view lims.
    abs : boolean True or False. Default is False.
        Change the similarities into absolute values or not.
        NOTE: this parameter shadows the builtin abs() inside this function.
    smooth : boolean True or False. Default is False.
        Smooth the results or not.
    figsize : array or list, [size_X, size_Y]
        The size of the figure.
        If figsize=None, the size of the figure will be ajusted automatically.
    cmap : matplotlib colormap or None. Default is None.
        The colormap for the figure.
        If cmap=None, the colormap will be 'viridis'.

    Returns
    -------
    0 on success; the string "Invalid input!" if similarities is not 2-D.
    """
    if len(np.shape(similarities)) != 2:
        return "Invalid input!"
    # absolute value
    if abs == True:
        similarities = np.abs(similarities)
    # get the number of channels
    nchls = similarities.shape[0]
    # get the number of time-points
    ts = similarities.shape[1]
    # get the start time and the time step
    start_t = time_unit[0]
    tstep = time_unit[1]
    # calculate the end time
    end_t = start_t + ts * tstep
    # initialize the x
    x = np.arange(start_t, end_t, tstep)
    # set labels of the channels
    if chllabels == None:
        chllabels = []
        for i in range(nchls):
            # ordinal suffix for the (i+1)-th channel; only 11/12/13 are
            # special-cased, so labels past 110 (e.g. "111st") would be wrong
            if i % 10 == 0 and i != 10:
                newlabel = str(i + 1) + "st"
            elif i % 10 == 1 and i != 11:
                newlabel = str(i + 1) + "nd"
            elif i % 10 == 2 and i != 12:
                newlabel = str(i + 1) + "rd"
            else:
                newlabel = str(i + 1) + "th"
            chllabels.append(newlabel)
    # smooth: upsample 50x with cubic interpolation, then low-pass filter
    if smooth == True:
        t = ts * 50
        x_soft = np.linspace(x.min(), x.max(), t)
        y_soft = np.zeros([nchls, t])
        samplerate = int(1 / tstep) * 50
        # 4th-order Butterworth low-pass at 30 Hz of the upsampled rate
        b, a = signal.butter(4, 2*30/samplerate, 'lowpass')
        for i in range(nchls):
            f = interp1d(x, similarities[i, :], kind='cubic')
            y_soft[i] = f(x_soft)
            y_soft[i] = signal.filtfilt(b, a, y_soft[i])
        rlts = y_soft
    if smooth == False:
        rlts = similarities
    fig = plt.gcf()
    size = fig.get_size_inches()
    # auto figure size scales with the time span (x) and channel count (y)
    if figsize == None:
        size_x = ts * tstep * (size[0] - 2) + 2
        size_y = nchls * 0.2 * (size[1] - 1.5) + 1.5
    else:
        size_x = figsize[0]
        size_y = figsize[1]
    fig.set_size_inches(size_x, size_y)
    # aspect correction factor used for the vertical extent of the image
    delta = (size_y * 3) / (size_x * 4)
    # get min of lims & max of lims
    limmin = lim[0]
    limmax = lim[1]
    if cmap == None:
        plt.imshow(rlts, extent=(start_t, end_t, 0, nchls*delta*0.16), clim=(limmin, limmax), origin='lower')
    else:
        plt.imshow(rlts, extent=(start_t, end_t, 0, nchls*delta*0.16), clim=(limmin, limmax), origin='lower', cmap=cmap)
    cb = plt.colorbar()
    cb.ax.tick_params(labelsize=16)
    font = {'size': 18}
    cb.set_label("Similarity", fontdict=font)
    # place one y-tick at the vertical center of each channel row
    xi = []
    for i in range(nchls):
        xi.append(0.16*delta*i + 0.08*delta)
    yi = chllabels
    plt.tick_params(labelsize=18)
    plt.yticks(xi, yi, fontsize=18)
    plt.ylabel("Channel", fontsize=20)
    plt.xlabel("Time (s)", fontsize=20)
    plt.show()
    return 0
' a function for plotting the hotmap of statistical results for channels/regions by time sequence '
def plot_t_hotmap_withstats(results, chllabels=None, time_unit=[0, 0.1], lim=[-7, 7], p=0.05, cbpt=False,
                            clusterp=0.05, stats_time=[0, 1], smooth=False, xlabel='Time (s)', ylabel='Channel',
                            clabel='t', ticksize=18, figsize=None, cmap=None):
    """
    plot the hotmap of statistical results for channels/regions by time sequence

    results : array
        The results.
        The shape of results must be [n_subs, n_chls, ts, 2] or [n_subs, n_chls, ts]. n_subs represents the number of
        subjects. n_chls represents the number of channels or regions. ts represents the number of time-points. If shape
        of corrs is [n_chls, ts 2], each time-point of each channel/region contains a r-value and a p-value. If shape is
        [n_chls, ts], only r-values.
    chllabels : string-array or string-list or None. Default is None.
        The label for channels/regions.
        If label=None, the labels will be '1st', '2nd', '3th', '4th', ... automatically.
    time_unit : array or list [start_t, t_step]. Default is [0, 0.1]
        The time information of corrs for plotting
        start_t represents the start time and t_step represents the time between two adjacent time-points. Default
        time_unit=[0, 0.1], which means the start time of corrs is 0 sec and the time step is 0.1 sec.
    lim : array or list [min, max]. Default is [-7, 7].
        The t-value view lims.
    p: float. Default is 0.05.
        The p threshold for outline.
    cbpt : bool True or False. Default is True.
        Conduct cluster-based permutation test or not.
    clusterp : float. Default is 0.05.
        The threshold of cluster-defining p-values.
    stats_time : array or list [stats_time1, stats_time2]. Default is [0, 1].
        The time period for statistical analysis.
    smooth : bool True or False. Default is False.
        Smooth the results or not.
    xlabel : string. Default is 'Time (s)'.
        The label of x-axis.
    ylabel : string. Default is 'Channel'.
        The label of y-axis.
    clabel : string. Default is 't'.
        The label of color-bar.
    ticksize : int or float. Default is 18.
        The size of the ticks.
    figsize : array or list, [size_X, size_Y]
        The size of the figure.
        If figsize=None, the size of the figure will be ajusted automatically.
    cmap : matplotlib colormap or None. Default is None.
        The colormap for the figure.
        If cmap=None, the colormap will be 'bwr'.

    Returns
    -------
    0 on success; the string "Invalid input!" if results is not 3-D or 4-D.
    """
    if len(np.shape(results)) < 3 or len(np.shape(results)) > 4:
        return "Invalid input!"
    # get the number of channels
    nchls = results.shape[1]
    # get the number of time-points
    nts = results.shape[2]
    # get the start time and the time step
    start_time = time_unit[0]
    tstep = time_unit[1]
    # calculate the end time
    end_time = start_time + nts * tstep
    # convert stats_time to sample indices (round up when the requested time
    # does not fall exactly on a sample)
    delta1 = (stats_time[0] - start_time) / tstep - int((stats_time[0] - start_time) / tstep)
    delta2 = (stats_time[1] - start_time) / tstep - int((stats_time[1] - start_time) / tstep)
    if delta1 == 0:
        stats_time1 = int((stats_time[0] - start_time) / tstep)
    else:
        stats_time1 = int((stats_time[0] - start_time) / tstep) + 1
    if delta2 == 0:
        stats_time2 = int((stats_time[1] - start_time) / tstep)
    else:
        stats_time2 = int((stats_time[1] - start_time) / tstep) + 1
    # set labels of the channels
    if chllabels is None:
        chllabels = []
        for i in range(nchls):
            # ordinal suffix for the (i+1)-th channel; only 11/12/13 are
            # special-cased, so labels past 110 (e.g. "111st") would be wrong
            if i % 10 == 0 and i != 10:
                newlabel = str(i + 1) + "st"
            elif i % 10 == 1 and i != 11:
                newlabel = str(i + 1) + "nd"
            elif i % 10 == 2 and i != 12:
                newlabel = str(i + 1) + "rd"
            else:
                newlabel = str(i + 1) + "th"
            chllabels.append(newlabel)
    # keep only the r-values when p-values are included
    if len(results.shape) == 4:
        rlts = results[:, :, :, 0]
    elif len(results.shape) == 3:
        rlts = results
    # smooth the results
    if smooth:
        for chl in range(nchls):
            rlts[:, chl] = smooth_1d(rlts[:, chl])
    fig = plt.gcf()
    size = fig.get_size_inches()
    # auto figure size scales with the time span (x) and channel count (y)
    if figsize is None:
        size_x = nts * tstep * (size[0] - 2) + 2
        size_y = nchls * 0.2 * (size[1] - 1.5) + 1.5
    else:
        size_x = figsize[0]
        size_y = figsize[1]
    fig.set_size_inches(size_x, size_y)
    # aspect correction factor used for the vertical extent of the image
    delta = (size_y * 3) / (size_x * 4)
    # one-sample t-values per channel x time-point.
    # Bug fix: the old `ttest_1samp(...)[:, :, 0]` indexed the scipy result
    # namedtuple with a tuple, which raises TypeError; `[0]` selects the
    # statistic array of shape [nchls, nts].
    ts = ttest_1samp(rlts, 0, axis=0)[0]
    # ps: +1 for significant positive, -1 for significant negative, 0 otherwise
    ps = np.zeros([nchls, nts])
    if cbpt:
        for chl in range(nchls):
            ps_stats = clusterbased_permutation_1d_1samp_2sided(rlts[:, chl, stats_time1:stats_time2], 0, p_threshold=p,
                                                                clusterp_threshold=clusterp, iter=1000)
            ps[chl, stats_time1:stats_time2] = ps_stats
    else:
        for chl in range(nchls):
            for t in range(nts):
                if t >= stats_time1 and t < stats_time2:
                    ps[chl, t] = ttest_1samp(rlts[:, chl, t], 0)[1]
                    if ps[chl, t] < p and ts[chl, t] > 0:
                        ps[chl, t] = 1
                    elif ps[chl, t] < p and ts[chl, t] < 0:
                        ps[chl, t] = -1
                    else:
                        ps[chl, t] = 0
    # zero-pad the significance map so the contour lines close at the borders.
    # Bug fix: np.float was removed in NumPy 1.24 - plain float (float64) is equivalent.
    newps = np.zeros([nchls + 2, nts + 2], dtype=float)
    newps[1:nchls + 1, 1:nts + 1] = ps
    x = np.linspace(start_time - 0.5 * tstep, end_time + 0.5 * tstep, nts + 2)
    y = np.linspace(-0.08*delta, 0.16*delta * nchls + 0.08*delta, nchls + 2)
    X, Y = np.meshgrid(x, y)
    plt.contour(X, Y, newps, [0.5], linewidths=2, linestyles="dashed")
    plt.contour(X, Y, newps, [-0.5], linewidths=2, linestyles="dashed")
    # get min of lims & max of lims
    limmin = lim[0]
    limmax = lim[1]
    if cmap is None:
        plt.imshow(ts, extent=(start_time, end_time, 0, nchls * 0.16*delta), clim=(limmin, limmax), origin='lower',
                   cmap='bwr')
    else:
        plt.imshow(ts, extent=(start_time, end_time, 0, nchls * 0.16*delta), clim=(limmin, limmax), origin='lower',
                   cmap=cmap)
    cb = plt.colorbar()
    cb.ax.tick_params(labelsize=ticksize-2)
    font = {'size': ticksize}
    cb.set_label(clabel, fontdict=font)
    # place one y-tick at the vertical center of each channel row
    xi = []
    for i in range(nchls):
        xi.append(0.16*delta * i + 0.08*delta)
    yi = chllabels
    plt.tick_params(labelsize=ticksize)
    plt.yticks(xi, yi, fontsize=ticksize)
    plt.ylabel(ylabel, fontsize=20)
    plt.xlabel(xlabel, fontsize=20)
    plt.show()
    return 0
' a function for plotting the RSA-result regions by 3 cuts (frontal, axial & lateral) '
def plot_brainrsa_regions(img, threshold=None, background=get_bg_ch2(), type='r'):
    """
    Plot the RSA-result regions by 3 cuts (frontal, axial & lateral)

    Parameters
    ----------
    img : string
        The file path of the .nii file of the RSA results.
    threshold : None or int. Default is None.
        The threshold of the number of voxels used in correction.
        If threshold=n, only the similarity clusters consisting more than threshold voxels will be visible. If it is
        None, the threshold-correction will not work.
    background : Niimg-like object or string. Default is stuff.get_bg_ch2()
        The background image that the RSA results will be plotted on top of.
        NOTE(review): the default is evaluated once at import time, not per call.
    type : string 'r' or 't'
        The type of result (r-values or t-values).

    Returns
    -------
    0 always; the figure is shown as a side effect.
    """
    imgarray = nib.load(img).get_fdata()
    # Bug fix: `(imgarray == np.nan).all()` is always False because nan != nan,
    # so the "No Valid Results" branch was unreachable; np.isnan detects an
    # all-NaN (empty) result map correctly.
    if np.isnan(imgarray).all():
        print("No Valid Results")
    else:
        if threshold is not None:
            # cluster-size correction, then rebuild the image with the original affine
            affine = get_affine(img)
            imgarray = correct_by_threshold(imgarray, threshold)
            img = nib.Nifti1Image(imgarray, affine)
        if type == 'r':
            plotting.plot_roi(roi_img=img, bg_img=background, threshold=0, vmin=0.1, vmax=1,
                              title="Similarity", resampling_interpolation="continuous")
        if type == 't':
            plotting.plot_roi(roi_img=img, bg_img=background, threshold=0, vmin=-7, vmax=7,
                              title="Similarity", resampling_interpolation="continuous")
        plt.show()
    return 0
' a function for plotting the RSA-result by different cuts '
def plot_brainrsa_montage(img, threshold=None, slice=[6, 6, 6], background=get_bg_ch2bet(), type='r'):
    """
    Plot the RSA-result by different cuts

    Parameters
    ----------
    img : string
        The file path of the .nii file of the RSA results.
    threshold : None or int. Default is None.
        The threshold of the number of voxels used in correction.
        If threshold=n, only the similarity clusters consisting more than threshold voxels will be visible. If it is
        None, the threshold-correction will not work.
    slice : array
        The point where the cut is performed.
        If slice=[slice_x, slice_y, slice_z], slice_x, slice_y, slice_z represent the coordinates of each cut in the x,
        y, z direction. If slice=[[slice_x1, slice_x2], [slice_y1, slice_y2], [slice_z1, slice_z2]], slice_x1 & slice_x2
        represent the coordinates of each cut in the x direction, slice_y1 & slice_y2 represent the coordinates of each
        cut in the y direction, slice_z1 & slice_z2 represent the coordinates of each cut in the z direction.
    background : Niimg-like object or string. Default is stuff.get_bg_ch2bet()
        The background image that the RSA results will be plotted on top of.
        NOTE(review): the default is evaluated once at import time, not per call.
    type : string 'r' or 't'
        The type of result (r-values or t-values).
        NOTE(review): any other value leaves vmax unbound and raises NameError.

    Returns
    -------
    0 always; the figure is shown as a side effect.
    """
    imgarray = nib.load(img).get_fdata()
    # Bug fix: `(imgarray == np.nan).all()` is always False because nan != nan;
    # np.isnan detects an all-NaN (empty) result map correctly.
    if np.isnan(imgarray).all():
        print("No Valid Results")
    else:
        if threshold is not None:
            # cluster-size correction, then rebuild the image with the original affine
            affine = get_affine(img)
            imgarray = correct_by_threshold(imgarray, threshold)
            img = nib.Nifti1Image(imgarray, affine)
        slice_x = slice[0]
        slice_y = slice[1]
        slice_z = slice[2]
        if type == 'r':
            vmax = 1
        if type == 't':
            vmax = 7
        # a 0 coordinate disables that view
        if slice_x != 0:
            plotting.plot_stat_map(stat_map_img=img, bg_img=background, display_mode='x', cut_coords=slice_x,
                                   title="Similarity -sagittal", draw_cross=True, vmax=vmax)
        if slice_y != 0:
            plotting.plot_stat_map(stat_map_img=img, bg_img=background, display_mode='y', cut_coords=slice_y,
                                   title="Similarity -coronal", draw_cross=True, vmax=vmax)
        if slice_z != 0:
            plotting.plot_stat_map(stat_map_img=img, bg_img=background, display_mode='z', cut_coords=slice_z,
                                   title="Similarity -axial", draw_cross=True, vmax=vmax)
        plt.show()
    return 0
' a function for plotting the 2-D projection of the RSA-result '
def plot_brainrsa_glass(img, threshold=None, type='r'):
    """
    Plot the 2-D projection of the RSA-result

    Parameters
    ----------
    img : string
        The file path of the .nii file of the RSA results.
    threshold : None or int. Default is None.
        The threshold of the number of voxels used in correction.
        If threshold=n, only the similarity clusters consisting more than threshold voxels will be visible. If it is
        None, the threshold-correction will not work.
    type : string 'r' or 't'
        The type of result (r-values or t-values).

    Returns
    -------
    0 always; the figure is shown as a side effect.
    """
    imgarray = nib.load(img).get_fdata()
    # Bug fix: `(imgarray == np.nan).all()` is always False because nan != nan;
    # np.isnan detects an all-NaN (empty) result map correctly.
    if np.isnan(imgarray).all():
        print("No Valid Results")
    else:
        if threshold is not None:
            # cluster-size correction, then rebuild the image with the original affine
            affine = get_affine(img)
            imgarray = correct_by_threshold(imgarray, threshold)
            img = nib.Nifti1Image(imgarray, affine)
        if type == 'r':
            plotting.plot_glass_brain(img, colorbar=True, title="Similarity", black_bg=True, draw_cross=True, vmax=1)
        if type == 't':
            plotting.plot_glass_brain(img, colorbar=True, title="Similarity", black_bg=True, draw_cross=True, vmax=7)
        plt.show()
    return 0
' a function for plotting the RSA-result into a brain surface '
def plot_brainrsa_surface(img, threshold=None, type='r'):
    """
    Plot the RSA-result into a brain surface

    Parameters
    ----------
    img : string
        The file path of the .nii file of the RSA results.
    threshold : None or int. Default is None.
        The threshold of the number of voxels used in correction.
        If threshold=n, only the similarity clusters consisting more than threshold voxels will be visible. If it is
        None, the threshold-correction will not work.
    type : string 'r' or 't'
        The type of result (r-values or t-values).

    Returns
    -------
    0 always; the figures are shown as a side effect.
    """
    imgarray = nib.load(img).get_fdata()
    # Bug fix: `(imgarray == np.nan).all()` is always False because nan != nan;
    # np.isnan detects an all-NaN (empty) result map correctly.
    if np.isnan(imgarray).all():
        print("No Valid Results")
    else:
        if threshold is not None:
            # cluster-size correction, then rebuild the image with the original affine
            affine = get_affine(img)
            imgarray = correct_by_threshold(imgarray, threshold)
            img = nib.Nifti1Image(imgarray, affine)
        fsaverage = datasets.fetch_surf_fsaverage(mesh='fsaverage')
        texture_left = surface.vol_to_surf(img, fsaverage.pial_left)
        texture_right = surface.vol_to_surf(img, fsaverage.pial_right)
        # NOTE(review): each texture is drawn on both meshes (presumably to get
        # lateral + medial views), and bg_map is always sulc_right, even for the
        # left hemisphere - looks like a copy-paste slip; confirm before changing.
        # type='r'
        if type == 'r':
            plotting.plot_surf_stat_map(fsaverage.pial_left, texture_left, hemi='left', threshold=0.1,
                                        bg_map=fsaverage.sulc_right, colorbar=False, vmax=0.8, darkness=0.7)
            plotting.plot_surf_stat_map(fsaverage.pial_right, texture_right, hemi='right', threshold=0.1,
                                        bg_map=fsaverage.sulc_right, colorbar=True, vmax=0.8, darkness=0.7)
            plotting.plot_surf_stat_map(fsaverage.pial_right, texture_left, hemi='left', threshold=0.1,
                                        bg_map=fsaverage.sulc_right, colorbar=False, vmax=0.8, darkness=0.7)
            plotting.plot_surf_stat_map(fsaverage.pial_left, texture_right, hemi='right', threshold=0.1,
                                        bg_map=fsaverage.sulc_right, colorbar=True, vmax=0.8, darkness=0.7)
            plt.show()
        # type='t'
        if type == 't':
            plotting.plot_surf_stat_map(fsaverage.pial_left, texture_left, hemi='left', threshold=0.8,
                                        bg_map=fsaverage.sulc_right, colorbar=False, darkness=0.7)
            plotting.plot_surf_stat_map(fsaverage.pial_right, texture_right, hemi='right', threshold=0.8,
                                        bg_map=fsaverage.sulc_right, colorbar=True, darkness=0.7)
            plotting.plot_surf_stat_map(fsaverage.pial_right, texture_left, hemi='left', threshold=0.8,
                                        bg_map=fsaverage.sulc_right, colorbar=False, darkness=0.7)
            plotting.plot_surf_stat_map(fsaverage.pial_left, texture_right, hemi='right', threshold=0.8,
                                        bg_map=fsaverage.sulc_right, colorbar=True, darkness=0.7)
            plt.show()
    return 0
' a function for plotting the RSA-result by a set of images '
def plot_brainrsa_rlts(img, threshold=None, slice=[6, 6, 6], background=None, type='r'):
    """
    Plot the RSA-result by a set of images

    Parameters
    ----------
    img : string
        The file path of the .nii file of the RSA results.
    threshold : None or int. Default is None.
        The threshold of the number of voxels used in correction.
        If threshold=n, only the similarity clusters consisting more than threshold voxels will be visible. If it is
        None, the threshold-correction will not work.
    background : Niimg-like object or string. Default is None.
        The background image that the RSA results will be plotted on top of.
    type : string 'r' or 't'
        The type of result (r-values or t-values).

    Returns
    -------
    0 always; the figures are shown as a side effect.
    """
    imgarray = nib.load(img).get_fdata()
    # Bug fix: `(imgarray == np.nan).all()` is always False because nan != nan;
    # np.isnan detects an all-NaN (empty) result map correctly.
    if np.isnan(imgarray).all():
        print("No Valid Results")
    else:
        # Bug fix: the old code pre-applied the threshold correction here and
        # rebuilt `img` as a Nifti1Image, but every plot_brainrsa_* callee does
        # nib.load(img) - which requires a file path - so any non-None threshold
        # crashed. Each callee applies `threshold` itself, so just pass it down.
        if background is None:
            plot_brainrsa_regions(img, threshold=threshold, type=type)
            plot_brainrsa_montage(img, threshold=threshold, slice=slice, type=type)
            plot_brainrsa_glass(img, threshold=threshold, type=type)
            plot_brainrsa_surface(img, threshold=threshold, type=type)
        else:
            # NOTE(review): the glass-brain view has no background parameter and
            # is skipped in this branch - confirm that omission is intentional.
            plot_brainrsa_regions(img, threshold=threshold, background=background, type=type)
            plot_brainrsa_montage(img, threshold=threshold, slice=slice, background=background, type=type)
            plot_brainrsa_surface(img, threshold=threshold, type=type)
    return 0
__author__ = 'Zitong Lu'
import numpy as np
import copy
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from scipy import signal
from scipy.stats import ttest_1samp, ttest_rel
from nilearn import plotting, datasets, surface
import nibabel as nib
from neurora.stuff import get_affine, get_bg_ch2, get_bg_ch2bet, correct_by_threshold, \
clusterbased_permutation_1d_1samp_1sided, clusterbased_permutation_2d_1samp_1sided, \
clusterbased_permutation_1d_1samp_2sided, clusterbased_permutation_2d_2sided, smooth_1d
from decimal import Decimal
def plot_rdm(rdm, percentile=False, rescale=False, lim=[0, 1], conditions=None, con_fontsize=12, cmap=None):
    """
    Plot a representational dissimilarity matrix (RDM).

    rdm : array
        The RDM to plot; must be a square 2-D array [n_cons, n_cons].
    percentile : bool. Default is False.
        Rank-transform the values to percentiles (0-100) before plotting.
    rescale : bool. Default is False.
        Linearly rescale the off-diagonal values to [0, 1] before plotting.
    lim : array or list [min, max]. Default is [0, 1].
        The color limits (ignored when percentile=True, which uses [0, 100]).
    conditions : string-list or None. Default is None.
        Tick labels for the conditions; if None the axes are hidden.
    con_fontsize : int or float. Default is 12.
        Font size of the condition labels.
    cmap : matplotlib colormap or None. Default is None.
        The colormap; if None, 'jet' is used.

    Returns
    -------
    0 on success; "Invalid input!" if rdm is not a square 2-D array;
    None if rdm is 2*2 (cannot be plotted).
    """
    if len(np.shape(rdm)) != 2 or np.shape(rdm)[0] != np.shape(rdm)[1]:
        return "Invalid input!"
    # number of conditions
    cons = rdm.shape[0]
    # work on a copy so the caller's RDM is never modified
    crdm = copy.deepcopy(rdm)
    if cons == 2:
        print("The shape of RDM cannot be 2*2. Here NeuroRA cannot plot this RDM.")
        return None
    a, b = np.shape(crdm)
    if a != b:
        return None
    if percentile == True:
        # rank every cell, then map ranks to percentiles in [0, 100].
        # Bug fix: np.float was removed in NumPy 1.24 - plain float is equivalent.
        v = np.zeros([cons * cons, 2], dtype=float)
        for i in range(cons):
            for j in range(cons):
                v[i * cons + j, 0] = crdm[i, j]
        index = np.argsort(v[:, 0])
        # m counts distinct values minus one; equal values share a rank
        m = 0
        for i in range(cons * cons):
            if i > 0:
                if v[index[i], 0] > v[index[i - 1], 0]:
                    m = m + 1
            v[index[i], 1] = m
        # NOTE(review): if every cell is equal, m stays 0 and this divides by
        # zero - presumably never the case for a real RDM; confirm.
        v[:, 0] = v[:, 1] * 100 / m
        for i in range(cons):
            for j in range(cons):
                crdm[i, j] = v[i * cons + j, 0]
        if cmap is None:
            plt.imshow(crdm, extent=(0, 1, 0, 1), cmap=plt.cm.jet, clim=(0, 100))
        else:
            plt.imshow(crdm, extent=(0, 1, 0, 1), cmap=cmap, clim=(0, 100))
    # rescale the RDM
    elif rescale == True:
        # flatten the RDM
        vrdm = np.reshape(rdm, [cons * cons])
        # array -> set -> list
        svrdm = set(vrdm)
        lvrdm = list(svrdm)
        lvrdm.sort()
        # get max & min; lvrdm[1] skips the smallest value (the diagonal zeros)
        maxvalue = lvrdm[-1]
        minvalue = lvrdm[1]
        # rescale
        if maxvalue != minvalue:
            for i in range(cons):
                for j in range(cons):
                    # not on the diagnal
                    if i != j:
                        crdm[i, j] = float((crdm[i, j] - minvalue) / (maxvalue - minvalue))
        # plot the RDM
        min = lim[0]
        max = lim[1]
        if cmap is None:
            plt.imshow(crdm, extent=(0, 1, 0, 1), cmap=plt.cm.jet, clim=(min, max))
        else:
            plt.imshow(crdm, extent=(0, 1, 0, 1), cmap=cmap, clim=(min, max))
    else:
        # plot the RDM
        min = lim[0]
        max = lim[1]
        if cmap is None:
            plt.imshow(crdm, extent=(0, 1, 0, 1), cmap=plt.cm.jet, clim=(min, max))
        else:
            plt.imshow(crdm, extent=(0, 1, 0, 1), cmap=cmap, clim=(min, max))
    cb = plt.colorbar()
    cb.ax.tick_params(labelsize=16)
    font = {'size': 18}
    if percentile == True:
        cb.set_label("Dissimilarity (percentile)", fontdict=font)
    elif rescale == True:
        cb.set_label("Dissimilarity (Rescaling)", fontdict=font)
    else:
        cb.set_label("Dissimilarity", fontdict=font)
    if conditions is not None:
        # one tick per condition, centered on its cell (leftover debug
        # print("1") removed)
        step = float(1 / cons)
        x = np.arange(0.5 * step, 1 + 0.5 * step, step)
        y = np.arange(1 - 0.5 * step, -0.5 * step, -step)
        plt.xticks(x, conditions, fontsize=con_fontsize, rotation=30, ha="right")
        plt.yticks(y, conditions, fontsize=con_fontsize)
    else:
        plt.axis("off")
    plt.show()
    return 0
def plot_rdm_withvalue(rdm, lim=[0, 1], value_fontsize=10, conditions=None, con_fontsize=12, cmap=None):
    """
    Plot a representational dissimilarity matrix (RDM) with the value of each cell printed on it.

    rdm : array
        The RDM to plot; must be a square 2-D array [n_cons, n_cons].
    lim : array or list [min, max]. Default is [0, 1].
        The color limits.
    value_fontsize : int or float. Default is 10.
        Font size of the per-cell values.
    conditions : string-list or None. Default is None.
        Tick labels for the conditions; if None the axes are hidden.
    con_fontsize : int or float. Default is 12.
        Font size of the condition labels.
    cmap : matplotlib colormap or None. Default is None.
        The colormap; if None, 'Greens' is used.

    Returns
    -------
    0 on success; "Invalid input!" if rdm is not a square 2-D array;
    None if rdm is 2*2 (cannot be plotted).
    """
    if len(np.shape(rdm)) != 2 or np.shape(rdm)[0] != np.shape(rdm)[1]:
        return "Invalid input!"
    # get the number of conditions
    cons = rdm.shape[0]
    # if cons=2, the RDM cannot be plotted.
    if cons == 2:
        print("The shape of RDM cannot be 2*2. Here NeuroRA cannot plot this RDM.")
        return None
    # work on a copy so the caller's RDM is never modified
    crdm = copy.deepcopy(rdm)
    # determine if it's a square
    a, b = np.shape(crdm)
    if a != b:
        return None
    min = lim[0]
    max = lim[1]
    if cmap is None:
        plt.imshow(crdm, extent=(0, 1, 0, 1), cmap=plt.cm.Greens, clim=(min, max))
    else:
        plt.imshow(crdm, extent=(0, 1, 0, 1), cmap=cmap, clim=(min, max))
    cb = plt.colorbar()
    cb.ax.tick_params(labelsize=16)
    font = {'size': 18}
    cb.set_label("Dissimilarity", fontdict=font)
    # print each cell's value (4 decimals) at the cell center
    # (leftover debug print(i, j) removed; unused `text` binding dropped)
    step = float(1 / cons)
    for i in range(cons):
        for j in range(cons):
            plt.text(i * step + 0.5 * step, 1 - j * step - 0.5 * step, float('%.4f' % rdm[i, j]),
                     ha="center", va="center", color="blue", fontsize=value_fontsize)
    if conditions is not None:
        # one tick per condition, centered on its cell (leftover debug
        # print("1") removed)
        step = float(1 / cons)
        x = np.arange(0.5 * step, 1 + 0.5 * step, step)
        y = np.arange(1 - 0.5 * step, -0.5 * step, -step)
        plt.xticks(x, conditions, fontsize=con_fontsize, rotation=30, ha="right")
        plt.yticks(y, conditions, fontsize=con_fontsize)
    else:
        plt.axis("off")
    plt.show()
    return 0
def plot_corrs_by_time(corrs, labels=None, time_unit=[0, 0.1]):

    """
    Plot similarity time courses as cubic-interpolated curves.

    Parameters
    ----------
    corrs : array, [n_curves, n_ts] or [n_curves, n_ts, 2]
        One similarity time course per row (a trailing [r, p] pair is allowed;
        only the r values are plotted).
    labels : list of str or None. Legend labels, one per curve.
    time_unit : list, [start, step]. Start time and sampling step in seconds.

    Returns
    -------
    0 on success; "Invalid input!" on invalid input.
    """

    ndims = len(np.shape(corrs))
    if ndims < 2 or ndims > 3:
        return "Invalid input!"
    ncurves = corrs.shape[0]
    ntimes = corrs.shape[1]
    t0 = time_unit[0]
    dt = time_unit[1]
    x = np.arange(t0, t0 + ntimes * dt, dt)
    # up-sample by a factor of 50 and interpolate cubically for smooth curves
    nfine = ntimes * 50
    x_fine = np.linspace(x.min(), x.max(), nfine)
    y_fine = np.zeros([ncurves, nfine])
    for k in range(ncurves):
        series = corrs[k, :, 0] if ndims == 3 else corrs[k, :]
        y_fine[k] = interp1d(x, series, kind='cubic')(x_fine)
    # choose y-limits: pad by 10% but never exceed [-1, 1]
    vmax = np.max(y_fine)
    vmin = np.min(y_fine)
    ymax = vmax * 1.1 if vmax <= 1 / 1.1 else 1
    if vmin >= 0:
        ymin = -0.1
    elif vmin > -1 / 1.1:
        ymin = vmin * 1.1
    else:
        ymin = -1
    fig, ax = plt.subplots()
    for k in range(ncurves):
        if labels:
            plt.plot(x_fine, y_fine[k], linewidth=3, label=labels[k])
        else:
            plt.plot(x_fine, y_fine[k], linewidth=3)
    plt.ylim(ymin, ymax)
    plt.ylabel("Similarity", fontsize=20)
    plt.xlabel("Time (s)", fontsize=20)
    plt.tick_params(labelsize=18)
    if labels:
        plt.legend()
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    plt.show()
    return 0
def plot_tbytsim_withstats(similarities, start_time=0, end_time=1, time_interval=0.01, smooth=True, p=0.05, cbpt=True,
                           clusterp=0.05, stats_time=[0, 1], color='r', xlim=[0, 1], ylim=[-0.1, 0.8],
                           xlabel='Time (s)', ylabel='Representational Similarity', figsize=[6.4, 3.6], x0=0,
                           ticksize=12, fontsize=16, markersize=2, avgshow=False):

    """
    Plot time-by-time representational similarities (subject average with SEM
    band) and mark time-points significantly above zero.

    Parameters
    ----------
    similarities : array, [n_subs, n_ts] or [n_subs, n_ts, 2]
        Subject-wise similarity time courses (only the r values of [r, p]
        pairs are used).
    start_time, end_time, time_interval : float
        Sample timing; (end_time-start_time)/n_ts must equal time_interval.
    smooth : bool. If True, apply a 5-point moving average per subject.
    p, clusterp : float. (Cluster-)significance thresholds.
    cbpt : bool. True -> cluster-based permutation test; False -> one-sided
        one-sample t-tests per time-point.
    stats_time : list, [start, end]. Time window (s) to run statistics in.
    Remaining parameters control the appearance of the figure.

    Returns
    -------
    0 on success; "Invalid input!" on invalid input.
    """

    if len(np.shape(similarities)) < 2 or len(np.shape(similarities)) > 3:
        return "Invalid input!"
    n = len(np.shape(similarities))
    yminlim = ylim[0]
    ymaxlim = ylim[1]
    # keep only the similarity values if [r, p] pairs were passed
    if n == 3:
        similarities = similarities[:, :, 0]
    # work on a copy so smoothing does not modify the caller's data
    csimilarities = copy.deepcopy(similarities)
    nsubs, nts = np.shape(csimilarities)
    # the implied sampling step must match time_interval exactly
    tstep = float(Decimal((end_time - start_time) / nts).quantize(Decimal(str(time_interval))))
    if tstep != time_interval:
        return "Invalid input!"
    # convert stats_time (seconds) to sample indices, rounding partial steps up
    delta1 = (stats_time[0] - start_time) / tstep - int((stats_time[0] - start_time) / tstep)
    delta2 = (stats_time[1] - start_time) / tstep - int((stats_time[1] - start_time) / tstep)
    if delta1 == 0:
        stats_time1 = int((stats_time[0] - start_time) / tstep)
    else:
        stats_time1 = int((stats_time[0] - start_time) / tstep) + 1
    if delta2 == 0:
        stats_time2 = int((stats_time[1] - start_time) / tstep)
    else:
        stats_time2 = int((stats_time[1] - start_time) / tstep) + 1
    if smooth is True:
        # 5-point moving average, truncated at both edges (in-place, so later
        # points average over already-smoothed earlier points)
        for sub in range(nsubs):
            for t in range(nts):
                if t<=1:
                    csimilarities[sub, t] = np.average(csimilarities[sub, :t+3])
                if t>1 and t<(nts-2):
                    csimilarities[sub, t] = np.average(csimilarities[sub, t-2:t+3])
                if t>=(nts-2):
                    csimilarities[sub, t] = np.average(csimilarities[sub, t-2:])
    avg = np.average(csimilarities, axis=0)
    # standard error of the mean per time-point
    # (fix: the np.float alias was removed in NumPy 1.24 - use builtin float)
    err = np.zeros([nts], dtype=float)
    for t in range(nts):
        err[t] = np.std(csimilarities[:, t], ddof=1)/np.sqrt(nsubs)
    if cbpt == True:
        ps_stats = clusterbased_permutation_1d_1samp_1sided(csimilarities[:, stats_time1:stats_time2], level=0,
                                                            p_threshold=p, clusterp_threshold=clusterp)
        ps = np.zeros([nts])
        ps[stats_time1:stats_time2] = ps_stats
    else:
        ps = np.zeros([nts])
        for t in range(nts):
            ps[t] = ttest_1samp(csimilarities[:, t], 0, alternative="greater")[1]
            if ps[t] < p:
                ps[t] = 1
            else:
                ps[t] = 0
    # mark significant time-points and shade the area between 0 and avg-err
    for t in range(nts):
        if ps[t] == 1:
            plt.plot(t*tstep+start_time+0.5*tstep, (ymaxlim-yminlim)*0.95+yminlim, 's',
                     color=color, alpha=0.8, markersize=markersize)
            xi = [t*tstep+start_time, t*tstep+tstep+start_time]
            ymin = [0]
            ymax = [avg[t]-err[t]]
            plt.fill_between(xi, ymax, ymin, facecolor=color, alpha=0.1)
    fig = plt.gcf()
    fig.set_size_inches(figsize[0], figsize[1])
    ax = plt.gca()
    # hide top/right spines and anchor the axes at (x0, 0)
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.spines["left"].set_linewidth(3)
    ax.spines["left"].set_position(("data", x0))
    ax.spines["bottom"].set_linewidth(3)
    ax.spines['bottom'].set_position(('data', 0))
    x = np.arange(start_time+0.5*tstep, end_time+0.5*tstep, tstep)
    if avgshow is True:
        plt.plot(x, avg, color=color, alpha=0.9)
    plt.fill_between(x, avg + err, avg - err, facecolor=color, alpha=0.8)
    plt.ylim(yminlim, ymaxlim)
    plt.xlim(xlim[0], xlim[1])
    plt.tick_params(labelsize=ticksize)
    plt.xlabel(xlabel, fontsize=fontsize)
    plt.ylabel(ylabel, fontsize=fontsize)
    plt.show()
    return 0
def plot_tbyt_decoding_acc(acc, start_time=0, end_time=1, time_interval=0.01, chance=0.5, p=0.05, cbpt=True,
                           clusterp=0.05, stats_time=[0, 1], color='r', xlim=[0, 1], ylim=[0.4, 0.8],
                           xlabel='Time (s)', ylabel='Decoding Accuracy', figsize=[6.4, 3.6], x0=0, ticksize=12,
                           fontsize=16, markersize=2, avgshow=False):

    """
    Plot time-by-time decoding accuracies (subject average with SEM band) and
    mark time-points whose accuracy is significantly above chance.

    Parameters
    ----------
    acc : array, [n_subs, n_ts]. Decoding accuracies per subject/time-point.
    start_time, end_time, time_interval : float
        Sample timing; (end_time-start_time)/n_ts must equal time_interval.
    chance : float. Chance-level accuracy used as the test baseline.
    p, clusterp : float. (Cluster-)significance thresholds.
    cbpt : bool. True -> cluster-based permutation test; False -> one-sided
        one-sample t-tests per time-point within stats_time.
    stats_time : list, [start, end]. Time window (s) to run statistics in.
    Remaining parameters control the appearance of the figure.

    Returns
    -------
    "Invalid input!" on invalid input; otherwise None (the figure is shown).
    """

    if len(np.shape(acc)) != 2:
        return "Invalid input!"
    nsubs, nts = np.shape(acc)
    # the implied sampling step must match time_interval exactly
    tstep = float(Decimal((end_time - start_time) / nts).quantize(Decimal(str(time_interval))))
    if tstep != time_interval:
        return "Invalid input!"
    # convert stats_time (seconds) to sample indices, rounding partial steps up
    delta1 = (stats_time[0] - start_time) / tstep - int((stats_time[0] - start_time) / tstep)
    delta2 = (stats_time[1] - start_time) / tstep - int((stats_time[1] - start_time) / tstep)
    if delta1 == 0:
        stats_time1 = int((stats_time[0] - start_time) / tstep)
    else:
        stats_time1 = int((stats_time[0] - start_time) / tstep) + 1
    if delta2 == 0:
        stats_time2 = int((stats_time[1] - start_time) / tstep)
    else:
        stats_time2 = int((stats_time[1] - start_time) / tstep) + 1
    yminlim = ylim[0]
    ymaxlim = ylim[1]
    avg = np.average(acc, axis=0)
    # standard error of the mean per time-point
    err = np.zeros([nts])
    for t in range(nts):
        err[t] = np.std(acc[:, t], ddof=1) / np.sqrt(nsubs)
    if cbpt == True:
        ps_stats = clusterbased_permutation_1d_1samp_1sided(acc[:, stats_time1:stats_time2], level=chance,
                                                            p_threshold=p, clusterp_threshold=clusterp, iter=1000)
        ps = np.zeros([nts])
        ps[stats_time1:stats_time2] = ps_stats
    else:
        # per-time-point t-tests only inside the stats window; elsewhere ps stays 0
        ps = np.zeros([nts])
        for t in range(nts):
            if t >= stats_time1 and t< stats_time2:
                ps[t] = ttest_1samp(acc[:, t], chance, alternative="greater")[1]
                if ps[t] < p:
                    ps[t] = 1
                else:
                    ps[t] = 0
    # mark significant time-points and shade between chance and avg-err
    for t in range(nts):
        if ps[t] == 1:
            plt.plot(t*tstep+start_time+0.5*tstep, (ymaxlim-yminlim)*0.95+yminlim, 's', color=color, alpha=0.8,
                     markersize=markersize)
            xi = [t*tstep+start_time, t*tstep+tstep+start_time]
            ymin = [chance]
            ymax = [avg[t] - err[t]]
            plt.fill_between(xi, ymax, ymin, facecolor=color, alpha=0.2)
    fig = plt.gcf()
    fig.set_size_inches(figsize[0], figsize[1])
    ax = plt.gca()
    # hide top/right spines and anchor the axes at (x0, chance)
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.spines["left"].set_linewidth(3)
    ax.spines["left"].set_position(("data", x0))
    ax.spines["bottom"].set_linewidth(3)
    ax.spines["bottom"].set_position(("data", chance))
    x = np.arange(start_time+0.5*tstep, end_time+0.5*tstep, tstep)
    if avgshow is True:
        plt.plot(x, avg, color=color, alpha=0.9)
    plt.fill_between(x, avg+err, avg-err, facecolor=color, alpha=0.8)
    plt.ylim(yminlim, ymaxlim)
    plt.xlim(xlim[0], xlim[1])
    plt.tick_params(labelsize=ticksize)
    plt.xlabel(xlabel, fontsize=fontsize)
    plt.ylabel(ylabel, fontsize=fontsize)
    plt.show()
def plot_tbyt_diff_decoding_acc(acc1, acc2, start_time=0, end_time=1, time_interval=0.01, chance=0.5, p=0.05, cbpt=True,
                                clusterp=0.05, stats_time=[0, 1], color1='r', color2='b', xlim=[0, 1], ylim=[0.4, 0.8],
                                xlabel='Time (s)', ylabel='Decoding Accuracy', figsize=[6.4, 3.6], x0=0, ticksize=12,
                                fontsize=16, markersize=2, avgshow=False):

    """
    Plot two time-by-time decoding-accuracy curves, mark where each is
    significantly above chance, and shade where the two curves differ
    significantly from each other.

    Parameters
    ----------
    acc1, acc2 : arrays, [n_subs, n_ts]. The two sets of decoding accuracies.
    start_time, end_time, time_interval : float
        Sample timing; (end_time-start_time)/n_ts must equal time_interval.
    chance : float. Chance-level accuracy used as the test baseline.
    p, clusterp : float. (Cluster-)significance thresholds.
    cbpt : bool. True -> cluster-based permutation tests; False -> t-tests per
        time-point within stats_time.
    stats_time : list, [start, end]. Time window (s) to run statistics in.
    Remaining parameters control the appearance of the figure.

    Returns
    -------
    "Invalid input!" on invalid input; otherwise None (the figure is shown).
    """

    if len(np.shape(acc1)) != 2 or len(np.shape(acc2)) != 2:
        return "Invalid input!"
    nsubs, nts = np.shape(acc1)
    # the implied sampling step must match time_interval exactly
    tstep = float(Decimal((end_time - start_time) / nts).quantize(Decimal(str(time_interval))))
    if tstep != time_interval:
        return "Invalid input!"
    # convert stats_time (seconds) to sample indices, rounding partial steps up
    delta1 = (stats_time[0] - start_time) / tstep - int((stats_time[0] - start_time) / tstep)
    delta2 = (stats_time[1] - start_time) / tstep - int((stats_time[1] - start_time) / tstep)
    if delta1 == 0:
        stats_time1 = int((stats_time[0] - start_time) / tstep)
    else:
        stats_time1 = int((stats_time[0] - start_time) / tstep) + 1
    if delta2 == 0:
        stats_time2 = int((stats_time[1] - start_time) / tstep)
    else:
        stats_time2 = int((stats_time[1] - start_time) / tstep) + 1
    yminlim = ylim[0]
    ymaxlim = ylim[1]
    # subject averages and SEMs for both accuracy sets
    avg1 = np.average(acc1, axis=0)
    err1 = np.zeros([nts])
    for t in range(nts):
        err1[t] = np.std(acc1[:, t], ddof=1) / np.sqrt(nsubs)
    avg2 = np.average(acc2, axis=0)
    err2 = np.zeros([nts])
    for t in range(nts):
        err2[t] = np.std(acc2[:, t], ddof=1) / np.sqrt(nsubs)
    if cbpt == True:
        ps1_stats = clusterbased_permutation_1d_1samp_1sided(acc1[:, stats_time1:stats_time2], level=chance,
                                                             p_threshold=p, clusterp_threshold=clusterp, iter=1000)
        ps1 = np.zeros([nts])
        ps1[stats_time1:stats_time2] = ps1_stats
        ps2_stats = clusterbased_permutation_1d_1samp_1sided(acc2[:, stats_time1:stats_time2], level=chance,
                                                             p_threshold=p, clusterp_threshold=clusterp, iter=1000)
        ps2 = np.zeros([nts])
        ps2[stats_time1:stats_time2] = ps2_stats
        ps_stats = clusterbased_permutation_1d_1samp_2sided(acc1[:, stats_time1:stats_time2]-
                                                            acc2[:, stats_time1:stats_time2], level=0, p_threshold=p,
                                                            clusterp_threshold=clusterp, iter=1000)
        ps = np.zeros([nts])
        ps[stats_time1:stats_time2] = ps_stats
    else:
        ps1 = np.zeros([nts])
        ps2 = np.zeros([nts])
        ps = np.zeros([nts])
        for t in range(nts):
            if t >= stats_time1 and t < stats_time2:
                ps1[t] = ttest_1samp(acc1[:, t], chance, alternative="greater")[1]
                ps2[t] = ttest_1samp(acc2[:, t], chance, alternative="greater")[1]
                if ps1[t] < p:
                    ps1[t] = 1
                else:
                    ps1[t] = 0
                if ps2[t] < p:
                    ps2[t] = 1
                else:
                    ps2[t] = 0
                # fix: the original compared acc1 against itself (acc1 vs acc1)
                # in both directions; the paired test must compare acc1 vs acc2
                if ttest_rel(acc1[:, t], acc2[:, t], alternative="greater")[1] < p:
                    ps[t] = 1
                elif ttest_rel(acc1[:, t], acc2[:, t], alternative="less")[1] < p:
                    ps[t] = -1
                else:
                    ps[t] = 0
    # mark per-curve significance and shade the gap where the curves differ
    for t in range(nts):
        if ps1[t] == 1:
            plt.plot(t*tstep+start_time+0.5*tstep, (ymaxlim-yminlim)*0.95+yminlim, 's', color=color1, alpha=0.8,
                     markersize=markersize)
        if ps2[t] == 1:
            plt.plot(t*tstep+start_time+0.5*tstep, (ymaxlim-yminlim)*0.91+yminlim, 's', color=color2, alpha=0.8,
                     markersize=markersize)
        if ps[t] == 1:
            xi = [t*tstep+start_time, t*tstep+tstep+start_time]
            ymin = [avg2[t] + err2[t]]
            ymax = [avg1[t] - err1[t]]
            plt.fill_between(xi, ymax, ymin, facecolor="grey", alpha=0.2)
        if ps[t] == -1:
            xi = [t*tstep+start_time, t*tstep+tstep+start_time]
            ymin = [avg1[t] + err1[t]]
            ymax = [avg2[t] - err2[t]]
            plt.fill_between(xi, ymax, ymin, facecolor="grey", alpha=0.2)
    fig = plt.gcf()
    fig.set_size_inches(figsize[0], figsize[1])
    ax = plt.gca()
    # hide top/right spines and anchor the axes at (x0, chance)
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.spines["left"].set_linewidth(3)
    ax.spines["left"].set_position(("data", x0))
    ax.spines["bottom"].set_linewidth(3)
    ax.spines["bottom"].set_position(("data", chance))
    x = np.arange(start_time+0.5*tstep, end_time+0.5*tstep, tstep)
    if avgshow is True:
        plt.plot(x, avg1, color=color1, alpha=0.9)
        plt.plot(x, avg2, color=color2, alpha=0.9)
    plt.fill_between(x, avg1+err1, avg1-err1, facecolor=color1, alpha=0.8)
    plt.fill_between(x, avg2+err2, avg2-err2, facecolor=color2, alpha=0.8)
    plt.ylim(yminlim, ymaxlim)
    plt.xlim(xlim[0], xlim[1])
    plt.tick_params(labelsize=ticksize)
    plt.xlabel(xlabel, fontsize=fontsize)
    plt.ylabel(ylabel, fontsize=fontsize)
    plt.show()
def plot_ct_decoding_acc(acc, start_timex=0, end_timex=1, start_timey=0, end_timey=1, time_intervalx=0.01,
                         time_intervaly=0.01, chance=0.5, p=0.05, cbpt=True, clusterp=0.05, stats_timex=[0, 1],
                         stats_timey=[0, 1], xlim=[0, 1], ylim=[0, 1], clim=[0.4, 0.8], xlabel='Training Time (s)',
                         ylabel='Test Time (s)', clabel='Decoding Accuracy', figsize=[6.4, 4.8], cmap="viridis",
                         ticksize=12, fontsize=16):

    """
    Plot a cross-temporal (training-time x test-time) decoding-accuracy matrix
    with a dashed contour around the significantly-above-chance region.

    Parameters
    ----------
    acc : array, [n_subs, n_tx, n_ty]. Cross-temporal accuracies per subject.
    start_timex/y, end_timex/y, time_intervalx/y : float
        Timing of the training (x) and test (y) axes; the implied step must
        equal the given interval.
    chance : float. Chance-level accuracy used as the test baseline.
    p, clusterp : float. (Cluster-)significance thresholds.
    cbpt : bool. True -> 2-D cluster-based permutation test; False -> per-cell
        one-sided one-sample t-tests within the stats windows.
    stats_timex, stats_timey : lists, [start, end]. Stats windows (s).
    Remaining parameters control the appearance of the figure.

    Returns
    -------
    "Invalid input!" on invalid input; otherwise None (the figure is shown).
    """

    nsubs, nx, ny = np.shape(acc)
    cminlim = clim[0]
    cmaxlim = clim[1]
    # the implied sampling steps must match the given intervals exactly
    tstepx = float(Decimal((end_timex - start_timex) / nx).quantize(Decimal(str(time_intervalx))))
    tstepy = float(Decimal((end_timey - start_timey) / ny).quantize(Decimal(str(time_intervaly))))
    if tstepx != time_intervalx or tstepy != time_intervaly:
        return "Invalid input!"
    # convert the stats windows (seconds) to indices, rounding partial steps up
    deltax1 = (stats_timex[0] - start_timex) / tstepx - int((stats_timex[0] - start_timex) / tstepx)
    deltax2 = (stats_timex[1] - start_timex) / tstepx - int((stats_timex[1] - start_timex) / tstepx)
    if deltax1 == 0:
        stats_timex1 = int((stats_timex[0] - start_timex) / tstepx)
    else:
        stats_timex1 = int((stats_timex[0] - start_timex) / tstepx) + 1
    if deltax2 == 0:
        stats_timex2 = int((stats_timex[1] - start_timex) / tstepx)
    else:
        stats_timex2 = int((stats_timex[1] - start_timex) / tstepx) + 1
    deltay1 = (stats_timey[0] - start_timey) / tstepy - int((stats_timey[0] - start_timey) / tstepy)
    deltay2 = (stats_timey[1] - start_timey) / tstepy - int((stats_timey[1] - start_timey) / tstepy)
    if deltay1 == 0:
        stats_timey1 = int((stats_timey[0] - start_timey) / tstepy)
    else:
        stats_timey1 = int((stats_timey[0] - start_timey) / tstepy) + 1
    if deltay2 == 0:
        stats_timey2 = int((stats_timey[1] - start_timey) / tstepy)
    else:
        stats_timey2 = int((stats_timey[1] - start_timey) / tstepy) + 1
    if cbpt is True:
        ps_stats = clusterbased_permutation_2d_1samp_1sided(
            acc[:, stats_timex1:stats_timex2, stats_timey1:stats_timey2], level=chance, p_threshold=p,
            clusterp_threshold=clusterp, iter=1000)
        ps = np.zeros([nx, ny])
        ps[stats_timex1:stats_timex2, stats_timey1:stats_timey2] = ps_stats
    else:
        # per-cell t-tests only inside the stats windows; elsewhere ps stays 0
        ps = np.zeros([nx, ny])
        for t1 in range(nx):
            for t2 in range(ny):
                if t1 >= stats_timex1 and t1 < stats_timex2 and t2 >= stats_timey1 and t2 < stats_timey2:
                    ps[t1, t2] = ttest_1samp(acc[:, t1, t2], chance, alternative="greater")[1]
                    if ps[t1, t2] < p:
                        ps[t1, t2] = 1
                    else:
                        ps[t1, t2] = 0
    # pad the significance map with a zero border so contours close at the edges
    newps = np.zeros([nx + 2, ny + 2])
    newps[1:nx + 1, 1:ny + 1] = ps
    x = np.linspace(start_timex - 0.5 * tstepx, end_timex + 0.5 * tstepx, nx + 2)
    y = np.linspace(start_timey - 0.5 * tstepy, end_timey + 0.5 * tstepy, ny + 2)
    X, Y = np.meshgrid(x, y)
    # transpose so training time runs along the x-axis
    plt.contour(X, Y, np.transpose(newps, (1, 0)), [0, 1], colors="silver", alpha=0.9, linewidths=3,
                linestyles="dashed")
    fig = plt.gcf()
    fig.set_size_inches(figsize[0], figsize[1])
    ax = plt.gca()
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.spines["left"].set_linewidth(2)
    ax.spines["bottom"].set_linewidth(2)
    avg = np.average(acc, axis=0)
    avg = np.transpose(avg, (1, 0))
    plt.imshow(avg, extent=(start_timex, end_timex, start_timey, end_timey), cmap=cmap, origin="lower",
               clim=(cminlim, cmaxlim))
    cb = plt.colorbar()
    cb.ax.tick_params(labelsize=ticksize)
    font = {'size': ticksize+2}
    cb.set_label(clabel, fontdict=font)
    plt.xlim(xlim[0], xlim[1])
    plt.ylim(ylim[0], ylim[1])
    plt.tick_params(labelsize=ticksize)
    plt.xlabel(xlabel, fontsize=fontsize)
    plt.ylabel(ylabel, fontsize=fontsize)
    plt.show()
def plot_ct_diff_decoding_acc(acc1, acc2, start_timex=0, end_timex=1, start_timey=0, end_timey=1, time_intervalx=0.01,
                              time_intervaly=0.01, p=0.05, cbpt=True, clusterp=0.05, stats_timex=[0, 1],
                              stats_timey=[0, 1], xlim=[0, 1], ylim=[0, 1], clim=[0.4, 0.8], xlabel='Training Time (s)',
                              ylabel='Test Time (s)', clabel='Differences of Decoding Accuracies', figsize=[6.4, 4.8],
                              cmap="viridis", ticksize=12, fontsize=16):

    """
    Plot the difference between two cross-temporal decoding-accuracy matrices
    (acc1 - acc2) with a dashed contour around the significant regions.

    Parameters
    ----------
    acc1, acc2 : arrays, [n_subs, n_tx, n_ty]. The two accuracy matrices.
    start_timex/y, end_timex/y, time_intervalx/y : float
        Timing of the training (x) and test (y) axes; the implied step must
        equal the given interval.
    p, clusterp : float. (Cluster-)significance thresholds.
    cbpt : bool. True -> 2-D two-sided cluster-based permutation test between
        acc1 and acc2; False -> per-cell two one-sided t-tests on acc1-acc2.
    stats_timex, stats_timey : lists, [start, end]. Stats windows (s).
    Remaining parameters control the appearance of the figure.

    Returns
    -------
    "Invalid input!" on invalid input; otherwise None (the figure is shown).
    """

    # the statistics and the plotted map are based on the difference matrix
    acc = acc1 - acc2
    nsubs, nx, ny = np.shape(acc)
    cminlim = clim[0]
    cmaxlim = clim[1]
    # the implied sampling steps must match the given intervals exactly
    tstepx = float(Decimal((end_timex - start_timex) / nx).quantize(Decimal(str(time_intervalx))))
    tstepy = float(Decimal((end_timey - start_timey) / ny).quantize(Decimal(str(time_intervaly))))
    if tstepx != time_intervalx or tstepy != time_intervaly:
        return "Invalid input!"
    # convert the stats windows (seconds) to indices, rounding partial steps up
    deltax1 = (stats_timex[0] - start_timex) / tstepx - int((stats_timex[0] - start_timex) / tstepx)
    deltax2 = (stats_timex[1] - start_timex) / tstepx - int((stats_timex[1] - start_timex) / tstepx)
    if deltax1 == 0:
        stats_timex1 = int((stats_timex[0] - start_timex) / tstepx)
    else:
        stats_timex1 = int((stats_timex[0] - start_timex) / tstepx) + 1
    if deltax2 == 0:
        stats_timex2 = int((stats_timex[1] - start_timex) / tstepx)
    else:
        stats_timex2 = int((stats_timex[1] - start_timex) / tstepx) + 1
    deltay1 = (stats_timey[0] - start_timey) / tstepy - int((stats_timey[0] - start_timey) / tstepy)
    deltay2 = (stats_timey[1] - start_timey) / tstepy - int((stats_timey[1] - start_timey) / tstepy)
    if deltay1 == 0:
        stats_timey1 = int((stats_timey[0] - start_timey) / tstepy)
    else:
        stats_timey1 = int((stats_timey[0] - start_timey) / tstepy) + 1
    if deltay2 == 0:
        stats_timey2 = int((stats_timey[1] - start_timey) / tstepy)
    else:
        stats_timey2 = int((stats_timey[1] - start_timey) / tstepy) + 1
    if cbpt is True:
        ps_stats = clusterbased_permutation_2d_2sided(acc1[:, stats_timex1:stats_timex2, stats_timey1:stats_timey2],
                                                      acc2[:, stats_timex1:stats_timex2, stats_timey1:stats_timey2],
                                                      p_threshold=p, clusterp_threshold=clusterp, iter=1000)
        ps = np.zeros([nx, ny])
        ps[stats_timex1:stats_timex2, stats_timey1:stats_timey2] = ps_stats
    else:
        # per-cell two one-sided tests: +1 where acc1 > acc2, -1 where acc1 < acc2
        ps = np.zeros([nx, ny])
        for t1 in range(nx):
            for t2 in range(ny):
                if t1 >= stats_timex1 and t1 < stats_timex2 and t2 >= stats_timey1 and t2 < stats_timey2:
                    if ttest_1samp(acc[:, t1, t2], 0, alternative="greater")[1] < p:
                        ps[t1, t2] = 1
                    elif ttest_1samp(acc[:, t1, t2], 0, alternative="less")[1] < p:
                        ps[t1, t2] = -1
                    else:
                        ps[t1, t2] = 0
    # pad the significance map with a zero border so contours close at the edges
    newps = np.zeros([nx + 2, ny + 2])
    newps[1:nx + 1, 1:ny + 1] = ps
    x = np.linspace(start_timex - 0.5 * tstepx, end_timex + 0.5 * tstepx, nx + 2)
    y = np.linspace(start_timey - 0.5 * tstepy, end_timey + 0.5 * tstepy, ny + 2)
    X, Y = np.meshgrid(x, y)
    # levels at +/-0.5 separate the -1/0/+1 regions of the significance map
    plt.contour(X, Y, np.transpose(newps, (1, 0)), (-0.5, 0.5), colors="silver", alpha=0.9, linewidths=3,
                linestyles="dashed")
    fig = plt.gcf()
    fig.set_size_inches(figsize[0], figsize[1])
    ax = plt.gca()
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.spines["left"].set_linewidth(2)
    ax.spines["bottom"].set_linewidth(2)
    avg = np.average(acc, axis=0)
    avg = np.transpose(avg, (1, 0))
    plt.imshow(avg, extent=(start_timex, end_timex, start_timey, end_timey), cmap=cmap, origin="lower",
               clim=(cminlim, cmaxlim))
    cb = plt.colorbar()
    cb.ax.tick_params(labelsize=ticksize)
    font = {'size': ticksize+2}
    cb.set_label(clabel, fontdict=font)
    plt.xlim(xlim[0], xlim[1])
    plt.ylim(ylim[0], ylim[1])
    plt.tick_params(labelsize=ticksize)
    plt.xlabel(xlabel, fontsize=fontsize)
    plt.ylabel(ylabel, fontsize=fontsize)
    plt.show()
def plot_corrs_hotmap(corrs, chllabels=None, time_unit=[0, 0.1], lim=[0, 1], smooth=False, figsize=None, cmap=None):

    """
    Plot channel-by-time similarities as a hotmap (channels on the y-axis,
    time on the x-axis).

    Parameters
    ----------
    corrs : array, [n_chls, n_ts] or [n_chls, n_ts, 2]
        Similarity time course per channel (only the r values of [r, p] pairs
        are used).
    chllabels : list of str or None. Channel labels; if None, ordinal labels
        ("1st", "2nd", ...) are generated.
    time_unit : list, [start, step]. Start time and sampling step in seconds.
    lim : list, [min, max]. Color-scale limits.
    smooth : bool. If True, cubically up-sample (x50) and low-pass filter
        each channel's time course.
    figsize : list, [x, y] or None. Figure size; None derives it from the data.
    cmap : colormap or None. Defaults to 'inferno'.

    Returns
    -------
    0 on success; "Invalid input!" on invalid input.
    """

    if len(np.shape(corrs)) < 2 or len(np.shape(corrs)) > 3:
        return "Invalid input!"
    nchls = corrs.shape[0]
    ts = corrs.shape[1]
    start_t = time_unit[0]
    tstep = time_unit[1]
    end_t = start_t + ts * tstep
    x = np.arange(start_t, end_t, tstep)
    # generate ordinal channel labels ("1st", "2nd", "3rd", "4th", ...;
    # 11/12/13 take "th") when none are provided
    if chllabels == None:
        chllabels = []
        for i in range(nchls):
            if i % 10 == 0 and i != 10:
                newlabel = str(i+1) + "st"
            elif i % 10 == 1 and i != 11:
                newlabel = str(i+1) + "nd"
            elif i % 10 == 2 and i != 12:
                newlabel = str(i+1) + "rd"
            else:
                newlabel = str(i+1) + "th"
            chllabels.append(newlabel)
    if smooth == True:
        # cubic interpolation onto a 50x finer grid, then a 4th-order
        # Butterworth low-pass (30 Hz relative to the up-sampled rate)
        t = ts * 50
        x_soft = np.linspace(x.min(), x.max(), t)
        y_soft = np.zeros([nchls, t])
        samplerate = int(1 / tstep) * 50
        b, a = signal.butter(4, 2*30/samplerate, 'lowpass')
        for i in range(nchls):
            if len(corrs.shape) == 3:
                f = interp1d(x, corrs[i, :, 0], kind='cubic')
                y_soft[i] = f(x_soft)
            elif len(corrs.shape) == 2:
                f = interp1d(x, corrs[i, :], kind='cubic')
                y_soft[i] = f(x_soft)
            y_soft[i] = signal.filtfilt(b, a, y_soft[i])
        rlts = y_soft
    if smooth == False:
        # no smoothing: keep only the r values
        if len(corrs.shape) == 3:
            rlts = corrs[:, :, 0]
        elif len(corrs.shape) == 2:
            rlts = corrs
    fig = plt.gcf()
    size = fig.get_size_inches()
    # derive a figure size from the time span and channel count if not given
    if figsize == None:
        size_x = ts * tstep * (size[0] - 2) + 2
        size_y = nchls * 0.2 * (size[1] - 1.5) + 1.5
    else:
        size_x = figsize[0]
        size_y = figsize[1]
    fig.set_size_inches(size_x, size_y)
    # aspect factor used to place the channel rows/ticks
    delta = (size_y * 3) / (size_x * 4)
    limmin = lim[0]
    limmax = lim[1]
    if cmap == None:
        plt.imshow(rlts, extent=(start_t, end_t, 0, nchls*0.16*delta), clim=(limmin, limmax), origin='lower', cmap='inferno')
    else:
        plt.imshow(rlts, extent=(start_t, end_t, 0, nchls * 0.16*delta), clim=(limmin, limmax), origin='lower', cmap=cmap)
    cb = plt.colorbar()
    cb.ax.tick_params(labelsize=16)
    font = {'size': 18}
    cb.set_label("Similarity", fontdict=font)
    # one y-tick at the center of each channel row
    xi = []
    for i in range(nchls):
        xi.append(0.16*delta*i + 0.08*delta)
    yi = chllabels
    plt.tick_params(labelsize=18)
    plt.yticks(xi, yi, fontsize=18)
    plt.ylabel("Channel", fontsize=20)
    plt.xlabel("Time (s)", fontsize=20)
    plt.show()
    return 0
def plot_corrs_hotmap_withstats(corrs, chllabels=None, time_unit=[0, 0.1], lim=[0, 1], p=0.05, cbpt=False,
                                clusterp=0.05, stats_time=[0, 1], smooth=False, xlabel='Time (s)', ylabel='Channel',
                                clabel='Similarity', ticksize=18, figsize=None, cmap=None):

    """
    Plot the subject-average channel-by-time similarities as a hotmap and draw
    dashed contours around significantly positive/negative regions.

    Parameters
    ----------
    corrs : array, [n_subs, n_chls, n_ts] or [n_subs, n_chls, n_ts, 2]
        Subject-wise similarity time courses per channel (only the r values of
        [r, p] pairs are used).
    chllabels : list of str or None. Channel labels; if None, ordinal labels
        ("1st", "2nd", ...) are generated.
    time_unit : list, [start, step]. Start time and sampling step in seconds.
    lim : list, [min, max]. Color-scale limits.
    p, clusterp : float. (Cluster-)significance thresholds.
    cbpt : bool. True -> per-channel cluster-based permutation tests;
        False -> per-point two-sided one-sample t-tests within stats_time.
    stats_time : list, [start, end]. Time window (s) to run statistics in.
    smooth : bool. If True, smooth each channel via smooth_1d (in place -
        NOTE(review): this mutates the caller's array when corrs is 3-D).
    Remaining parameters control the appearance of the figure.

    Returns
    -------
    0 on success; "Invalid input!" on invalid input.
    """

    if len(np.shape(corrs)) < 3 or len(np.shape(corrs)) > 4:
        return "Invalid input!"
    nchls = corrs.shape[1]
    nts = corrs.shape[2]
    start_time = time_unit[0]
    tstep = time_unit[1]
    end_time = start_time + nts * tstep
    # convert stats_time (seconds) to sample indices, rounding partial steps up
    delta1 = (stats_time[0] - start_time) / tstep - int((stats_time[0] - start_time) / tstep)
    delta2 = (stats_time[1] - start_time) / tstep - int((stats_time[1] - start_time) / tstep)
    if delta1 == 0:
        stats_time1 = int((stats_time[0] - start_time) / tstep)
    else:
        stats_time1 = int((stats_time[0] - start_time) / tstep) + 1
    if delta2 == 0:
        stats_time2 = int((stats_time[1] - start_time) / tstep)
    else:
        stats_time2 = int((stats_time[1] - start_time) / tstep) + 1
    # generate ordinal channel labels when none are provided
    if chllabels == None:
        chllabels = []
        for i in range(nchls):
            if i % 10 == 0 and i != 10:
                newlabel = str(i+1) + "st"
            elif i % 10 == 1 and i != 11:
                newlabel = str(i+1) + "nd"
            elif i % 10 == 2 and i != 12:
                newlabel = str(i+1) + "rd"
            else:
                newlabel = str(i+1) + "th"
            chllabels.append(newlabel)
    # keep only the r values
    if len(corrs.shape) == 4:
        rlts = corrs[:, :, :, 0]
    elif len(corrs.shape) == 3:
        rlts = corrs
    if smooth == True:
        for chl in range(nchls):
            rlts[:, chl] = smooth_1d(rlts[:, chl])
    fig = plt.gcf()
    size = fig.get_size_inches()
    # derive a figure size from the time span and channel count if not given
    if figsize == None:
        size_x = nts * tstep * (size[0] - 2) + 2
        size_y = nchls * 0.2 * (size[1] - 1.5) + 1.5
    else:
        size_x = figsize[0]
        size_y = figsize[1]
    fig.set_size_inches(size_x, size_y)
    # aspect factor used to place the channel rows/ticks
    delta = (size_y * 3) / (size_x * 4)
    avg = np.average(rlts, axis=0)
    ps = np.zeros([nchls, nts])
    if cbpt == True:
        for chl in range(nchls):
            ps_stats = clusterbased_permutation_1d_1samp_2sided(rlts[:, chl, stats_time1:stats_time2], 0, p_threshold=p,
                                                                clusterp_threshold=clusterp, iter=1000)
            ps[chl, stats_time1:stats_time2] = ps_stats
    else:
        # per-point t-tests only inside the stats window; elsewhere ps stays 0
        for chl in range(nchls):
            for t in range(nts):
                if t >= stats_time1 and t < stats_time2:
                    ps[chl, t] = ttest_1samp(rlts[:, chl, t], 0)[1]
                    if ps[chl, t] < p and avg[chl, t] > 0:
                        ps[chl, t] = 1
                    elif ps[chl, t] < p and avg[chl, t] < 0:
                        ps[chl, t] = -1
                    else:
                        ps[chl, t] = 0
    # pad the significance map with a zero border so contours close at the edges
    # (fix: the np.float alias was removed in NumPy 1.24 - use builtin float)
    newps = np.zeros([nchls + 2, nts + 2], dtype=float)
    newps[1:nchls + 1, 1:nts + 1] = ps
    x = np.linspace(start_time - 0.5 * tstep, end_time + 0.5 * tstep, nts + 2)
    y = np.linspace(-0.08*delta, 0.16*delta * nchls + 0.08*delta, nchls + 2)
    X, Y = np.meshgrid(x, y)
    plt.contour(X, Y, newps, [0.5], linewidths=2, linestyles="dashed")
    plt.contour(X, Y, newps, [-0.5], linewidths=2, linestyles="dashed")
    limmin = lim[0]
    limmax = lim[1]
    if cmap == None:
        plt.imshow(avg, extent=(start_time, end_time, 0, nchls*delta*0.16), clim=(limmin, limmax), origin='lower', cmap='inferno')
    else:
        plt.imshow(avg, extent=(start_time, end_time, 0, nchls*delta * 0.16), clim=(limmin, limmax), origin='lower', cmap=cmap)
    cb = plt.colorbar()
    cb.ax.tick_params(labelsize=ticksize-2)
    font = {'size': ticksize}
    cb.set_label(clabel, fontdict=font)
    # one y-tick at the center of each channel row
    xi = []
    for i in range(nchls):
        xi.append(0.16*delta*i + 0.08*delta)
    yi = chllabels
    plt.tick_params(labelsize=ticksize)
    plt.yticks(xi, yi, fontsize=ticksize)
    plt.ylabel(ylabel, fontsize=20)
    plt.xlabel(xlabel, fontsize=20)
    plt.show()
    return 0
def plot_nps_hotmap(similarities, chllabels=None, time_unit=[0, 0.1], lim=[0, 1], abs=False, smooth=False, figsize=None,
                    cmap=None):

    """
    Plot neural-pattern similarities channel-by-time as a hotmap.

    Parameters
    ----------
    similarities : array, [n_chls, n_ts]. Similarity time course per channel.
    chllabels : list of str or None. Channel labels; if None, ordinal labels
        ("1st", "2nd", ...) are generated.
    time_unit : list, [start, step]. Start time and sampling step in seconds.
    lim : list, [min, max]. Color-scale limits.
    abs : bool. If True, plot absolute similarity values.
    smooth : bool. If True, cubically up-sample (x50) and low-pass filter
        each channel's time course.
    figsize : list, [x, y] or None. Figure size; None derives it from the data.
    cmap : colormap or None. Defaults to matplotlib's default colormap.

    Returns
    -------
    0 on success; "Invalid input!" on invalid input.
    """

    if len(np.shape(similarities)) != 2:
        return "Invalid input!"
    if abs == True:
        similarities = np.abs(similarities)
    nchls = similarities.shape[0]
    ts = similarities.shape[1]
    start_t = time_unit[0]
    tstep = time_unit[1]
    end_t = start_t + ts * tstep
    x = np.arange(start_t, end_t, tstep)
    # generate ordinal channel labels when none are provided
    if chllabels == None:
        chllabels = []
        for i in range(nchls):
            if i % 10 == 0 and i != 10:
                newlabel = str(i + 1) + "st"
            elif i % 10 == 1 and i != 11:
                newlabel = str(i + 1) + "nd"
            elif i % 10 == 2 and i != 12:
                newlabel = str(i + 1) + "rd"
            else:
                newlabel = str(i + 1) + "th"
            chllabels.append(newlabel)
    if smooth == True:
        # cubic interpolation onto a 50x finer grid, then a 4th-order
        # Butterworth low-pass (30 Hz relative to the up-sampled rate)
        t = ts * 50
        x_soft = np.linspace(x.min(), x.max(), t)
        y_soft = np.zeros([nchls, t])
        samplerate = int(1 / tstep) * 50
        b, a = signal.butter(4, 2*30/samplerate, 'lowpass')
        for i in range(nchls):
            f = interp1d(x, similarities[i, :], kind='cubic')
            y_soft[i] = f(x_soft)
            y_soft[i] = signal.filtfilt(b, a, y_soft[i])
        rlts = y_soft
    if smooth == False:
        rlts = similarities
    fig = plt.gcf()
    size = fig.get_size_inches()
    # derive a figure size from the time span and channel count if not given
    if figsize == None:
        size_x = ts * tstep * (size[0] - 2) + 2
        size_y = nchls * 0.2 * (size[1] - 1.5) + 1.5
    else:
        size_x = figsize[0]
        size_y = figsize[1]
    fig.set_size_inches(size_x, size_y)
    # aspect factor used to place the channel rows/ticks
    delta = (size_y * 3) / (size_x * 4)
    limmin = lim[0]
    limmax = lim[1]
    if cmap == None:
        plt.imshow(rlts, extent=(start_t, end_t, 0, nchls*delta*0.16), clim=(limmin, limmax), origin='lower')
    else:
        plt.imshow(rlts, extent=(start_t, end_t, 0, nchls*delta*0.16), clim=(limmin, limmax), origin='lower', cmap=cmap)
    cb = plt.colorbar()
    cb.ax.tick_params(labelsize=16)
    font = {'size': 18}
    cb.set_label("Similarity", fontdict=font)
    # one y-tick at the center of each channel row
    xi = []
    for i in range(nchls):
        xi.append(0.16*delta*i + 0.08*delta)
    yi = chllabels
    plt.tick_params(labelsize=18)
    plt.yticks(xi, yi, fontsize=18)
    plt.ylabel("Channel", fontsize=20)
    plt.xlabel("Time (s)", fontsize=20)
    plt.show()
    return 0
def plot_t_hotmap_withstats(results, chllabels=None, time_unit=[0, 0.1], lim=[-7, 7], p=0.05, cbpt=False,
                            clusterp=0.05, stats_time=[0, 1], smooth=False, xlabel='Time (s)', ylabel='Channel',
                            clabel='t', ticksize=18, figsize=None, cmap=None):

    """
    Plot a channel-by-time map of one-sample t-values (against 0) and draw
    dashed contours around significantly positive/negative regions.

    Parameters
    ----------
    results : array, [n_subs, n_chls, n_ts] or [n_subs, n_chls, n_ts, 2]
        Subject-wise results per channel (only the first value of trailing
        pairs is used).
    chllabels : list of str or None. Channel labels; if None, ordinal labels
        ("1st", "2nd", ...) are generated.
    time_unit : list, [start, step]. Start time and sampling step in seconds.
    lim : list, [min, max]. Color-scale limits for the t-values.
    p, clusterp : float. (Cluster-)significance thresholds.
    cbpt : bool. True -> per-channel cluster-based permutation tests;
        False -> per-point two-sided one-sample t-tests within stats_time.
    stats_time : list, [start, end]. Time window (s) to run statistics in.
    smooth : bool. If True, smooth each channel via smooth_1d (in place -
        NOTE(review): this mutates the caller's array when results is 3-D).
    Remaining parameters control the appearance of the figure.

    Returns
    -------
    0 on success; "Invalid input!" on invalid input.
    """

    if len(np.shape(results)) < 3 or len(np.shape(results)) > 4:
        return "Invalid input!"
    nchls = results.shape[1]
    nts = results.shape[2]
    start_time = time_unit[0]
    tstep = time_unit[1]
    end_time = start_time + nts * tstep
    # convert stats_time (seconds) to sample indices, rounding partial steps up
    delta1 = (stats_time[0] - start_time) / tstep - int((stats_time[0] - start_time) / tstep)
    delta2 = (stats_time[1] - start_time) / tstep - int((stats_time[1] - start_time) / tstep)
    if delta1 == 0:
        stats_time1 = int((stats_time[0] - start_time) / tstep)
    else:
        stats_time1 = int((stats_time[0] - start_time) / tstep) + 1
    if delta2 == 0:
        stats_time2 = int((stats_time[1] - start_time) / tstep)
    else:
        stats_time2 = int((stats_time[1] - start_time) / tstep) + 1
    # generate ordinal channel labels when none are provided
    if chllabels == None:
        chllabels = []
        for i in range(nchls):
            if i % 10 == 0 and i != 10:
                newlabel = str(i + 1) + "st"
            elif i % 10 == 1 and i != 11:
                newlabel = str(i + 1) + "nd"
            elif i % 10 == 2 and i != 12:
                newlabel = str(i + 1) + "rd"
            else:
                newlabel = str(i + 1) + "th"
            chllabels.append(newlabel)
    # keep only the values of interest
    if len(results.shape) == 4:
        rlts = results[:, :, :, 0]
    elif len(results.shape) == 3:
        rlts = results
    if smooth == True:
        for chl in range(nchls):
            rlts[:, chl] = smooth_1d(rlts[:, chl])
    fig = plt.gcf()
    size = fig.get_size_inches()
    # derive a figure size from the time span and channel count if not given
    if figsize == None:
        size_x = nts * tstep * (size[0] - 2) + 2
        size_y = nchls * 0.2 * (size[1] - 1.5) + 1.5
    else:
        size_x = figsize[0]
        size_y = figsize[1]
    fig.set_size_inches(size_x, size_y)
    # aspect factor used to place the channel rows/ticks
    delta = (size_y * 3) / (size_x * 4)
    # t-value map over subjects, shape [n_chls, n_ts]
    # (fix: the result of ttest_1samp is a (statistic, pvalue) tuple - the
    # original `[:, :, 0]` indexing raised a TypeError; `[0]` is the t-map)
    ts = ttest_1samp(rlts, 0, axis=0)[0]
    ps = np.zeros([nchls, nts])
    if cbpt == True:
        for chl in range(nchls):
            ps_stats = clusterbased_permutation_1d_1samp_2sided(rlts[:, chl, stats_time1:stats_time2], 0, p_threshold=p,
                                                                clusterp_threshold=clusterp, iter=1000)
            ps[chl, stats_time1:stats_time2] = ps_stats
    else:
        # per-point t-tests only inside the stats window; elsewhere ps stays 0
        for chl in range(nchls):
            for t in range(nts):
                if t >= stats_time1 and t < stats_time2:
                    ps[chl, t] = ttest_1samp(rlts[:, chl, t], 0)[1]
                    if ps[chl, t] < p and ts[chl, t] > 0:
                        ps[chl, t] = 1
                    elif ps[chl, t] < p and ts[chl, t] < 0:
                        ps[chl, t] = -1
                    else:
                        ps[chl, t] = 0
    # pad the significance map with a zero border so contours close at the edges
    # (fix: the np.float alias was removed in NumPy 1.24 - use builtin float)
    newps = np.zeros([nchls + 2, nts + 2], dtype=float)
    newps[1:nchls + 1, 1:nts + 1] = ps
    x = np.linspace(start_time - 0.5 * tstep, end_time + 0.5 * tstep, nts + 2)
    y = np.linspace(-0.08*delta, 0.16*delta * nchls + 0.08*delta, nchls + 2)
    X, Y = np.meshgrid(x, y)
    plt.contour(X, Y, newps, [0.5], linewidths=2, linestyles="dashed")
    plt.contour(X, Y, newps, [-0.5], linewidths=2, linestyles="dashed")
    limmin = lim[0]
    limmax = lim[1]
    if cmap == None:
        plt.imshow(ts, extent=(start_time, end_time, 0, nchls * 0.16*delta), clim=(limmin, limmax), origin='lower',
                   cmap='bwr')
    else:
        plt.imshow(ts, extent=(start_time, end_time, 0, nchls * 0.16*delta), clim=(limmin, limmax), origin='lower',
                   cmap=cmap)
    cb = plt.colorbar()
    cb.ax.tick_params(labelsize=ticksize-2)
    font = {'size': ticksize}
    cb.set_label(clabel, fontdict=font)
    # one y-tick at the center of each channel row
    xi = []
    for i in range(nchls):
        xi.append(0.16*delta * i + 0.08*delta)
    yi = chllabels
    plt.tick_params(labelsize=ticksize)
    plt.yticks(xi, yi, fontsize=ticksize)
    plt.ylabel(ylabel, fontsize=20)
    plt.xlabel(xlabel, fontsize=20)
    plt.show()
    return 0
def plot_brainrsa_regions(img, threshold=None, background=get_bg_ch2(), type='r'):

    """
    Plot an RSA results image (r-map or t-map) as brain regions.

    Parameters
    ----------
    img : str. Filename of the NIfTI results image.
    threshold : int or None. If not None, apply cluster-size correction via
        correct_by_threshold before plotting.
    background : Niimg-like. Background image (default: the ch2 template;
        NOTE(review): the default is evaluated once at import time).
    type : 'r' or 't'. Type of the statistic stored in img.

    Returns
    -------
    0.
    """

    imgarray = nib.load(img).get_fdata()
    # fix: `imgarray == np.nan` is always False (NaN != NaN), so the empty-map
    # check never fired; np.isnan detects an all-NaN (no valid results) map
    if np.isnan(imgarray).all():
        print("No Valid Results")
    else:
        if threshold is not None:
            imgarray = nib.load(img).get_fdata()
            affine = get_affine(img)
            imgarray = correct_by_threshold(imgarray, threshold)
            img = nib.Nifti1Image(imgarray, affine)
        if type == 'r':
            plotting.plot_roi(roi_img=img, bg_img=background, threshold=0, vmin=0.1, vmax=1,
                              title="Similarity", resampling_interpolation="continuous")
        if type == 't':
            plotting.plot_roi(roi_img=img, bg_img=background, threshold=0, vmin=-7, vmax=7,
                              title="Similarity", resampling_interpolation="continuous")
        plt.show()
    return 0
def plot_brainrsa_montage(img, threshold=None, slice=[6, 6, 6], background=get_bg_ch2bet(), type='r'):

    """
    Plot an RSA results image (r-map or t-map) as sagittal/coronal/axial
    montages.

    Parameters
    ----------
    img : str. Filename of the NIfTI results image.
    threshold : int or None. If not None, apply cluster-size correction via
        correct_by_threshold before plotting.
    slice : list, [n_x, n_y, n_z]. Number of cuts per view; 0 skips a view.
    background : Niimg-like. Background image (default: the ch2bet template;
        NOTE(review): the default is evaluated once at import time).
    type : 'r' or 't'. Type of the statistic stored in img (sets vmax).

    Returns
    -------
    0.
    """

    imgarray = nib.load(img).get_fdata()
    # fix: `imgarray == np.nan` is always False (NaN != NaN), so the empty-map
    # check never fired; np.isnan detects an all-NaN (no valid results) map
    if np.isnan(imgarray).all():
        print("No Valid Results")
    else:
        if threshold is not None:
            imgarray = nib.load(img).get_fdata()
            affine = get_affine(img)
            imgarray = correct_by_threshold(imgarray, threshold)
            img = nib.Nifti1Image(imgarray, affine)
        slice_x = slice[0]
        slice_y = slice[1]
        slice_z = slice[2]
        if type == 'r':
            vmax = 1
        if type == 't':
            vmax = 7
        if slice_x != 0:
            plotting.plot_stat_map(stat_map_img=img, bg_img=background, display_mode='x', cut_coords=slice_x,
                                   title="Similarity -sagittal", draw_cross=True, vmax=vmax)
        if slice_y != 0:
            plotting.plot_stat_map(stat_map_img=img, bg_img=background, display_mode='y', cut_coords=slice_y,
                                   title="Similarity -coronal", draw_cross=True, vmax=vmax)
        if slice_z != 0:
            plotting.plot_stat_map(stat_map_img=img, bg_img=background, display_mode='z', cut_coords=slice_z,
                                   title="Similarity -axial", draw_cross=True, vmax=vmax)
        plt.show()
    return 0
def plot_brainrsa_glass(img, threshold=None, type='r'):

    """
    Plot an RSA results image (r-map or t-map) as a glass brain.

    Parameters
    ----------
    img : str. Filename of the NIfTI results image.
    threshold : int or None. If not None, apply cluster-size correction via
        correct_by_threshold before plotting.
    type : 'r' or 't'. Type of the statistic stored in img (sets vmax).

    Returns
    -------
    0.
    """

    imgarray = nib.load(img).get_fdata()
    # fix: `imgarray == np.nan` is always False (NaN != NaN), so the empty-map
    # check never fired; np.isnan detects an all-NaN (no valid results) map
    if np.isnan(imgarray).all():
        print("No Valid Results")
    else:
        if threshold is not None:
            imgarray = nib.load(img).get_fdata()
            affine = get_affine(img)
            imgarray = correct_by_threshold(imgarray, threshold)
            img = nib.Nifti1Image(imgarray, affine)
        if type == 'r':
            plotting.plot_glass_brain(img, colorbar=True, title="Similarity", black_bg=True, draw_cross=True, vmax=1)
        if type == 't':
            plotting.plot_glass_brain(img, colorbar=True, title="Similarity", black_bg=True, draw_cross=True, vmax=7)
        plt.show()
    return 0
def plot_brainrsa_surface(img, threshold=None, type='r'):

    """
    Plot an RSA results image (r-map or t-map) on the fsaverage cortical
    surface (both hemispheres, two views each).

    Parameters
    ----------
    img : str. Filename of the NIfTI results image.
    threshold : int or None. If not None, apply cluster-size correction via
        correct_by_threshold before plotting.
    type : 'r' or 't'. Type of the statistic stored in img.

    Returns
    -------
    0.
    """

    imgarray = nib.load(img).get_fdata()
    # fix: `imgarray == np.nan` is always False (NaN != NaN), so the empty-map
    # check never fired; np.isnan detects an all-NaN (no valid results) map
    if np.isnan(imgarray).all():
        print("No Valid Results")
    else:
        if threshold is not None:
            imgarray = nib.load(img).get_fdata()
            affine = get_affine(img)
            imgarray = correct_by_threshold(imgarray, threshold)
            img = nib.Nifti1Image(imgarray, affine)
        fsaverage = datasets.fetch_surf_fsaverage(mesh='fsaverage')
        texture_left = surface.vol_to_surf(img, fsaverage.pial_left)
        texture_right = surface.vol_to_surf(img, fsaverage.pial_right)
        # NOTE(review): the last two calls of each branch pair a mesh with the
        # opposite hemisphere's texture and sulc map (as in the original) -
        # confirm this mesh/texture pairing is intended
        if type == 'r':
            plotting.plot_surf_stat_map(fsaverage.pial_left, texture_left, hemi='left', threshold=0.1,
                                        bg_map=fsaverage.sulc_right, colorbar=False, vmax=0.8, darkness=0.7)
            plotting.plot_surf_stat_map(fsaverage.pial_right, texture_right, hemi='right', threshold=0.1,
                                        bg_map=fsaverage.sulc_right, colorbar=True, vmax=0.8, darkness=0.7)
            plotting.plot_surf_stat_map(fsaverage.pial_right, texture_left, hemi='left', threshold=0.1,
                                        bg_map=fsaverage.sulc_right, colorbar=False, vmax=0.8, darkness=0.7)
            plotting.plot_surf_stat_map(fsaverage.pial_left, texture_right, hemi='right', threshold=0.1,
                                        bg_map=fsaverage.sulc_right, colorbar=True, vmax=0.8, darkness=0.7)
            plt.show()
        if type == 't':
            plotting.plot_surf_stat_map(fsaverage.pial_left, texture_left, hemi='left', threshold=0.8,
                                        bg_map=fsaverage.sulc_right, colorbar=False, darkness=0.7)
            plotting.plot_surf_stat_map(fsaverage.pial_right, texture_right, hemi='right', threshold=0.8,
                                        bg_map=fsaverage.sulc_right, colorbar=True, darkness=0.7)
            plotting.plot_surf_stat_map(fsaverage.pial_right, texture_left, hemi='left', threshold=0.8,
                                        bg_map=fsaverage.sulc_right, colorbar=False, darkness=0.7)
            plotting.plot_surf_stat_map(fsaverage.pial_left, texture_right, hemi='right', threshold=0.8,
                                        bg_map=fsaverage.sulc_right, colorbar=True, darkness=0.7)
            plt.show()
    return 0
def plot_brainrsa_rlts(img, threshold=None, slice=[6, 6, 6], background=None, type='r'):

    """
    Plot an RSA results image (r-map or t-map) in several views at once:
    regions, montage, glass brain (default background only), and surface.

    Parameters
    ----------
    img : str. Filename of the NIfTI results image.
    threshold : int or None. If not None, apply cluster-size correction via
        correct_by_threshold before plotting.
    slice : list, [n_x, n_y, n_z]. Cuts per view, forwarded to the montage.
    background : Niimg-like or None. Custom background for the regions and
        montage plots (NOTE(review): the glass-brain view is skipped in the
        custom-background branch, as in the original - confirm intended).
    type : 'r' or 't'. Type of the statistic stored in img.

    Returns
    -------
    0.
    """

    imgarray = nib.load(img).get_fdata()
    # fix: `imgarray == np.nan` is always False (NaN != NaN), so the empty-map
    # check never fired; np.isnan detects an all-NaN (no valid results) map
    if np.isnan(imgarray).all():
        print("No Valid Results")
    else:
        if threshold is not None:
            imgarray = nib.load(img).get_fdata()
            affine = get_affine(img)
            imgarray = correct_by_threshold(imgarray, threshold)
            img = nib.Nifti1Image(imgarray, affine)
        if background is None:
            plot_brainrsa_regions(img, threshold=threshold, type=type)
            plot_brainrsa_montage(img, threshold=threshold, slice=slice, type=type)
            plot_brainrsa_glass(img, threshold=threshold, type=type)
            plot_brainrsa_surface(img, threshold=threshold, type=type)
        else:
            plot_brainrsa_regions(img, threshold=threshold, background=background, type=type)
            plot_brainrsa_montage(img, threshold=threshold, slice=slice, background=background, type=type)
            plot_brainrsa_surface(img, threshold=threshold, type=type)
    return 0
# NOTE(review): removed trailing non-Python residue ("Subsets and Splits" /
# dataset-viewer boilerplate) that was accidentally appended and broke the
# file's syntax.