max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
omop2obo/__init__.py | cthoyt-forks-and-packages/OMOP2OBO | 29 | 6612951 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" A Library for mapping OMOP concepts to OBO
OMOP2OBO is the first health system-wide, disease-agnostic mappings between standardized clinical terminologies in
the [Observational Medical Outcomes Partnership (OMOP) common data model and several Open Biomedical Ontologies (OBO)
foundry ontologies. These mappings were validated by domain experts and their coverage was examined in several health
systems.
There is one way to run PheKnowLator:
1. Command line via argparse (Main.py)
"""
__all__ = [
'ConceptAnnotator',
'OntologyDownloader',
'OntologyInfoExtractor',
'SimilarStringFinder'
]
from omop2obo.clinical_concept_annotator import ConceptAnnotator
from omop2obo.ontology_downloader import OntologyDownloader
from omop2obo.ontology_explorer import OntologyInfoExtractor
from omop2obo.string_similarity import SimilarStringFinder
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" A Library for mapping OMOP concepts to OBO
OMOP2OBO is the first health system-wide, disease-agnostic mappings between standardized clinical terminologies in
the [Observational Medical Outcomes Partnership (OMOP) common data model and several Open Biomedical Ontologies (OBO)
foundry ontologies. These mappings were validated by domain experts and their coverage was examined in several health
systems.
There is one way to run PheKnowLator:
1. Command line via argparse (Main.py)
"""
__all__ = [
'ConceptAnnotator',
'OntologyDownloader',
'OntologyInfoExtractor',
'SimilarStringFinder'
]
from omop2obo.clinical_concept_annotator import ConceptAnnotator
from omop2obo.ontology_downloader import OntologyDownloader
from omop2obo.ontology_explorer import OntologyInfoExtractor
from omop2obo.string_similarity import SimilarStringFinder
| en | 0.944253 | #!/usr/bin/env python # -*- coding: utf-8 -*- A Library for mapping OMOP concepts to OBO OMOP2OBO is the first health system-wide, disease-agnostic mappings between standardized clinical terminologies in the [Observational Medical Outcomes Partnership (OMOP) common data model and several Open Biomedical Ontologies (OBO) foundry ontologies. These mappings were validated by domain experts and their coverage was examined in several health systems. There is one way to run PheKnowLator: 1. Command line via argparse (Main.py) | 2.166108 | 2 |
Version0.2/Python/Preprocessing/mkelconcg.py | glwagner/Exasim | 37 | 6612952 | from numpy import *
from xiny import xiny
def mkelconcg(dgnodes):
    """Build a continuous-Galerkin mesh by merging duplicate DG nodes.

    ``dgnodes`` has shape (ns, dim, nt): ns nodes of dimension dim for
    each of nt elements.  Returns ``(cgnodes, cgelcon)`` where cgnodes
    is the (dim, nnodes) array of unique node coordinates and cgelcon
    is the (ns, nt) connectivity mapping each DG node to its CG index.
    """
    ns, dim, nt = dgnodes.shape
    # Stack every element's nodes into one (ns*nt, dim) coordinate list
    # (Fortran ordering so nodes of the same element stay contiguous).
    allnodes = reshape(dgnodes.transpose(0, 2, 1), (ns * nt, dim), 'F')
    # Collapse duplicate rows; ``inv`` maps each row back to its unique row.
    uniq, _, inv = unique(allnodes, return_index=True, return_inverse=True, axis=0)
    cgnodes = uniq.transpose()
    cgelcon = reshape(inv, (ns, nt), order='F')
    return cgnodes, cgelcon
| from numpy import *
from xiny import xiny
def mkelconcg(dgnodes):
    """Build a continuous-Galerkin mesh by merging duplicate DG nodes.

    ``dgnodes`` has shape (ns, dim, nt): ns nodes of dimension dim for
    each of nt elements.  Returns ``(cgnodes, cgelcon)`` where cgnodes
    is the (dim, nnodes) array of unique node coordinates and cgelcon
    is the (ns, nt) connectivity mapping each DG node to its CG index.
    """
    ns, dim, nt = dgnodes.shape
    # Stack every element's nodes into one (ns*nt, dim) coordinate list
    # (Fortran ordering so nodes of the same element stay contiguous).
    allnodes = reshape(dgnodes.transpose(0, 2, 1), (ns * nt, dim), 'F')
    # Collapse duplicate rows; ``inv`` maps each row back to its unique row.
    uniq, _, inv = unique(allnodes, return_index=True, return_inverse=True, axis=0)
    cgnodes = uniq.transpose()
    cgelcon = reshape(inv, (ns, nt), order='F')
    return cgnodes, cgelcon
| en | 0.229432 | # remove duplicate nodes in mesh.p1 # B = unique(A,axis=0); # b = xiny(A,B,1); # CG mesh | 2.573994 | 3 |
lesson-10/app.py | LindsayYoung/python-class | 18 | 6612953 | # import all of the classes and modules we use in this file from the flask package
from flask import Flask, render_template, request
# create an instance of the Flask class with name = __name__ or the name of this file
app = Flask(__name__)
# set debug to true so we see error dumps in the browser
app.debug = True
# decorators to establish the URL route to reach the method below
@app.route('/', methods=['GET'])
@app.route('/<string:name>/<string:color>/<int:age>', methods=['GET'])
def index(name=None, color='blue', age=25):
    """Render index.html with two fixed people plus an optional one from the URL."""
    people = [
        {'name': 'olivia', 'color': 'red', 'age': 26},
        {'name': 'tom', 'color': 'green', 'age': 34},
    ]
    # The parameterized route contributes a third entry.
    if name is not None:
        people.append({'name': name, 'color': color, 'age': age})
    return render_template('index.html', data=people)
# decorators to establish the URL route to reach the method below, accepts GET and POST
@app.route('/post', methods=['GET', 'POST'])
def post():
    """Handle the /post form: greet on POST, otherwise show the empty form."""
    if request.method != 'POST':
        # Any non-POST request (typically GET) renders the bare form.
        return render_template("post.html")
    # Submitted form fields arrive in request.form.
    full_name = request.form['firstname'] + " " + request.form['lastname']
    return render_template("post.html", name=full_name)

# launch the app
app.run()
from flask import Flask, render_template, request
# create an instance of the Flask class with name = __name__ or the name of this file
app = Flask(__name__)
# set debug to true so we see error dumps in the browser
app.debug = True
# decorators to establish the URL route to reach the method below
@app.route('/', methods=['GET'])
@app.route('/<string:name>/<string:color>/<int:age>', methods=['GET'])
def index(name=None, color='blue', age=25):
    """Render index.html with two fixed people plus an optional one from the URL."""
    people = [
        {'name': 'olivia', 'color': 'red', 'age': 26},
        {'name': 'tom', 'color': 'green', 'age': 34},
    ]
    # The parameterized route contributes a third entry.
    if name is not None:
        people.append({'name': name, 'color': color, 'age': age})
    return render_template('index.html', data=people)
# decorators to establish the URL route to reach the method below, accepts GET and POST
@app.route('/post', methods=['GET', 'POST'])
def post():
    """Handle the /post form: greet on POST, otherwise show the empty form."""
    if request.method != 'POST':
        # Any non-POST request (typically GET) renders the bare form.
        return render_template("post.html")
    # Submitted form fields arrive in request.form.
    full_name = request.form['firstname'] + " " + request.form['lastname']
    return render_template("post.html", name=full_name)

# launch the app
app.run()
AnimalProfile/Root.py | AtMostafa/AnimalProfile | 0 | 6612954 | <reponame>AtMostafa/AnimalProfile<gh_stars>0
import sys
from pathlib import Path
import json
import logging
from .Profile import Profile
from .File import File
class Root:
    """Entry point to an AnimalProfile data root.

    Loads the ``AnimalProfile.setup`` settings file found under *root*,
    or interactively creates one if none exists yet.
    """

    # Name of the JSON settings file stored at the top of the data root.
    SETTING_FILE = 'AnimalProfile.setup'

    def __init__(self, *, root: str = None,):
        """Resolve the data root and read (or create) its settings.

        Args:
            root: path of the data root as a string or Path; ``None``
                selects a platform default ('/data' or 'c:/data').

        Raises:
            TypeError: if ``root`` is not a string, a Path, or None.
        """
        self.root = root
        if self.root is None:
            # Platform-dependent default location.
            if sys.platform.startswith('linux'):
                self.root = Path('/data')
            elif sys.platform.startswith('win'):
                self.root = Path('c:/data')
            else:
                self.root = Path('/data')
        elif isinstance(self.root, str):
            self.root = Path(self.root)
        elif isinstance(self.root, Path):
            pass  # already a usable path (previously fell through silently)
        else:
            # The original used ``assert ("...")`` which asserts a non-empty
            # string literal and therefore can never fail; raise explicitly.
            raise TypeError("'root' must be a string, a Path, or None")
        self._read_root()

    def __str__(self):
        return f'AnimalProfile profile at: {self.settingPath}'

    def __repr__(self):
        return self.__str__()

    def _read_root(self):
        """Load SETTING_FILE into instance attributes, creating it if missing."""
        self.settingPath = self.root / self.SETTING_FILE
        if not self.settingPath.is_file():
            self._set_up()
        with open(self.settingPath, 'r') as f:
            setting = json.load(f)
        # Exposes 'prefix', 'header' and 'body' as instance attributes.
        self.__dict__.update(setting)
        # Bookkeeping columns always lead the per-session body fields.
        self.body.insert(0, 'Sessions')
        self.body.insert(1, 'Tag')

    def _set_up(self):
        """
        No tag system was found in the 'root' so user is asked
        to provide the necessary information to set everything up
        and write to the 'SETTING_FILE'.
        This is done once and at the beginning
        """
        prefix = input("""What is the descriptive prefix of the data structure?\n
E.g., Rat in Rat123, Rat124, etc.\n
Follow python variable naming rules in all the parameters: """)
        # Keep only the first whitespace-delimited token as the prefix.
        prefix = prefix.split(maxsplit=2)[0]
        print('Header: parameters defined once for each animal, e.g., name (added automatically).')
        Nheader = int(input('\nNumber of header parameters (int):'))
        header = []
        for i in range(Nheader):
            h = input(f'name of header param #{i+1}:')
            header.append(h.split(maxsplit=2)[0])
        print("""Body: parameters defined per session for each animal.
"Session":session name, and "Tag":experiment tag are added automatically.""")
        Nbody = int(input('\nNumber of body parameters (int):'))
        body = []
        for i in range(Nbody):
            b = input(f'name of body param #{i+1}:')
            body.append(b.split(maxsplit=2)[0])
        out = {'prefix': prefix, 'header': header, 'body': body}
        with open(self.settingPath, 'w') as f:
            json.dump(out, f, indent=4, sort_keys=True)
        logging.info(f'Written: {self.settingPath}')

    def get_profile(self):
        """Return a Profile object bound to this root."""
        return Profile(root=self)

    def get_all_animals(self):
        """Return the sorted names of all '<prefix>XXX' animal directories."""
        animalPaths = sorted(self.root.glob(f'{self.prefix}???/'))
        animalList = [animal.name for animal in animalPaths]
        return sorted(animalList)

    def update(self, animal: str):
        """Rewrite the profile file of *animal* using File.write().

        Returns True when the file was written, False otherwise.
        """
        tagFile = File(self, animal)
        isWritten = tagFile.write()
        if isWritten:
            logging.info(f'Profile file updated for {animal}')
        else:
            logging.info(f'{animal} profile did not update.')
        return isWritten
if __name__ == "__main__":
a = Root(root='/data')
print(a)
| import sys
from pathlib import Path
import json
import logging
from .Profile import Profile
from .File import File
class Root:
    """Entry point to an AnimalProfile data root.

    Loads the ``AnimalProfile.setup`` settings file found under *root*,
    or interactively creates one if none exists yet.
    """

    # Name of the JSON settings file stored at the top of the data root.
    SETTING_FILE = 'AnimalProfile.setup'

    def __init__(self, *, root: str = None,):
        """Resolve the data root and read (or create) its settings.

        Args:
            root: path of the data root as a string or Path; ``None``
                selects a platform default ('/data' or 'c:/data').

        Raises:
            TypeError: if ``root`` is not a string, a Path, or None.
        """
        self.root = root
        if self.root is None:
            # Platform-dependent default location.
            if sys.platform.startswith('linux'):
                self.root = Path('/data')
            elif sys.platform.startswith('win'):
                self.root = Path('c:/data')
            else:
                self.root = Path('/data')
        elif isinstance(self.root, str):
            self.root = Path(self.root)
        elif isinstance(self.root, Path):
            pass  # already a usable path (previously fell through silently)
        else:
            # The original used ``assert ("...")`` which asserts a non-empty
            # string literal and therefore can never fail; raise explicitly.
            raise TypeError("'root' must be a string, a Path, or None")
        self._read_root()

    def __str__(self):
        return f'AnimalProfile profile at: {self.settingPath}'

    def __repr__(self):
        return self.__str__()

    def _read_root(self):
        """Load SETTING_FILE into instance attributes, creating it if missing."""
        self.settingPath = self.root / self.SETTING_FILE
        if not self.settingPath.is_file():
            self._set_up()
        with open(self.settingPath, 'r') as f:
            setting = json.load(f)
        # Exposes 'prefix', 'header' and 'body' as instance attributes.
        self.__dict__.update(setting)
        # Bookkeeping columns always lead the per-session body fields.
        self.body.insert(0, 'Sessions')
        self.body.insert(1, 'Tag')

    def _set_up(self):
        """
        No tag system was found in the 'root' so user is asked
        to provide the necessary information to set everything up
        and write to the 'SETTING_FILE'.
        This is done once and at the beginning
        """
        prefix = input("""What is the descriptive prefix of the data structure?\n
E.g., Rat in Rat123, Rat124, etc.\n
Follow python variable naming rules in all the parameters: """)
        # Keep only the first whitespace-delimited token as the prefix.
        prefix = prefix.split(maxsplit=2)[0]
        print('Header: parameters defined once for each animal, e.g., name (added automatically).')
        Nheader = int(input('\nNumber of header parameters (int):'))
        header = []
        for i in range(Nheader):
            h = input(f'name of header param #{i+1}:')
            header.append(h.split(maxsplit=2)[0])
        print("""Body: parameters defined per session for each animal.
"Session":session name, and "Tag":experiment tag are added automatically.""")
        Nbody = int(input('\nNumber of body parameters (int):'))
        body = []
        for i in range(Nbody):
            b = input(f'name of body param #{i+1}:')
            body.append(b.split(maxsplit=2)[0])
        out = {'prefix': prefix, 'header': header, 'body': body}
        with open(self.settingPath, 'w') as f:
            json.dump(out, f, indent=4, sort_keys=True)
        logging.info(f'Written: {self.settingPath}')

    def get_profile(self):
        """Return a Profile object bound to this root."""
        return Profile(root=self)

    def get_all_animals(self):
        """Return the sorted names of all '<prefix>XXX' animal directories."""
        animalPaths = sorted(self.root.glob(f'{self.prefix}???/'))
        animalList = [animal.name for animal in animalPaths]
        return sorted(animalList)

    def update(self, animal: str):
        """Rewrite the profile file of *animal* using File.write().

        Returns True when the file was written, False otherwise.
        """
        tagFile = File(self, animal)
        isWritten = tagFile.write()
        if isWritten:
            logging.info(f'Profile file updated for {animal}')
        else:
            logging.info(f'{animal} profile did not update.')
        return isWritten
if __name__ == "__main__":
a = Root(root='/data')
print(a) | en | 0.800863 | This class initializes root, and variables if found, otherwise lets the user to set up a new profile system. No tag system was found in the 'root' so user is asked to provide the necessary information to set everything up and write to the 'SETTING_FILE'. This is done once and at the beginning What is the descriptive prefix of the data structure?\n E.g., Rat in Rat123, Rat124, etc.\n Follow python variable naming rules in all the parameters: #{i+1}:') Body: parameters defined per session for each animal. "Session":session name, and "Tag":experiment tag are added automatically. #{i+1}:') return a profile object for this root updates the profile file of the animal using File.write() | 3.517509 | 4 |
contrib_lib/plot_folium.py | DHI/ifm_contrib | 9 | 6612955 | from ifm import Enum
class PlotFolium:
    """
    Functions for exporting plotted data like isocontours as GeoDataFrame. Results are similar to the
    output of the View Components Panel of the FEFLOW GUI.
    """

    def __init__(self, doc):
        # Handle to the owning document (presumably the FEFLOW/ifm document
        # object -- confirm with callers); all exports operate on it.
        self.doc = doc
| from ifm import Enum
class PlotFolium:
    """
    Functions for exporting plotted data like isocontours as GeoDataFrame. Results are similar to the
    output of the View Components Panel of the FEFLOW GUI.
    """

    def __init__(self, doc):
        # Handle to the owning document (presumably the FEFLOW/ifm document
        # object -- confirm with callers); all exports operate on it.
        self.doc = doc
| en | 0.884469 | Functions for exporting plotted data like isocontours as GeoDataFrame. Results are similar to the output of the View Components Panel of the FEFLOW GUI. | 2.127329 | 2 |
PSET1/Exercise/Lecture 2/scipy_taylor.py | MonitSharma/Computational-Methods-in-Physics | 0 | 6612956 | <gh_stars>0
from scipy.integrate import odeint
import numpy as np
from matplotlib import pyplot as plt
def f(x, t):
    """Right-hand side dy/dt = exp(-t**2) for scipy.integrate.odeint.

    odeint calls this as ``f(y, t)``: the state comes first, time second.
    The original signature ``(t, x)`` had these swapped, and
    ``np.power(-t, 2)`` squared away the minus sign, so the integrand
    effectively became exp(+y**2) instead of the intended exp(-t**2).
    """
    return np.exp(-np.power(t, 2))
# Initial condition y(0) = 0.
x0 = 0
# Time grid from 0 to 2 (exclusive) with step 0.01.
t = np.arange(0,2,0.01)
# NOTE(review): odeint calls its func as f(y, t) by default -- confirm the
# integrand's parameter order matches that convention.
xs = odeint(f,x0,t)
# Solution curve plus red markers at each sample.
plt.plot(t,xs,'-')
plt.plot(t,xs,'ro')
plt.xlabel('x values')
plt.ylabel('y values')
plt.title('Differential Equation')
plt.savefig('scipy_int.png')
import numpy as np
from matplotlib import pyplot as plt
def f(x, t):
    """Right-hand side dy/dt = exp(-t**2) for scipy.integrate.odeint.

    odeint calls this as ``f(y, t)``: the state comes first, time second.
    The original signature ``(t, x)`` had these swapped, and
    ``np.power(-t, 2)`` squared away the minus sign, so the integrand
    effectively became exp(+y**2) instead of the intended exp(-t**2).
    """
    return np.exp(-np.power(t, 2))
# Initial condition y(0) = 0.
x0 = 0
# Time grid from 0 to 2 (exclusive) with step 0.01.
t = np.arange(0,2,0.01)
# NOTE(review): odeint calls its func as f(y, t) by default -- confirm the
# integrand's parameter order matches that convention.
xs = odeint(f,x0,t)
# Solution curve plus red markers at each sample.
plt.plot(t,xs,'-')
plt.plot(t,xs,'ro')
plt.xlabel('x values')
plt.ylabel('y values')
plt.title('Differential Equation')
plt.savefig('scipy_int.png')
plot_out_cmd.py | ramenspazz/Spirit_sandbox | 1 | 6612957 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
from matplotlib.colors import LinearSegmentedColormap
def get_coords(count, n_cols):
    """Map a linear site index to ``[row, col]`` in a grid with n_cols columns.

    Uses floor division via divmod; the original ``int(count / n_cols)``
    went through float true-division, which loses precision for large counts.
    """
    row, col = divmod(int(count), int(n_cols))
    return [row, col]
def Plot_Lattice(f_name, n_cols, n_rows):
    """Plot an n_rows x n_cols 2D vector field read from an output file.

    Each non-comment, non-blank line of ``f_name`` holds three floats
    ``x y z``; sites fill row-major via ``get_coords``.  The x/y values
    become quiver arrows and z (mapped from [-1, 1] to [0, 1]) colors them.

    Returns -1 if the file holds more than n_rows*n_cols entries,
    otherwise None after showing the plot.
    """
    n_sites = n_rows * n_cols
    # Component grids: x, y, and color (z magnitude).
    U = np.zeros((n_rows, n_cols))
    V = np.zeros((n_rows, n_cols))
    M = np.zeros((n_rows, n_cols))
    count = 0
    with open(f_name, 'r') as fp:
        for ln in fp:
            if '#' in ln:        # comment line
                continue
            if ln[0] == '\n':    # blank line
                continue
            # The original test was ``count > n_sites``, which let one extra
            # entry through and indexed past the grid; >= rejects it.
            if count >= n_sites:
                print('Too many entries!\n')
                return -1
            temp = [float(i) for i in ln.split()]
            row, col = get_coords(count, n_cols)
            U[row][col] = temp[0]
            V[row][col] = temp[1]
            M[row][col] = (temp[2] + 1) / 2   # map z from [-1, 1] to [0, 1]
            count += 1
    plt.quiver(U, V, M, cmap='plasma', units='xy', angles='xy', pivot='middle', width=0.25, scale=1.5)
    plt.show()
# Prompt for the data file and render a fixed 10x10 lattice.
plot_name = input('Enter file name to plot: ')
#xs = int(raw_input('x = '))
#ys = int(raw_input('y = '))
Plot_Lattice(plot_name, 10,10)
| import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
from matplotlib.colors import LinearSegmentedColormap
def get_coords(count, n_cols):
    """Map a linear site index to ``[row, col]`` in a grid with n_cols columns.

    Uses floor division via divmod; the original ``int(count / n_cols)``
    went through float true-division, which loses precision for large counts.
    """
    row, col = divmod(int(count), int(n_cols))
    return [row, col]
def Plot_Lattice(f_name, n_cols, n_rows):
    """Plot an n_rows x n_cols 2D vector field read from an output file.

    Each non-comment, non-blank line of ``f_name`` holds three floats
    ``x y z``; sites fill row-major via ``get_coords``.  The x/y values
    become quiver arrows and z (mapped from [-1, 1] to [0, 1]) colors them.

    Returns -1 if the file holds more than n_rows*n_cols entries,
    otherwise None after showing the plot.
    """
    n_sites = n_rows * n_cols
    # Component grids: x, y, and color (z magnitude).
    U = np.zeros((n_rows, n_cols))
    V = np.zeros((n_rows, n_cols))
    M = np.zeros((n_rows, n_cols))
    count = 0
    with open(f_name, 'r') as fp:
        for ln in fp:
            if '#' in ln:        # comment line
                continue
            if ln[0] == '\n':    # blank line
                continue
            # The original test was ``count > n_sites``, which let one extra
            # entry through and indexed past the grid; >= rejects it.
            if count >= n_sites:
                print('Too many entries!\n')
                return -1
            temp = [float(i) for i in ln.split()]
            row, col = get_coords(count, n_cols)
            U[row][col] = temp[0]
            V[row][col] = temp[1]
            M[row][col] = (temp[2] + 1) / 2   # map z from [-1, 1] to [0, 1]
            count += 1
    plt.quiver(U, V, M, cmap='plasma', units='xy', angles='xy', pivot='middle', width=0.25, scale=1.5)
    plt.show()
# Prompt for the data file and render a fixed 10x10 lattice.
plot_name = input('Enter file name to plot: ')
#xs = int(raw_input('x = '))
#ys = int(raw_input('y = '))
Plot_Lattice(plot_name, 10,10)
| en | 0.512338 | ###===============================================### # Itterativly add temporary vectors pulled from # # output file to a color coded 2D vector field plot # # where color represents the vectors' z-magnitude. # # color plot is O(n^2) # ###===============================================### #start file parsing and plotting #initialize data containers # initilize counter #mycmap = LinearSegmentedColormap.from_list('mycmap', ['red', 'blue', 'green']) #xs = int(raw_input('x = ')) #ys = int(raw_input('y = ')) | 3.254933 | 3 |
tests/python_backend/test_graph_compilation.py | notoraptor/myia | 222 | 6612958 | from dataclasses import dataclass
from myia.compile.backends.python.python import compile_graph
from myia.ir import manage
from myia.ir.utils import print_graph
from myia.operations import (
grad,
random_initialize,
random_uint32,
value_and_grad,
)
from myia.parser import parse
def _assert_match(expected, given, rel=1e-03):
"""Assert two values match.
Use to check gradient output (computed using finite difference).
Inspired to gradient output checking in myia.debug.finite_diff.GradTester.
"""
threshold = max(abs(rel * expected), abs(rel * given))
match = bool(abs(expected - given) <= threshold)
assert match, (expected, given, rel)
def parse_and_compile(fn):
    """Parse *fn* into a myia graph, print it, and compile it with the debug Python backend."""
    graph = parse(fn)
    manage(graph)
    print(print_graph(graph))
    compiled = compile_graph(graph, debug=True)
    return compiled
def test_grad_first_order():
    """grad() of x*x should give the derivative 2*x."""
    def square(x):
        return x * x

    @parse_and_compile
    def f(x):
        return grad(square)(x)

    _assert_match(20, f(10))


def test_grad_two_args():
    """grad() with respect to each argument, and to both at once."""
    def func(x, y):
        return 2 * x * y + 2 * x * x - 3 * y * y

    @parse_and_compile
    def f(x, y):
        return grad(func, "y")(x, y)

    @parse_and_compile
    def g(x, y):
        return grad(func, "x")(x, y)

    @parse_and_compile
    def h(x, y):
        return grad(func)(x, y)

    # d/dy = 2x - 6y -> -10 at (1, 2); d/dx = 2y + 4x -> 8 at (1, 2)
    _assert_match(-10, f(1, 2))
    _assert_match(8, g(1, 2))
    dx, dy = h(2, 3)
    _assert_match(2 * 3 + 4 * 2, dx)
    _assert_match(2 * 2 - 6 * 3, dy)


def test_value_and_grad_first_order():
    """value_and_grad() returns both f(x) and f'(x)."""
    def square(x):
        return x * x

    @parse_and_compile
    def f(x):
        return value_and_grad(square)(x)

    v, g = f(10)
    assert v == 100, v
    _assert_match(20, g)
def test_value_grad_two_args():
    """value_and_grad() per-argument and for all arguments at once."""
    def func(x, y):
        return 2 * x * y + 2 * x * x - 3 * y * y

    @parse_and_compile
    def f(x, y):
        return value_and_grad(func, "y")(x, y)

    @parse_and_compile
    def g(x, y):
        return value_and_grad(func, "x")(x, y)

    @parse_and_compile
    def h(x, y):
        return value_and_grad(func)(x, y)

    # func(1, 2) == 4 + 2 - 12 == -6
    func_1_2 = -6
    assert func_1_2 == func(1, 2)
    value_f, grad_f = f(1, 2)
    assert value_f == func_1_2
    _assert_match(-10, grad_f)
    value_g, grad_g = g(1, 2)
    assert value_g == func_1_2
    _assert_match(8, grad_g)
    v, (dx, dy) = h(2, 3)
    assert v == 12 + 8 - 27
    _assert_match(2 * 3 + 4 * 2, dx)
    _assert_match(2 * 2 - 6 * 3, dy)


def test_rng():
    """Random ops compile and run; output is printed, not asserted."""
    @parse_and_compile
    def f():
        rstate = random_initialize(12345678)
        r0, v0 = random_uint32(rstate, (2, 2))
        return v0

    print(f())
def test_while():
    """while-loop compilation: sum of 0..n."""
    @parse_and_compile
    def f(n):
        ret = 0
        i = 0
        while i < n + 1:
            ret = ret + i
            i = i + 1
        return ret

    assert f(0) == 0
    assert f(1) == 1
    assert f(2) == 3
    assert f(3) == 6


def test_iter_object():
    """for-loop over a custom iterable implementing the __myia_* protocol."""
    @dataclass
    class HalfIterator:
        value: int

        def __myia_iter__(self):
            return self

        def __myia_hasnext__(self):
            return self.value > 0

        def __myia_next__(self):
            return self.value, HalfIterator(self.value // 2)

    @parse_and_compile
    def f(v):
        ret = 0
        for x in HalfIterator(v):
            ret = ret + x
        return ret

    # 10 + 5 + 2 + 1 == 18
    assert f(10) == 18, f(10)


def test_for_range():
    """for-loop over range(): sum of 0..n."""
    @parse_and_compile
    def f(n):
        ret = 0
        for i in range(n + 1):
            ret = ret + i
        return ret

    assert f(0) == 0
    assert f(1) == 1
    assert f(2) == 3
    assert f(3) == 6
def test_recursion():
    """Self-recursive compiled function (factorial)."""
    @parse_and_compile
    def factorial(n):
        return 1 if n < 2 else n * factorial(n - 1)

    for x, y in (
        (0, 1),
        (1, 1),
        (2, 2),
        (3, 6),
        (4, 24),
        (5, 120),
    ):
        assert factorial(x) == y, factorial(x)


def test_function_call():
    """Calling a plain (non-compiled) recursive helper from compiled code."""
    def _pgcd(a, b):
        return a if not b else _pgcd(b, a % b)

    @parse_and_compile
    def pgcd(a, b):
        return _pgcd(b, a) if a < b else _pgcd(a, b)

    assert pgcd(12, 32) == 4, pgcd(12, 32)
    assert pgcd(625, 250) == 125, pgcd(625, 250)


def test_inner_closure():
    """Same as test_function_call, but the helper is a closure inside the graph."""
    @parse_and_compile
    def pgcd(a, b):
        def _pgcd(a, b):
            return a if not b else _pgcd(b, a % b)

        return _pgcd(b, a) if a < b else _pgcd(a, b)

    assert pgcd(12, 32) == 4, pgcd(12, 32)
    assert pgcd(625, 250) == 125, pgcd(625, 250)


def test_if():
    """if/else branch compilation."""
    @parse_and_compile
    def f(x):
        if x < 0:
            return x * x
        else:
            return x * x * x

    assert f(-2) == 4, f(-2)
    assert f(0) == 0, f(0)
    assert f(2) == 8, f(2)


def test_simple():
    """Straight-line arithmetic."""
    @parse_and_compile
    def f(x):
        return 2 * x + 1

    assert f(0) == 1, f(0)
    assert f(-2) == -3, f(0)
def test_two_external_functions_with_same_name():
    """Two distinct external functions sharing __name__ must not collide."""
    def f1(x):
        return 2 * x

    def f2(x):
        return 3 * x

    f1.__name__ = "f"
    f2.__name__ = "f"

    @parse_and_compile
    def g(x):
        return f1(x) + f2(x)

    assert g(5) == 25, g(5)


def test_cast():
    """int -> float cast inside compiled code."""
    @parse_and_compile
    def f(x):
        return float(x) + 1.5

    assert f(2) == 3.5, f(2)


def test_grad_with_dout():
    """Test grad with dout."""
    def f(x, y):
        a = x ** 3
        b = y ** 4
        return a * b

    @parse_and_compile
    def g(x, y):
        return grad(f, "x")(x, y, dout=2)

    @parse_and_compile
    def h(x, y):
        return grad(f, "y")(x, y, dout=-3)

    x, y = 2.0, 3.0
    # Analytic partials, later scaled by the supplied dout values.
    dx = 3 * (x ** 2) * (y ** 4)
    dy = 4 * (y ** 3) * (x ** 3)
    _assert_match(dx * 2, g(x, y))
    _assert_match(dy * -3, h(x, y))
def test_for_loop_on_tuple():
    """for-loop iterating a tuple argument."""
    @parse_and_compile
    def f(t):
        ret = 0
        for x in t:
            ret = ret + x
        return ret

    t = (1, 2, 3)
    assert f(t) == 6


def test_operators():
    """All supported unary/binary/boolean operators in one compiled function."""
    @parse_and_compile
    def f(a, b):
        return (
            a + b,
            a - b,
            a * b,
            a % b,
            a ** b,
            a == b,
            a != b,
            a < b,
            a > b,
            a <= b,
            a >= b,
            +a,
            -a,
            not a,
            a & b,
            a | b,
            b << a,
            b >> a,
            ~a,
            a and b,
            a or b,
        )

    a = 2
    b = 3
    # Expected results for a=2, b=3, in the same order as above.
    results = (
        5,
        -1,
        6,
        2,
        8,
        False,
        True,
        True,
        False,
        True,
        False,
        2,
        -2,
        False,
        2,
        3,
        12,
        0,
        ~2,
        3,
        True,
    )
    assert f(a, b) == results, (f(a, b), results)
| from dataclasses import dataclass
from myia.compile.backends.python.python import compile_graph
from myia.ir import manage
from myia.ir.utils import print_graph
from myia.operations import (
grad,
random_initialize,
random_uint32,
value_and_grad,
)
from myia.parser import parse
def _assert_match(expected, given, rel=1e-03):
"""Assert two values match.
Use to check gradient output (computed using finite difference).
Inspired to gradient output checking in myia.debug.finite_diff.GradTester.
"""
threshold = max(abs(rel * expected), abs(rel * given))
match = bool(abs(expected - given) <= threshold)
assert match, (expected, given, rel)
def parse_and_compile(fn):
g = parse(fn)
manage(g)
print(print_graph(g))
cf = compile_graph(g, debug=True)
return cf
def test_grad_first_order():
def square(x):
return x * x
@parse_and_compile
def f(x):
return grad(square)(x)
_assert_match(20, f(10))
def test_grad_two_args():
def func(x, y):
return 2 * x * y + 2 * x * x - 3 * y * y
@parse_and_compile
def f(x, y):
return grad(func, "y")(x, y)
@parse_and_compile
def g(x, y):
return grad(func, "x")(x, y)
@parse_and_compile
def h(x, y):
return grad(func)(x, y)
_assert_match(-10, f(1, 2))
_assert_match(8, g(1, 2))
dx, dy = h(2, 3)
_assert_match(2 * 3 + 4 * 2, dx)
_assert_match(2 * 2 - 6 * 3, dy)
def test_value_and_grad_first_order():
def square(x):
return x * x
@parse_and_compile
def f(x):
return value_and_grad(square)(x)
v, g = f(10)
assert v == 100, v
_assert_match(20, g)
def test_value_grad_two_args():
def func(x, y):
return 2 * x * y + 2 * x * x - 3 * y * y
@parse_and_compile
def f(x, y):
return value_and_grad(func, "y")(x, y)
@parse_and_compile
def g(x, y):
return value_and_grad(func, "x")(x, y)
@parse_and_compile
def h(x, y):
return value_and_grad(func)(x, y)
func_1_2 = -6
assert func_1_2 == func(1, 2)
value_f, grad_f = f(1, 2)
assert value_f == func_1_2
_assert_match(-10, grad_f)
value_g, grad_g = g(1, 2)
assert value_g == func_1_2
_assert_match(8, grad_g)
v, (dx, dy) = h(2, 3)
assert v == 12 + 8 - 27
_assert_match(2 * 3 + 4 * 2, dx)
_assert_match(2 * 2 - 6 * 3, dy)
def test_rng():
@parse_and_compile
def f():
rstate = random_initialize(12345678)
r0, v0 = random_uint32(rstate, (2, 2))
return v0
print(f())
def test_while():
@parse_and_compile
def f(n):
ret = 0
i = 0
while i < n + 1:
ret = ret + i
i = i + 1
return ret
assert f(0) == 0
assert f(1) == 1
assert f(2) == 3
assert f(3) == 6
def test_iter_object():
@dataclass
class HalfIterator:
value: int
def __myia_iter__(self):
return self
def __myia_hasnext__(self):
return self.value > 0
def __myia_next__(self):
return self.value, HalfIterator(self.value // 2)
@parse_and_compile
def f(v):
ret = 0
for x in HalfIterator(v):
ret = ret + x
return ret
assert f(10) == 18, f(10)
def test_for_range():
@parse_and_compile
def f(n):
ret = 0
for i in range(n + 1):
ret = ret + i
return ret
assert f(0) == 0
assert f(1) == 1
assert f(2) == 3
assert f(3) == 6
def test_recursion():
@parse_and_compile
def factorial(n):
return 1 if n < 2 else n * factorial(n - 1)
for x, y in (
(0, 1),
(1, 1),
(2, 2),
(3, 6),
(4, 24),
(5, 120),
):
assert factorial(x) == y, factorial(x)
def test_function_call():
def _pgcd(a, b):
return a if not b else _pgcd(b, a % b)
@parse_and_compile
def pgcd(a, b):
return _pgcd(b, a) if a < b else _pgcd(a, b)
assert pgcd(12, 32) == 4, pgcd(12, 32)
assert pgcd(625, 250) == 125, pgcd(625, 250)
def test_inner_closure():
@parse_and_compile
def pgcd(a, b):
def _pgcd(a, b):
return a if not b else _pgcd(b, a % b)
return _pgcd(b, a) if a < b else _pgcd(a, b)
assert pgcd(12, 32) == 4, pgcd(12, 32)
assert pgcd(625, 250) == 125, pgcd(625, 250)
def test_if():
@parse_and_compile
def f(x):
if x < 0:
return x * x
else:
return x * x * x
assert f(-2) == 4, f(-2)
assert f(0) == 0, f(0)
assert f(2) == 8, f(2)
def test_simple():
@parse_and_compile
def f(x):
return 2 * x + 1
assert f(0) == 1, f(0)
assert f(-2) == -3, f(0)
def test_two_external_functions_with_same_name():
def f1(x):
return 2 * x
def f2(x):
return 3 * x
f1.__name__ = "f"
f2.__name__ = "f"
@parse_and_compile
def g(x):
return f1(x) + f2(x)
assert g(5) == 25, g(5)
def test_cast():
@parse_and_compile
def f(x):
return float(x) + 1.5
assert f(2) == 3.5, f(2)
def test_grad_with_dout():
"""Test grad with dout."""
def f(x, y):
a = x ** 3
b = y ** 4
return a * b
@parse_and_compile
def g(x, y):
return grad(f, "x")(x, y, dout=2)
@parse_and_compile
def h(x, y):
return grad(f, "y")(x, y, dout=-3)
x, y = 2.0, 3.0
dx = 3 * (x ** 2) * (y ** 4)
dy = 4 * (y ** 3) * (x ** 3)
_assert_match(dx * 2, g(x, y))
_assert_match(dy * -3, h(x, y))
def test_for_loop_on_tuple():
@parse_and_compile
def f(t):
ret = 0
for x in t:
ret = ret + x
return ret
t = (1, 2, 3)
assert f(t) == 6
def test_operators():
@parse_and_compile
def f(a, b):
return (
a + b,
a - b,
a * b,
a % b,
a ** b,
a == b,
a != b,
a < b,
a > b,
a <= b,
a >= b,
+a,
-a,
not a,
a & b,
a | b,
b << a,
b >> a,
~a,
a and b,
a or b,
)
a = 2
b = 3
results = (
5,
-1,
6,
2,
8,
False,
True,
True,
False,
True,
False,
2,
-2,
False,
2,
3,
12,
0,
~2,
3,
True,
)
assert f(a, b) == results, (f(a, b), results)
| en | 0.586559 | Assert two values match. Use to check gradient output (computed using finite difference). Inspired to gradient output checking in myia.debug.finite_diff.GradTester. Test grad with dout. | 3.045402 | 3 |
src/app/web.py | hannah-wright/land_acknowledgement | 25 | 6612959 | <filename>src/app/web.py
import json
import falcon
from twilio.twiml.messaging_response import MessagingResponse
from .db import GeoData
from .geocode import geolocate, LocationNotFound
from .responses import (
TooBigResponse,
PostalCodeResponse,
PlaceResponse,
AddressResponse,
PoiResponse,
GenericResponse
)
class Make_TwilML:
    """Falcon middleware that wraps every response body in Twilio's TwiML XML."""

    def process_response(self, req, resp, resource, req_succeeded):
        '''Post-request middleware to turn the response into twilio's XML format'''
        wrapper = MessagingResponse()
        wrapper.message(resp.body)
        resp.body = str(wrapper)
def check_empty_input(req, resp, resource, params):
    '''Hook to intercept empty messages or messages with predictable greetings'''
    greetings = {'hello', 'hi', 'help'}
    raw = req.get_param('Body')
    query = raw.strip() if raw else raw
    # Missing/blank body or a bare greeting: prompt for a location.
    if not query or query.lower() in greetings:
        raise falcon.HTTPStatus(
            falcon.HTTP_200,
            body="Hello. Please tell me the town and state you are in. For example, 'Anchorage, AK'",
        )
    # Anything shorter than 3 characters is too vague to geocode.
    if len(query) < 3:
        raise falcon.HTTPStatus(
            falcon.HTTP_200,
            body="Hmm, that seems a little vague. Try sending a city and state such as 'Anchorage, AK'",
        )
class LandResource(object):
    """Falcon resource answering SMS queries about the sender's location."""

    def __init__(self):
        # GeoData is created lazily on the first request.
        self.geodata = None
        # Maps a geocoder place_type to the response class used to render it.
        self.type_dispatch = {
            'country': TooBigResponse,
            'region': TooBigResponse,
            'postcode': PostalCodeResponse,
            'district': TooBigResponse,
            'place': PlaceResponse,
            'locality': PlaceResponse,
            'neighborhood': PlaceResponse,  # these might be too vague to handle
            'address': AddressResponse,
            'poi': PoiResponse
        }

    @falcon.before(check_empty_input)
    def on_post(self, req, resp):
        """Geocode the SMS body and reply with the matching response type."""
        print(json.dumps(req.params))
        if self.geodata is None:
            self.geodata = GeoData()
        query = req.get_param('Body').strip()
        try:
            location = geolocate(query)
        except LocationNotFound:
            raise falcon.HTTPStatus(falcon.HTTP_200, body=f"I could not find the location: {query}")
        except Exception as e:
            print(e)
            # Grammar fix: message previously read "Sorry, I having ...".
            raise falcon.HTTPStatus(falcon.HTTP_200, body="Sorry, I'm having some technical trouble right now.")
        place_type = location['place_type'][0]
        response_class = self.type_dispatch.get(place_type, GenericResponse)
        response = response_class(query, location, self.geodata)
        resp.body = str(response)
def create_app():
app = falcon.API(media_type=falcon.MEDIA_XML, middleware=[Make_TwilML()])
app.req_options.auto_parse_form_urlencoded = True
resource = LandResource()
app.add_route('/', resource)
return app
app = create_app()
| <filename>src/app/web.py
import json
import falcon
from twilio.twiml.messaging_response import MessagingResponse
from .db import GeoData
from .geocode import geolocate, LocationNotFound
from .responses import (
TooBigResponse,
PostalCodeResponse,
PlaceResponse,
AddressResponse,
PoiResponse,
GenericResponse
)
class Make_TwilML:
def process_response(self, req, resp, resource, req_succeeded):
'''Post-request middleware to turn the response into twilio's XML format'''
twil_resp = MessagingResponse()
twil_resp.message(resp.body)
resp.body = str(twil_resp)
def check_empty_input(req, resp, resource, params):
'''Hook to intercept empty messages or messages with predictable greetings'''
greetings = {'hello', 'hi', 'help'}
query = req.get_param('Body') and req.get_param('Body').strip()
if not query or query.lower() in greetings:
body = "Hello. Please tell me the town and state you are in. For example, 'Anchorage, AK'"
raise falcon.HTTPStatus(falcon.HTTP_200, body=body)
elif len(query) < 3:
body = "Hmm, that seems a little vague. Try sending a city and state such as 'Anchorage, AK'"
raise falcon.HTTPStatus(falcon.HTTP_200, body=body)
class LandResource(object):
def __init__(self):
self.geodata = None
self.type_dispatch = {
'country': TooBigResponse,
'region': TooBigResponse,
'postcode': PostalCodeResponse,
'district': TooBigResponse,
'place': PlaceResponse,
'locality': PlaceResponse,
'neighborhood': PlaceResponse, # these might be to vauge to handle
'address': AddressResponse,
'poi': PoiResponse
}
@falcon.before(check_empty_input)
def on_post(self, req, resp):
print(json.dumps(req.params))
if self.geodata is None:
self.geodata = GeoData()
query = req.get_param('Body').strip()
try:
location = geolocate(query)
except LocationNotFound:
raise falcon.HTTPStatus(falcon.HTTP_200, body=f"I could not find the location: {query}")
except Exception as e:
print(e)
raise falcon.HTTPStatus(falcon.HTTP_200, body="Sorry, I having some technical trouble right now.")
place_type = location['place_type'][0]
response_class = self.type_dispatch.get(place_type, GenericResponse)
response = response_class(query, location, self.geodata)
resp.body = str(response)
def create_app():
app = falcon.API(media_type=falcon.MEDIA_XML, middleware=[Make_TwilML()])
app.req_options.auto_parse_form_urlencoded = True
resource = LandResource()
app.add_route('/', resource)
return app
app = create_app()
| en | 0.805812 | Post-request middleware to turn the response into twilio's XML format Hook to intercept empty messages or messages with predictable greetings # these might be to vauge to handle | 2.932276 | 3 |
cloudmesh_web/modules/new_menu.py | JulienPalard/cloudmesh | 0 | 6612960 | super_duper_sidebar_pages = [
["Cloudmesh", None, None, ['all'],
[
["Home", "/", None],
["Status", "/status", None],
["Profile", "/profile/", None],
],
],
["Clouds", "cm/refresh", "365_restart", ['all'],
[
["Refresh", "/cm/refresh", None],
["VMs", "/mesh/servers", None],
["Images", "/mesh/images", None],
["Flavors", "/mesh/flavors/", None],
],
],
["HPC Queues", None, None, ['all'],
[
["Jobs", "/mesh/qstat", None],
["Queues Info", "/mesh/qinfo", None],
["Rack Diagram", "/inventory/rack", None],
]
],
["Admin", None, None, ['admin'],
[
["Admin", "/admin", None],
["Users - LDAP", "/users/ldap", None],
["Users - Cloud", "/mesh/users/", None],
["Register - Cloud", "/mesh/register/clouds", None],
]
],
["Admin - Inventory", None, None, ['admin'],
[
["Overview", "/inventory/", None],
["Table", "/inventory/summary", None],
["Images", "/inventory/images", None],
],
],
["Admin - Provision", None, None, ['admin', 'rain'],
[
["Policy", "/provision/policy", None],
["Overview", "/provision/summary/", None],
["Form", "/provision/", None],
],
],
["Admin - Launcher", None, None, ['admin', 'rain'],
[
["Launcher", "/cm/launch", None],
["Register", "/cm/register", None],
]
],
]
| super_duper_sidebar_pages = [
["Cloudmesh", None, None, ['all'],
[
["Home", "/", None],
["Status", "/status", None],
["Profile", "/profile/", None],
],
],
["Clouds", "cm/refresh", "365_restart", ['all'],
[
["Refresh", "/cm/refresh", None],
["VMs", "/mesh/servers", None],
["Images", "/mesh/images", None],
["Flavors", "/mesh/flavors/", None],
],
],
["HPC Queues", None, None, ['all'],
[
["Jobs", "/mesh/qstat", None],
["Queues Info", "/mesh/qinfo", None],
["Rack Diagram", "/inventory/rack", None],
]
],
["Admin", None, None, ['admin'],
[
["Admin", "/admin", None],
["Users - LDAP", "/users/ldap", None],
["Users - Cloud", "/mesh/users/", None],
["Register - Cloud", "/mesh/register/clouds", None],
]
],
["Admin - Inventory", None, None, ['admin'],
[
["Overview", "/inventory/", None],
["Table", "/inventory/summary", None],
["Images", "/inventory/images", None],
],
],
["Admin - Provision", None, None, ['admin', 'rain'],
[
["Policy", "/provision/policy", None],
["Overview", "/provision/summary/", None],
["Form", "/provision/", None],
],
],
["Admin - Launcher", None, None, ['admin', 'rain'],
[
["Launcher", "/cm/launch", None],
["Register", "/cm/register", None],
]
],
]
| none | 1 | 1.111936 | 1 | |
src/Strategies/Strategy.py | alexanu/TradingBot | 0 | 6612961 | <reponame>alexanu/TradingBot<gh_stars>0
import logging
import os
import inspect
import sys
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from Utility.Utils import TradeDirection
class Strategy:
"""
Generic strategy template to use as a parent class for custom strategies.
"""
def __init__(self, config, broker):
self.positions = None
self.broker = broker
# Read configuration of derived Strategy
self.read_configuration(config)
# Initialise derived Strategy
self.initialise()
def set_open_positions(self, positions):
"""
Set the account open positions
"""
self.positions = positions
def run(self, market):
"""
Run the strategy against the specified market
"""
datapoints = self.fetch_datapoints(market)
logging.debug("Strategy datapoints: {}".format(datapoints))
if datapoints is None:
logging.debug('Unable to fetch market datapoints')
return TradeDirection.NONE, None, None
return self.find_trade_signal(market, datapoints)
#############################################################
# OVERRIDE THESE FUNCTIONS IN STRATEGY IMPLEMENTATION
#############################################################
def initialise(self):
"""
Must override
"""
raise NotImplementedError("Not implemented: initialise")
def read_configuration(self, config):
"""
Must override
"""
raise NotImplementedError("Not implemented: read_configuration")
def fetch_datapoints(self, market):
"""
Must override
"""
raise NotImplementedError("Not implemented: fetch_datapoints")
def find_trade_signal(self, epic_id, prices):
"""
Must override
"""
raise NotImplementedError("Not implemented: find_trade_signal")
def backtest(self, market, start_date, end_date):
"""
Must override
"""
return NotImplementedError("This strategy doe not support backtesting")
##############################################################
##############################################################
| import logging
import os
import inspect
import sys
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from Utility.Utils import TradeDirection
class Strategy:
"""
Generic strategy template to use as a parent class for custom strategies.
"""
def __init__(self, config, broker):
self.positions = None
self.broker = broker
# Read configuration of derived Strategy
self.read_configuration(config)
# Initialise derived Strategy
self.initialise()
def set_open_positions(self, positions):
"""
Set the account open positions
"""
self.positions = positions
def run(self, market):
"""
Run the strategy against the specified market
"""
datapoints = self.fetch_datapoints(market)
logging.debug("Strategy datapoints: {}".format(datapoints))
if datapoints is None:
logging.debug('Unable to fetch market datapoints')
return TradeDirection.NONE, None, None
return self.find_trade_signal(market, datapoints)
#############################################################
# OVERRIDE THESE FUNCTIONS IN STRATEGY IMPLEMENTATION
#############################################################
def initialise(self):
"""
Must override
"""
raise NotImplementedError("Not implemented: initialise")
def read_configuration(self, config):
"""
Must override
"""
raise NotImplementedError("Not implemented: read_configuration")
def fetch_datapoints(self, market):
"""
Must override
"""
raise NotImplementedError("Not implemented: fetch_datapoints")
def find_trade_signal(self, epic_id, prices):
"""
Must override
"""
raise NotImplementedError("Not implemented: find_trade_signal")
def backtest(self, market, start_date, end_date):
"""
Must override
"""
return NotImplementedError("This strategy doe not support backtesting")
##############################################################
############################################################## | de | 0.450806 | Generic strategy template to use as a parent class for custom strategies. # Read configuration of derived Strategy # Initialise derived Strategy Set the account open positions Run the strategy against the specified market ############################################################# # OVERRIDE THESE FUNCTIONS IN STRATEGY IMPLEMENTATION ############################################################# Must override Must override Must override Must override Must override ############################################################## ############################################################## | 2.507375 | 3 |
Python-Exercise-100/python-exercise-example09.py | MiracleWong/PythonPractice | 0 | 6612962 | <filename>Python-Exercise-100/python-exercise-example09.py
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# 地址:http://www.runoob.com/python/python-exercise-example9.html
import time
Dictory = {1: "a", 2: "b", 3: "c", 4: "d", 5: "e"}
for key, value in dict.items(Dictory):
print(key, value)
time.sleep(1) # 暂停 1 秒
| <filename>Python-Exercise-100/python-exercise-example09.py
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# 地址:http://www.runoob.com/python/python-exercise-example9.html
import time
Dictory = {1: "a", 2: "b", 3: "c", 4: "d", 5: "e"}
for key, value in dict.items(Dictory):
print(key, value)
time.sleep(1) # 暂停 1 秒
| zh | 0.234319 | #!/usr/bin/env python # -*- coding: UTF-8 -*- # 地址:http://www.runoob.com/python/python-exercise-example9.html # 暂停 1 秒 | 3.110256 | 3 |
geocompose/export.py | InnovativeInventor/geocompose | 0 | 6612963 | import geopandas
def export(dataframe: geopandas.GeoDataFrame, location: str) -> bool:
"""
Exports a GeoDataFrame to a folder in various formats the location of the dir does not have to contain a trailing slash
"""
print(location)
success = True
try:
dataframe.to_file(location + ".geojson", driver="GeoJSON")
except ValueError as e:
success = False
print("Warning: ", e)
except AttributeError as e:
success = False
print("Warning: ", e)
try:
dataframe.to_file(location + ".shp")
except ValueError as e:
success = False
print("Warning: ", e)
except AttributeError as e:
success = False
print("Warning: ", e)
try:
dataframe.to_file(location + ".gpkg", layer="diff", driver="GPKG")
except ValueError as e:
success = False
print("Warning: ", e)
except AttributeError as e:
success = False
print("Warning: ", e)
return success
def unzip(tuple_iterable):
"""
Inverse of the Python builtin zip function
"""
def fst(tuple_iterable):
for x, _ in tuple_iterable:
yield x
def snd(tuple_iterable):
for _, y in tuple_iterable:
yield y
return fst(tuple_iterable), snd(tuple_iterable)
| import geopandas
def export(dataframe: geopandas.GeoDataFrame, location: str) -> bool:
"""
Exports a GeoDataFrame to a folder in various formats the location of the dir does not have to contain a trailing slash
"""
print(location)
success = True
try:
dataframe.to_file(location + ".geojson", driver="GeoJSON")
except ValueError as e:
success = False
print("Warning: ", e)
except AttributeError as e:
success = False
print("Warning: ", e)
try:
dataframe.to_file(location + ".shp")
except ValueError as e:
success = False
print("Warning: ", e)
except AttributeError as e:
success = False
print("Warning: ", e)
try:
dataframe.to_file(location + ".gpkg", layer="diff", driver="GPKG")
except ValueError as e:
success = False
print("Warning: ", e)
except AttributeError as e:
success = False
print("Warning: ", e)
return success
def unzip(tuple_iterable):
"""
Inverse of the Python builtin zip function
"""
def fst(tuple_iterable):
for x, _ in tuple_iterable:
yield x
def snd(tuple_iterable):
for _, y in tuple_iterable:
yield y
return fst(tuple_iterable), snd(tuple_iterable)
| en | 0.864372 | Exports a GeoDataFrame to a folder in various formats the location of the dir does not have to contain a trailing slash Inverse of the Python builtin zip function | 3.287694 | 3 |
s3netcdfapi/export/to_json.py | meracan/s3-netcdf-api | 1 | 6612964 | <gh_stars>1-10
from .table import to_table
def to_json(obj,data):
filepath=obj['filepath']+".json"
to_table(obj,data).to_json(filepath,date_format = 'iso')
return filepath | from .table import to_table
def to_json(obj,data):
filepath=obj['filepath']+".json"
to_table(obj,data).to_json(filepath,date_format = 'iso')
return filepath | none | 1 | 2.232358 | 2 | |
src/scripts/lib/EDict.py | Hoeze/CADD-scripts | 40 | 6612965 | <gh_stars>10-100
#!/usr/bin/env python
# -*- coding: ASCII -*-
"""
:Author: <NAME>
:Contact: <EMAIL>
:Date: *27.03.2008
"""
import math
class EDict:
def __init__(self):
self._internal_dict = {}
self._sorted_keys = []
self._is_sync = True
def __len__(self):
return len(self._internal_dict.keys())
def __getitem__(self,key):
return self._internal_dict[key]
def __repr__(self):
"""Return string representation of a EDict."""
return repr(self._internal_dict)
# __str__ is the same as __repr__
__str__ = __repr__
def clear(self):
self._is_sync = True
self._internal_dict = {}
self._sorted_keys = []
def set(self,key,value):
if key == None:
raise TypeError,"EDict does not allow None keys"
if not(self._internal_dict.has_key(key)):
self._is_sync = False
self._internal_dict[key] = value
return True
def get(self,key):
if not(self._internal_dict.has_key(key)):
return None
else:
return self._internal_dict[key]
def get_keys(self): return self._internal_dict.keys()
def get_values(self): return self._internal_dict.values()
def get_items(self): return self._internal_dict.items()
def iterkeys(self): return self._internal_dict.iterkeys()
def itervalues(self): return self._internal_dict.itervalues()
def iteritems(self): return self._internal_dict.iteritems()
def has_key(self,elem): return self._internal_dict.has_key(elem)
def _make_sync(self):
self._sorted_keys = self._internal_dict.keys()
self._sorted_keys.sort()
self._is_sync = True
def get_smaller(self,key):
if not(self._is_sync): self._make_sync()
cur_len = len(self._sorted_keys)
if cur_len > 0:
if not(key <= self._sorted_keys[0]):
cur_pos = -1
forlast = -1
new_pos = cur_len/2
dist = max(int(round(cur_len/4.0)),1)
while(cur_pos != new_pos) and (new_pos != forlast):
forlast = cur_pos
cur_pos = new_pos
#print cur_pos, dist
if key > self._sorted_keys[cur_pos]:
new_pos = cur_pos+dist
if (new_pos >= cur_len): new_pos = cur_len-1
elif key < self._sorted_keys[cur_pos]:
new_pos = cur_pos-dist
if (new_pos < 0): new_pos = 0
else:
new_pos = cur_pos
dist = max(int(dist/2.0),1)
if (cur_pos+1 < cur_len) and (cur_pos > 0):
if (key == self._sorted_keys[cur_pos]):
return (self._sorted_keys[cur_pos-1],self._internal_dict[self._sorted_keys[cur_pos-1]])
elif ((key <= self._sorted_keys[cur_pos+1]) and (key > self._sorted_keys[cur_pos])):
return (self._sorted_keys[cur_pos],self._internal_dict[self._sorted_keys[cur_pos]])
elif ((key <= self._sorted_keys[cur_pos]) and (key > self._sorted_keys[cur_pos-1])):
return (self._sorted_keys[cur_pos-1],self._internal_dict[self._sorted_keys[cur_pos-1]])
else:
print "get_smaller: SHOULD NOT HAPPEN!",cur_pos,"max:",cur_len
elif ((cur_pos == 0) and (key > self._sorted_keys[cur_pos])):
return (self._sorted_keys[cur_pos],self._internal_dict[self._sorted_keys[cur_pos]])
elif ((cur_pos+1 == cur_len) and (key > self._sorted_keys[cur_pos])):
return (self._sorted_keys[cur_pos],self._internal_dict[self._sorted_keys[cur_pos]])
elif ((cur_pos+1 == cur_len) and (key > self._sorted_keys[cur_pos-1])):
return (self._sorted_keys[cur_pos-1],self._internal_dict[self._sorted_keys[cur_pos-1]])
return (None,None)
def get_smaller_equal(self,key):
if self.has_key(key):
return key,self._internal_dict[key]
else:
return self.get_smaller(key)
def get_larger(self,key):
if not(self._is_sync): self._make_sync()
cur_len = len(self._sorted_keys)
if cur_len > 0:
if not(key >= self._sorted_keys[-1]):
cur_pos = -1
forlast = -1
new_pos = cur_len/2
dist = max(int(round(cur_len/4.0)),1)
while(cur_pos != new_pos) and (new_pos != forlast):
forlast = cur_pos
cur_pos = new_pos
#print cur_pos, dist
if key > self._sorted_keys[cur_pos]:
new_pos = cur_pos+dist
if (new_pos >= cur_len): new_pos = cur_len-1
elif key < self._sorted_keys[cur_pos]:
new_pos = cur_pos-dist
if (new_pos < 0): new_pos = 0
else:
new_pos = cur_pos
dist = max(int(dist/2.0),1)
if (cur_pos+1 < cur_len) and (cur_pos > 0):
if (key == self._sorted_keys[cur_pos]):
return (self._sorted_keys[cur_pos+1],self._internal_dict[self._sorted_keys[cur_pos+1]])
elif ((key < self._sorted_keys[cur_pos+1]) and (key >= self._sorted_keys[cur_pos])):
return (self._sorted_keys[cur_pos+1],self._internal_dict[self._sorted_keys[cur_pos+1]])
elif ((key < self._sorted_keys[cur_pos]) and (key >= self._sorted_keys[cur_pos-1])):
return (self._sorted_keys[cur_pos],self._internal_dict[self._sorted_keys[cur_pos]])
else:
print "get_larger: SHOULD NOT HAPPEN!",cur_pos,"max:",cur_len
elif ((cur_pos == 0) and (key < self._sorted_keys[cur_pos])):
return (self._sorted_keys[cur_pos],self._internal_dict[self._sorted_keys[cur_pos]])
elif ((cur_pos == 0) and (key < self._sorted_keys[cur_pos+1])):
return (self._sorted_keys[cur_pos+1],self._internal_dict[self._sorted_keys[cur_pos+1]])
elif ((cur_pos+1 == cur_len) and (key < self._sorted_keys[cur_pos])):
return (self._sorted_keys[cur_pos],self._internal_dict[self._sorted_keys[cur_pos]])
return (None,None)
def get_larger_equal(self,key):
if self.has_key(key):
return key,self._internal_dict[key]
else:
return self.get_larger(key)
if __name__ == '__main__':
import os
convtbl = EDict()
convtbl.set(149.449379,"99")
convtbl.set(149.399739,"96")
convtbl.set(4.393534,"23")
convtbl.set(3,"15")
convtbl.set(0,"5")
convtbl.set(-1.933954,"0.007")
convtbl.set(-2.118821,"0.005")
convtbl.set(-2.388300,"0.003")
convtbl.set(-2.874310,"0.001")
convtbl.set(-226.479313,"0")
print 150,convtbl.get_larger_equal(150)
print 149.4,convtbl.get_larger_equal(149.4)
print 3,convtbl.get_larger_equal(3)
print -2.3,convtbl.get_larger_equal(-2.3)
print -2.4,convtbl.get_larger_equal(-2.4)
print -2.8,convtbl.get_larger_equal(-2.8)
print -2.9,convtbl.get_larger_equal(-2.9)
print -227,convtbl.get_larger_equal(-227)
print "###############"
convtbl = EDict()
convtbl.set(149.449379,"99")
convtbl.set(149.399739,"96")
convtbl.set(4.393534,"23")
convtbl.set(3,"15")
convtbl.set(-1.933954,"0.007")
convtbl.set(-2.018751,"0.006")
convtbl.set(-2.118821,"0.005")
convtbl.set(-2.239310,"0.004")
convtbl.set(-2.388300,"0.003")
convtbl.set(-2.580755,"0.002")
convtbl.set(-2.874310,"0.001")
convtbl.set(-226.479313,"0")
print 150,convtbl.get_larger_equal(150)
print 149.4,convtbl.get_larger_equal(149.4)
print 3,convtbl.get_larger_equal(3)
print -2.3,convtbl.get_larger_equal(-2.3)
print -2.4,convtbl.get_larger_equal(-2.4)
print -2.6,convtbl.get_larger_equal(-2.6)
print -2.8,convtbl.get_larger_equal(-2.8)
print -2.9,convtbl.get_larger_equal(-2.9)
print -227,convtbl.get_larger_equal(-227)
print "###############"
table = os.environ['CADD'] + "/whole_genome/conversion_tbl_cave/conversion_table_ext.tsv"
maxValue,minValue = None,None
convtbl = EDict()
if os.path.exists(table):
infile = open(table)
for line in infile:
fields = line.split()
if len(fields) == 2:
val = float(fields[1])
convtbl.set(val,fields[0])
if val > maxValue or maxValue == None: maxValue = val
if val < minValue or minValue == None: minValue = val
infile.close()
#convtbl.set(-220.0,"0")
print 150,convtbl.get_larger_equal(150)
print 149.4,convtbl.get_larger_equal(149.4)
print 3,convtbl.get_larger_equal(3)
print -2.3,convtbl.get_larger_equal(-2.3)
print -2.4,convtbl.get_larger_equal(-2.4)
print -2.6,convtbl.get_larger_equal(-2.6)
print -2.8,convtbl.get_larger_equal(-2.8)
print -2.9,convtbl.get_larger_equal(-2.9)
print -227,convtbl.get_larger_equal(-227)
print "###"
print len(convtbl)
print "###"
count = 0
for key,value in sorted(convtbl.iteritems()):
print key,value
count += 1
if count > 10: break
| #!/usr/bin/env python
# -*- coding: ASCII -*-
"""
:Author: <NAME>
:Contact: <EMAIL>
:Date: *27.03.2008
"""
import math
class EDict:
def __init__(self):
self._internal_dict = {}
self._sorted_keys = []
self._is_sync = True
def __len__(self):
return len(self._internal_dict.keys())
def __getitem__(self,key):
return self._internal_dict[key]
def __repr__(self):
"""Return string representation of a EDict."""
return repr(self._internal_dict)
# __str__ is the same as __repr__
__str__ = __repr__
def clear(self):
self._is_sync = True
self._internal_dict = {}
self._sorted_keys = []
def set(self,key,value):
if key == None:
raise TypeError,"EDict does not allow None keys"
if not(self._internal_dict.has_key(key)):
self._is_sync = False
self._internal_dict[key] = value
return True
def get(self,key):
if not(self._internal_dict.has_key(key)):
return None
else:
return self._internal_dict[key]
def get_keys(self): return self._internal_dict.keys()
def get_values(self): return self._internal_dict.values()
def get_items(self): return self._internal_dict.items()
def iterkeys(self): return self._internal_dict.iterkeys()
def itervalues(self): return self._internal_dict.itervalues()
def iteritems(self): return self._internal_dict.iteritems()
def has_key(self,elem): return self._internal_dict.has_key(elem)
def _make_sync(self):
self._sorted_keys = self._internal_dict.keys()
self._sorted_keys.sort()
self._is_sync = True
def get_smaller(self,key):
if not(self._is_sync): self._make_sync()
cur_len = len(self._sorted_keys)
if cur_len > 0:
if not(key <= self._sorted_keys[0]):
cur_pos = -1
forlast = -1
new_pos = cur_len/2
dist = max(int(round(cur_len/4.0)),1)
while(cur_pos != new_pos) and (new_pos != forlast):
forlast = cur_pos
cur_pos = new_pos
#print cur_pos, dist
if key > self._sorted_keys[cur_pos]:
new_pos = cur_pos+dist
if (new_pos >= cur_len): new_pos = cur_len-1
elif key < self._sorted_keys[cur_pos]:
new_pos = cur_pos-dist
if (new_pos < 0): new_pos = 0
else:
new_pos = cur_pos
dist = max(int(dist/2.0),1)
if (cur_pos+1 < cur_len) and (cur_pos > 0):
if (key == self._sorted_keys[cur_pos]):
return (self._sorted_keys[cur_pos-1],self._internal_dict[self._sorted_keys[cur_pos-1]])
elif ((key <= self._sorted_keys[cur_pos+1]) and (key > self._sorted_keys[cur_pos])):
return (self._sorted_keys[cur_pos],self._internal_dict[self._sorted_keys[cur_pos]])
elif ((key <= self._sorted_keys[cur_pos]) and (key > self._sorted_keys[cur_pos-1])):
return (self._sorted_keys[cur_pos-1],self._internal_dict[self._sorted_keys[cur_pos-1]])
else:
print "get_smaller: SHOULD NOT HAPPEN!",cur_pos,"max:",cur_len
elif ((cur_pos == 0) and (key > self._sorted_keys[cur_pos])):
return (self._sorted_keys[cur_pos],self._internal_dict[self._sorted_keys[cur_pos]])
elif ((cur_pos+1 == cur_len) and (key > self._sorted_keys[cur_pos])):
return (self._sorted_keys[cur_pos],self._internal_dict[self._sorted_keys[cur_pos]])
elif ((cur_pos+1 == cur_len) and (key > self._sorted_keys[cur_pos-1])):
return (self._sorted_keys[cur_pos-1],self._internal_dict[self._sorted_keys[cur_pos-1]])
return (None,None)
def get_smaller_equal(self,key):
if self.has_key(key):
return key,self._internal_dict[key]
else:
return self.get_smaller(key)
def get_larger(self,key):
if not(self._is_sync): self._make_sync()
cur_len = len(self._sorted_keys)
if cur_len > 0:
if not(key >= self._sorted_keys[-1]):
cur_pos = -1
forlast = -1
new_pos = cur_len/2
dist = max(int(round(cur_len/4.0)),1)
while(cur_pos != new_pos) and (new_pos != forlast):
forlast = cur_pos
cur_pos = new_pos
#print cur_pos, dist
if key > self._sorted_keys[cur_pos]:
new_pos = cur_pos+dist
if (new_pos >= cur_len): new_pos = cur_len-1
elif key < self._sorted_keys[cur_pos]:
new_pos = cur_pos-dist
if (new_pos < 0): new_pos = 0
else:
new_pos = cur_pos
dist = max(int(dist/2.0),1)
if (cur_pos+1 < cur_len) and (cur_pos > 0):
if (key == self._sorted_keys[cur_pos]):
return (self._sorted_keys[cur_pos+1],self._internal_dict[self._sorted_keys[cur_pos+1]])
elif ((key < self._sorted_keys[cur_pos+1]) and (key >= self._sorted_keys[cur_pos])):
return (self._sorted_keys[cur_pos+1],self._internal_dict[self._sorted_keys[cur_pos+1]])
elif ((key < self._sorted_keys[cur_pos]) and (key >= self._sorted_keys[cur_pos-1])):
return (self._sorted_keys[cur_pos],self._internal_dict[self._sorted_keys[cur_pos]])
else:
print "get_larger: SHOULD NOT HAPPEN!",cur_pos,"max:",cur_len
elif ((cur_pos == 0) and (key < self._sorted_keys[cur_pos])):
return (self._sorted_keys[cur_pos],self._internal_dict[self._sorted_keys[cur_pos]])
elif ((cur_pos == 0) and (key < self._sorted_keys[cur_pos+1])):
return (self._sorted_keys[cur_pos+1],self._internal_dict[self._sorted_keys[cur_pos+1]])
elif ((cur_pos+1 == cur_len) and (key < self._sorted_keys[cur_pos])):
return (self._sorted_keys[cur_pos],self._internal_dict[self._sorted_keys[cur_pos]])
return (None,None)
def get_larger_equal(self,key):
if self.has_key(key):
return key,self._internal_dict[key]
else:
return self.get_larger(key)
if __name__ == '__main__':
import os
convtbl = EDict()
convtbl.set(149.449379,"99")
convtbl.set(149.399739,"96")
convtbl.set(4.393534,"23")
convtbl.set(3,"15")
convtbl.set(0,"5")
convtbl.set(-1.933954,"0.007")
convtbl.set(-2.118821,"0.005")
convtbl.set(-2.388300,"0.003")
convtbl.set(-2.874310,"0.001")
convtbl.set(-226.479313,"0")
print 150,convtbl.get_larger_equal(150)
print 149.4,convtbl.get_larger_equal(149.4)
print 3,convtbl.get_larger_equal(3)
print -2.3,convtbl.get_larger_equal(-2.3)
print -2.4,convtbl.get_larger_equal(-2.4)
print -2.8,convtbl.get_larger_equal(-2.8)
print -2.9,convtbl.get_larger_equal(-2.9)
print -227,convtbl.get_larger_equal(-227)
print "###############"
convtbl = EDict()
convtbl.set(149.449379,"99")
convtbl.set(149.399739,"96")
convtbl.set(4.393534,"23")
convtbl.set(3,"15")
convtbl.set(-1.933954,"0.007")
convtbl.set(-2.018751,"0.006")
convtbl.set(-2.118821,"0.005")
convtbl.set(-2.239310,"0.004")
convtbl.set(-2.388300,"0.003")
convtbl.set(-2.580755,"0.002")
convtbl.set(-2.874310,"0.001")
convtbl.set(-226.479313,"0")
print 150,convtbl.get_larger_equal(150)
print 149.4,convtbl.get_larger_equal(149.4)
print 3,convtbl.get_larger_equal(3)
print -2.3,convtbl.get_larger_equal(-2.3)
print -2.4,convtbl.get_larger_equal(-2.4)
print -2.6,convtbl.get_larger_equal(-2.6)
print -2.8,convtbl.get_larger_equal(-2.8)
print -2.9,convtbl.get_larger_equal(-2.9)
print -227,convtbl.get_larger_equal(-227)
print "###############"
table = os.environ['CADD'] + "/whole_genome/conversion_tbl_cave/conversion_table_ext.tsv"
maxValue,minValue = None,None
convtbl = EDict()
if os.path.exists(table):
infile = open(table)
for line in infile:
fields = line.split()
if len(fields) == 2:
val = float(fields[1])
convtbl.set(val,fields[0])
if val > maxValue or maxValue == None: maxValue = val
if val < minValue or minValue == None: minValue = val
infile.close()
#convtbl.set(-220.0,"0")
print 150,convtbl.get_larger_equal(150)
print 149.4,convtbl.get_larger_equal(149.4)
print 3,convtbl.get_larger_equal(3)
print -2.3,convtbl.get_larger_equal(-2.3)
print -2.4,convtbl.get_larger_equal(-2.4)
print -2.6,convtbl.get_larger_equal(-2.6)
print -2.8,convtbl.get_larger_equal(-2.8)
print -2.9,convtbl.get_larger_equal(-2.9)
print -227,convtbl.get_larger_equal(-227)
print "###"
print len(convtbl)
print "###"
count = 0
for key,value in sorted(convtbl.iteritems()):
print key,value
count += 1
if count > 10: break | en | 0.347412 | #!/usr/bin/env python # -*- coding: ASCII -*- :Author: <NAME> :Contact: <EMAIL> :Date: *27.03.2008 Return string representation of a EDict. # __str__ is the same as __repr__ #print cur_pos, dist #print cur_pos, dist ##############" ##############" #convtbl.set(-220.0,"0") ##" ##" | 3.371963 | 3 |
imgdiff.py | kevincwells/imgdiff | 0 | 6612966 | #!/usr/bin/env python3
import os
import stat
import sys
import argparse
import subprocess
import hashlib
import tempfile
import tarfile
class Image(object):
def __init__(self, image=None, root=None, files=None, tmp_dir=None):
super(Image, self).__init__()
self.image = image
self.root = root
self.files = files
self.tmp_dir = tmp_dir
def sha256sum(filename,block_size=65536, retry=True): # Default block_size is 64k
sha256 = hashlib.sha256()
try:
with open(filename, 'rb') as f:
while True:
data = f.read(block_size)
if not data:
break
sha256.update(data)
except PermissionError:
if retry:
os.chmod(filename, os.stat(filename).st_mode | stat.S_IRUSR) # Add u+r permission
return sha256sum(filename, retry=False)
else:
raise PermissionError
return sha256.hexdigest()
def get_contents(top_dir, sorted=False):
top_dir = top_dir.rstrip('/')
file_dict = {}
for root, dirs, files in os.walk(top_dir):
if sorted:
dirs.sort() # Affects recursive traversal order
files.sort() # Affects file inspection order
for file in files:
path = os.path.relpath(root,top_dir)
if not path in file_dict:
file_dict[path] = {}
file_dict[path][file] = os.path.join(root, file)
return file_dict
def main():
    """Parse CLI args, unpack the two images, and diff them file-by-file.

    Returns 0 when the trees match exactly, 1 when any file or directory
    differs or is missing.
    """
    parser = argparse.ArgumentParser(
        description="image and directory binary diff tool")
    parser.add_argument('images', metavar='IMAGE_FILE', nargs=2,
                        help='Two images to make binary diff. Each should be a directory or .tar.bz2 image of a build.')
    parser.add_argument('-d', '--diffoscope',
                        help='run diffoscope on files that do not match.',
                        action='store_true')
    parser.add_argument('-o', '--output-file',
                        help='output file to use instead of stdout.')
    parser.add_argument('-s', '--stats',
                        help='output statistics about diff', action='store_true')
    parser.add_argument('-r', '--sort',
                        help='traverse files in sorted order (easier for human inspection)', action='store_true')
    args = parser.parse_args()

    # When writing to a file, errors go to the same file; otherwise stderr.
    output_handle = open(args.output_file, 'w') if args.output_file else sys.stdout
    error_handle = output_handle if args.output_file else sys.stderr

    # Fail fast if diffoscope was requested but is not installed.
    if args.diffoscope:
        try:
            subprocess.run("diffoscope --version", shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True)
        except subprocess.SubprocessError:
            error_handle.write("Please install diffoscope\n")
            sys.exit(1)

    image1 = Image(image=args.images[0])
    image2 = Image(image=args.images[1])

    # Set up the directories to compare
    if os.path.isdir(image1.image):  # If image1 is an already unpacked dir
        image1.root = image1.image
    elif tarfile.is_tarfile(image1.image):  # If image1 is tar.bz2
        image1.tmp_dir = tempfile.TemporaryDirectory()
        image1.root = image1.tmp_dir.name
        # Unpack the images to temporary directory
        try:
            subprocess.run('tar --atime-preserve -xjsf %s -C %s' % (image1.image, image1.root),
                           shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        except subprocess.SubprocessError:
            error_handle.write("Error unpacking image: %s" % image1.image)
            sys.exit(1)
    # elif is ext4 partition, mount it

    if os.path.isdir(image2.image):  # If image2 is an already unpacked dir
        image2.root = image2.image
    elif tarfile.is_tarfile(image2.image):
        image2.tmp_dir = tempfile.TemporaryDirectory()
        image2.root = image2.tmp_dir.name
        # Unpack the images to temporary directory
        # NOTE(review): image2 extracts with -xjpsf (-p preserves perms)
        # while image1 above uses -xjsf -- confirm the asymmetry is intended.
        try:
            subprocess.run('tar --atime-preserve -xjpsf %s -C %s' % (image2.image, image2.root),
                           shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        except subprocess.SubprocessError:
            error_handle.write("Error unpacking image: %s" % image2.image)
            sys.exit(1)
    # elif is ext4 partition, mount it

    # Index both trees: {relative_dir: {filename: absolute_path}}.
    image1.files = get_contents(image1.root, args.sort)
    image2.files = get_contents(image2.root, args.sort)

    ret = 0;
    stats = {'match' : 0,
             'missmatch' : 0,
             'missing1' : 0,
             'missing2' : 0,
             'dir_missing1' : 0,
             'dir_missing2' : 0}
    for dir in image1.files:
        if dir in image2.files:
            # Check all files in image1.files[dir] against files in image2.files[dir]
            for file in image1.files[dir]:
                if file in image2.files[dir]:
                    match = True
                    try:
                        # If either file is a symlink, check that the other is too and that they point to the same target
                        if os.path.islink(image1.files[dir][file]) or os.path.islink(image2.files[dir][file]):
                            if not (os.path.islink(image1.files[dir][file]) and os.path.islink(image2.files[dir][file])):
                                match = False
                            elif os.readlink(image1.files[dir][file]) != os.readlink(image2.files[dir][file]):
                                match = False
                        # Else if it's a normal file, compare checksums
                        elif sha256sum(image1.files[dir][file]) != sha256sum(image2.files[dir][file]):
                            match = False
                    except PermissionError:
                        # NOTE(review): on PermissionError the pair can still be
                        # counted as a match (match stays True) and the message
                        # lacks a trailing newline -- confirm both are intended.
                        error_handle.write('Permission Error: cannot compare %s' % os.path.join(dir,file))
                    # If the files matched
                    if match:
                        if args.stats:
                            stats['match'] += 1
                    else:
                        output_handle.write("File Missmatch: '%s' from %s and %s\n" % (os.path.join(dir,file), image1.image, image2.image))
                        if args.stats:
                            stats['missmatch'] += 1
                        ret = 1
                        if args.diffoscope:
                            output_handle.write("diffoscope output:\n")
                            try:
                                output_handle.write(subprocess.run('diffoscope %s %s' % (image1.files[dir][file],image2.files[dir][file]),
                                                                   shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.decode('utf-8'))
                            except subprocess.SubprocessError:
                                error_handle.write('Call to diffoscope failed.\n')
                    image2.files[dir].pop(file, None)  # So we can check later if anything is left in image2.files[dir]
                else:
                    output_handle.write("Missing File: '%s' from %s not found in %s\n" % (os.path.join(dir,file), image1.image, image2.image))
                    if args.stats:
                        stats['missing1'] += 1
                    ret = 1
            # If there's anything left in image2.files[dir], it wasn't in image1.files[dir]
            for file in image2.files[dir]:
                output_handle.write("Missing File: '%s' from %s not found in %s\n" % (os.path.join(dir,file), image2.image, image1.image))
                if args.stats:
                    stats['missing2'] += 1
                ret = 1
            image2.files.pop(dir, None)  # So we can check later if anything is left in image2.files
        else:
            output_handle.write("Missing Directory (with %i files): '%s' from %s not found in %s\n" % (len(image1.files[dir]), dir, image1.image, image2.image))
            if args.stats:
                stats['dir_missing1'] +=1
                stats['missing1'] += len(image1.files[dir])
            ret = 1
    # if there is anything left in image2.files, it wasn't in image1.files
    for dir in image2.files:
        output_handle.write("Missing Directory (with %i files): '%s' from %s not found in %s\n" % (len(image2.files[dir]), dir, image2.image, image1.image))
        if args.stats:
            stats['dir_missing2'] += 1
            stats['missing2'] += len(image2.files[dir])
        ret = 1

    if args.stats:
        file_total = stats['match'] + stats['missmatch'] + stats['missing1'] + stats['missing2']
        missing_total = stats['missing1'] + stats['missing2']
        # NOTE(review): file_total is 0 when both trees are empty, which would
        # make the percentage formats below divide by zero -- confirm.
        output_handle.write('----------------------STATS----------------------\n')
        output_handle.write('Total files compared: %i\n' % file_total)
        output_handle.write('Matches: %i (%s)\n' % (stats['match'], '{:.2%}'.format(stats['match']/file_total)))
        output_handle.write('Misses: %i (%s)\n' % (stats['missmatch'], '{:.2%}'.format(stats['missmatch']/file_total)))
        output_handle.write('Missing: %i (%s)\n' % (missing_total, '{:.2%}'.format(missing_total/file_total)))
        output_handle.write('Files from %s missing from %s: %i\n' % (image1.image,image2.image,stats['missing1']))
        output_handle.write('Files from %s missing from %s: %i\n' % (image2.image,image1.image,stats['missing2']))
        output_handle.write('Dirs from %s missing from %s: %i\n' % (image1.image,image2.image,stats['dir_missing1']))
        output_handle.write('Dirs from %s missing from %s: %i\n' % (image2.image,image1.image,stats['dir_missing2']))

    # Close output handle if file
    if output_handle is not sys.stdout:
        output_handle.close()

    # Clean up any temp directories
    # (dropping the reference presumably lets TemporaryDirectory finalize
    # and remove itself -- TODO confirm relying on GC here is acceptable)
    if image1.tmp_dir:
        del image1.tmp_dir
    if image2.tmp_dir:
        del image2.tmp_dir

    return ret
if __name__ == '__main__':
    try:
        ret = main()
    except Exception:
        # Top-level catch-all only: report the traceback and exit non-zero
        # rather than letting an uncaught exception escape.
        ret = 1
        import traceback
        traceback.print_exc()
    sys.exit(ret)
| #!/usr/bin/env python3
import os
import stat
import sys
import argparse
import subprocess
import hashlib
import tempfile
import tarfile
class Image(object):
    """Container for one image under comparison.

    Attributes:
        image:   path given on the command line (a directory or a
                 .tar.bz2 archive).
        root:    directory the image contents live in -- the directory
                 itself, or a temporary extraction directory.
        files:   mapping of relative dir -> {filename: absolute path},
                 as produced by get_contents(); None until populated.
        tmp_dir: tempfile.TemporaryDirectory holding extracted contents,
                 or None when no extraction was needed.
    """

    def __init__(self, image=None, root=None, files=None, tmp_dir=None):
        # Zero-argument super() is the idiomatic Python 3 spelling of
        # super(Image, self) and behaves identically here.
        super().__init__()
        self.image = image
        self.root = root
        self.files = files
        self.tmp_dir = tmp_dir
def sha256sum(filename, block_size=65536, retry=True):  # Default block_size is 64k
    """Return the hex SHA-256 digest of *filename*, read in chunks.

    On PermissionError one attempt is made to add u+r permission and
    retry; a second failure re-raises the original exception.

    Args:
        filename:   path of the file to hash.
        block_size: read chunk size in bytes (default 64 KiB).
        retry:      internal flag; False on the post-chmod retry so we
                    never loop.
    """
    sha256 = hashlib.sha256()
    try:
        with open(filename, 'rb') as f:
            while True:
                data = f.read(block_size)
                if not data:
                    break
                sha256.update(data)
    except PermissionError:
        if retry:
            # Add u+r permission and try exactly once more.
            os.chmod(filename, os.stat(filename).st_mode | stat.S_IRUSR)
            return sha256sum(filename, retry=False)
        # Bare raise re-raises the original PermissionError so its
        # message and traceback are preserved (raising a fresh
        # PermissionError, as before, discarded both).
        raise
    return sha256.hexdigest()
def get_contents(top_dir, sorted=False):
    """Walk *top_dir* and index every file beneath it.

    Returns {relative_dir: {filename: absolute_path}}.  When *sorted* is
    true, directories and files are visited in alphabetical order.
    (The parameter name shadows the builtin, kept for interface
    compatibility; the builtin is not needed inside this function.)
    """
    top_dir = top_dir.rstrip('/')
    file_dict = {}
    for root, dirs, files in os.walk(top_dir):
        if sorted:
            dirs.sort()   # in-place sort steers os.walk's recursion order
            files.sort()  # steers the order files are indexed below
        rel = os.path.relpath(root, top_dir)
        for name in files:
            file_dict.setdefault(rel, {})[name] = os.path.join(root, name)
    return file_dict
def main():
    """Parse CLI args, unpack the two images, and diff them file-by-file.

    Returns 0 when the trees match exactly, 1 when any file or directory
    differs or is missing.
    """
    parser = argparse.ArgumentParser(
        description="image and directory binary diff tool")
    parser.add_argument('images', metavar='IMAGE_FILE', nargs=2,
                        help='Two images to make binary diff. Each should be a directory or .tar.bz2 image of a build.')
    parser.add_argument('-d', '--diffoscope',
                        help='run diffoscope on files that do not match.',
                        action='store_true')
    parser.add_argument('-o', '--output-file',
                        help='output file to use instead of stdout.')
    parser.add_argument('-s', '--stats',
                        help='output statistics about diff', action='store_true')
    parser.add_argument('-r', '--sort',
                        help='traverse files in sorted order (easier for human inspection)', action='store_true')
    args = parser.parse_args()

    # When writing to a file, errors go to the same file; otherwise stderr.
    output_handle = open(args.output_file, 'w') if args.output_file else sys.stdout
    error_handle = output_handle if args.output_file else sys.stderr

    # Fail fast if diffoscope was requested but is not installed.
    if args.diffoscope:
        try:
            subprocess.run("diffoscope --version", shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True)
        except subprocess.SubprocessError:
            error_handle.write("Please install diffoscope\n")
            sys.exit(1)

    image1 = Image(image=args.images[0])
    image2 = Image(image=args.images[1])

    # Set up the directories to compare
    if os.path.isdir(image1.image):  # If image1 is an already unpacked dir
        image1.root = image1.image
    elif tarfile.is_tarfile(image1.image):  # If image1 is tar.bz2
        image1.tmp_dir = tempfile.TemporaryDirectory()
        image1.root = image1.tmp_dir.name
        # Unpack the images to temporary directory
        try:
            subprocess.run('tar --atime-preserve -xjsf %s -C %s' % (image1.image, image1.root),
                           shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        except subprocess.SubprocessError:
            error_handle.write("Error unpacking image: %s" % image1.image)
            sys.exit(1)
    # elif is ext4 partition, mount it

    if os.path.isdir(image2.image):  # If image2 is an already unpacked dir
        image2.root = image2.image
    elif tarfile.is_tarfile(image2.image):
        image2.tmp_dir = tempfile.TemporaryDirectory()
        image2.root = image2.tmp_dir.name
        # Unpack the images to temporary directory
        # NOTE(review): image2 extracts with -xjpsf (-p preserves perms)
        # while image1 above uses -xjsf -- confirm the asymmetry is intended.
        try:
            subprocess.run('tar --atime-preserve -xjpsf %s -C %s' % (image2.image, image2.root),
                           shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        except subprocess.SubprocessError:
            error_handle.write("Error unpacking image: %s" % image2.image)
            sys.exit(1)
    # elif is ext4 partition, mount it

    # Index both trees: {relative_dir: {filename: absolute_path}}.
    image1.files = get_contents(image1.root, args.sort)
    image2.files = get_contents(image2.root, args.sort)

    ret = 0;
    stats = {'match' : 0,
             'missmatch' : 0,
             'missing1' : 0,
             'missing2' : 0,
             'dir_missing1' : 0,
             'dir_missing2' : 0}
    for dir in image1.files:
        if dir in image2.files:
            # Check all files in image1.files[dir] against files in image2.files[dir]
            for file in image1.files[dir]:
                if file in image2.files[dir]:
                    match = True
                    try:
                        # If either file is a symlink, check that the other is too and that they point to the same target
                        if os.path.islink(image1.files[dir][file]) or os.path.islink(image2.files[dir][file]):
                            if not (os.path.islink(image1.files[dir][file]) and os.path.islink(image2.files[dir][file])):
                                match = False
                            elif os.readlink(image1.files[dir][file]) != os.readlink(image2.files[dir][file]):
                                match = False
                        # Else if it's a normal file, compare checksums
                        elif sha256sum(image1.files[dir][file]) != sha256sum(image2.files[dir][file]):
                            match = False
                    except PermissionError:
                        # NOTE(review): on PermissionError the pair can still be
                        # counted as a match (match stays True) and the message
                        # lacks a trailing newline -- confirm both are intended.
                        error_handle.write('Permission Error: cannot compare %s' % os.path.join(dir,file))
                    # If the files matched
                    if match:
                        if args.stats:
                            stats['match'] += 1
                    else:
                        output_handle.write("File Missmatch: '%s' from %s and %s\n" % (os.path.join(dir,file), image1.image, image2.image))
                        if args.stats:
                            stats['missmatch'] += 1
                        ret = 1
                        if args.diffoscope:
                            output_handle.write("diffoscope output:\n")
                            try:
                                output_handle.write(subprocess.run('diffoscope %s %s' % (image1.files[dir][file],image2.files[dir][file]),
                                                                   shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.decode('utf-8'))
                            except subprocess.SubprocessError:
                                error_handle.write('Call to diffoscope failed.\n')
                    image2.files[dir].pop(file, None)  # So we can check later if anything is left in image2.files[dir]
                else:
                    output_handle.write("Missing File: '%s' from %s not found in %s\n" % (os.path.join(dir,file), image1.image, image2.image))
                    if args.stats:
                        stats['missing1'] += 1
                    ret = 1
            # If there's anything left in image2.files[dir], it wasn't in image1.files[dir]
            for file in image2.files[dir]:
                output_handle.write("Missing File: '%s' from %s not found in %s\n" % (os.path.join(dir,file), image2.image, image1.image))
                if args.stats:
                    stats['missing2'] += 1
                ret = 1
            image2.files.pop(dir, None)  # So we can check later if anything is left in image2.files
        else:
            output_handle.write("Missing Directory (with %i files): '%s' from %s not found in %s\n" % (len(image1.files[dir]), dir, image1.image, image2.image))
            if args.stats:
                stats['dir_missing1'] +=1
                stats['missing1'] += len(image1.files[dir])
            ret = 1
    # if there is anything left in image2.files, it wasn't in image1.files
    for dir in image2.files:
        output_handle.write("Missing Directory (with %i files): '%s' from %s not found in %s\n" % (len(image2.files[dir]), dir, image2.image, image1.image))
        if args.stats:
            stats['dir_missing2'] += 1
            stats['missing2'] += len(image2.files[dir])
        ret = 1

    if args.stats:
        file_total = stats['match'] + stats['missmatch'] + stats['missing1'] + stats['missing2']
        missing_total = stats['missing1'] + stats['missing2']
        # NOTE(review): file_total is 0 when both trees are empty, which would
        # make the percentage formats below divide by zero -- confirm.
        output_handle.write('----------------------STATS----------------------\n')
        output_handle.write('Total files compared: %i\n' % file_total)
        output_handle.write('Matches: %i (%s)\n' % (stats['match'], '{:.2%}'.format(stats['match']/file_total)))
        output_handle.write('Misses: %i (%s)\n' % (stats['missmatch'], '{:.2%}'.format(stats['missmatch']/file_total)))
        output_handle.write('Missing: %i (%s)\n' % (missing_total, '{:.2%}'.format(missing_total/file_total)))
        output_handle.write('Files from %s missing from %s: %i\n' % (image1.image,image2.image,stats['missing1']))
        output_handle.write('Files from %s missing from %s: %i\n' % (image2.image,image1.image,stats['missing2']))
        output_handle.write('Dirs from %s missing from %s: %i\n' % (image1.image,image2.image,stats['dir_missing1']))
        output_handle.write('Dirs from %s missing from %s: %i\n' % (image2.image,image1.image,stats['dir_missing2']))

    # Close output handle if file
    if output_handle is not sys.stdout:
        output_handle.close()

    # Clean up any temp directories
    # (dropping the reference presumably lets TemporaryDirectory finalize
    # and remove itself -- TODO confirm relying on GC here is acceptable)
    if image1.tmp_dir:
        del image1.tmp_dir
    if image2.tmp_dir:
        del image2.tmp_dir

    return ret
if __name__ == '__main__':
    try:
        ret = main()
    except Exception:
        # Top-level catch-all only: report the traceback and exit non-zero
        # rather than letting an uncaught exception escape.
        ret = 1
        import traceback
        traceback.print_exc()
    sys.exit(ret)
| en | 0.820036 | #!/usr/bin/env python3 # Default block_size is 64k # Add u+r permission # Affects recursive traversal order # Affects file inspection order # Set up the directories to compare # If image1 is an already unpacked dir # If image1 is tar.bz2 # Unpack the images to temporary directory # elif is ext4 partition, mount it # If image2 is an already unpacked dir # Unpack the images to temporary directory # elif is ext4 partition, mount it # Check all files in image1.files[dir] against files in image2.files[dir] # If either file is a symlink, check that the other is too and that they point to the same target # Else if it's a normal file, compare checksums # If the files matched # So we can check later if anything is left in image2.files[dir] # If there's anything left in image2.files[dir], it wasn't in image1.files[dir] # So we can check later if anything is left in image2.files # if there is anything left in image2.files, it wasn't in image1.files # Close output handle if file # Clean up any temp directories | 2.506994 | 3 |
multiTaskLearning/stl.py | liuvictoria/multiTaskLearning | 0 | 6612967 | """Docstring for stl.py
This is the user-facing module for STL training.
Run this module from the command line, and pass in the
appropriate arguments to define the model and data.
"""
import argparse
import json
import os
import numpy as np
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
from dloader import genDataLoader
from wrappers import single_task_trainer
"""
=========== Model ============
"""
from models import STL_VarNet
"""
=========== command line parser ============
"""
# command line argument parser
parser = argparse.ArgumentParser(
    description = 'define parameters and roots for STL training'
)

# hyperparameters
parser.add_argument(
    '--epochs', default=100, type=int,
    help='number of epochs to run'
)
parser.add_argument(
    '--lr', default=0.0002, type=float,
    help='learning rate'
)
parser.add_argument(
    '--gradaccumulation', default=1, type=int,
    help='how many iterations per gradient accumulation; cannot be less than 1'
)
parser.add_argument(
    '--gradaverage', default=0, type=int,
    help="""if true, will average accumulated grads before optimizer.step
    if false, gradaccumulation will occur without averaging (i.e. no hooks)
    value does not matter if gradaccumulation is equal to 1"""
)

# model training
parser.add_argument(
    '--numblocks', default=12, type=int,
    help='number of unrolled blocks in total'
)
parser.add_argument(
    '--network', default='varnet',
    help='type of network ie unet or varnet'
)
parser.add_argument(
    '--weightsdir', default=None,
    help="""for transfer learning, give directory for loading weights;
    i.e. '/mnt/dense/vliu/summer_runs_models/models/STL_baselines/STL_nojoint_varnet_div_coronal_pd/N=481_l1.pt'
    default to None because we're not usually doing transfer
    """
)
parser.add_argument(
    '--device', default='cuda:2',
    help='cuda:2 device default'
)

# dataset properties
parser.add_argument(
    '--datadir', default='/mnt/dense/vliu/summer_dset/',
    help='data root directory; where are datasets contained'
)
parser.add_argument(
    '--mixeddata', default=1, type=int,
    help="""If true, the model trained on mixed data;
    almost always true except for STL trained on single task;
    0 for False; 1 for True"""
)
parser.add_argument(
    '--datasets', nargs='+',
    help='names of one or two sets of data files i.e. div_coronal_pd_fs div_coronal_pd; input the downsampled dataset first',
    required = True
)
parser.add_argument(
    '--bothdatasets', default = [
        'div_coronal_pd_fs', 'div_coronal_pd',
    ],
    nargs='+',
    help="""names of both datasets i.e. div_coronal_pd_fs div_coronal_pd;
    different from opt.datasets if opt.datasets only has one dataset;
    used to determine tensorboard MR images;
    this is annoying but allows for MR image to match across runs
    input the downsampled dataset first"""
)
parser.add_argument(
    '--scarcities', default=[0, 1, 2, 3], type=int, nargs='+',
    help='number of samples in second task will be decreased by 1/2^N; i.e. 0 1 2'
)
parser.add_argument(
    '--accelerations', default=[5, 6, 7], type=int, nargs='+',
    help='list of undersampling factor of k-space for training; validation is average acceleration '
)
# BUG FIX: center fractions are floats (defaults 0.05-0.07); with the old
# type=int, any value supplied on the command line (e.g. "0.05") would be
# rejected by argparse's int conversion.
parser.add_argument(
    '--centerfracs', default=[0.05, 0.06, 0.07], type=float, nargs='+',
    help='list of center fractions sampled of k-space for training; val is average centerfracs'
)
parser.add_argument(
    '--stratified', default=0, type=int,
    help="""if true, stratifies the dataloader"""
)
parser.add_argument(
    '--stratifymethod', default='upsample',
    help="""
    one of [upsample, downsample] for
    scarce, abundant dataset, respectively
    does not matter if --stratified is false"""
)
parser.add_argument(
    '--numworkers', default=16, type=int,
    help='number of workers for PyTorch dataloader'
)

# save / display data
parser.add_argument(
    '--experimentname', default='unnamed_experiment',
    help='experiment name i.e. STL or MTAN_pareto etc.'
)
parser.add_argument(
    '--verbose', default=1, type=int,
    help="""if true, prints to console average costs / metrics"""
)
parser.add_argument(
    '--tensorboard', default=1, type=int,
    help='if true, creates TensorBoard'
)
parser.add_argument(
    '--savefreq', default=10, type=int,
    help='how many epochs per saved recon image'
)

# parse at import time; --datasets is required, so running without it exits
opt = parser.parse_args()
"""
=========== Runs ============
"""
def main(opt):
    """Calls wrappers.py for training

    Creates data loaders, initializes model, and defines learning parameters.
    Trains STL from command line.

    Parameters
    ----------
    opt : argparse.ArgumentParser
        Refer to help documentation above.

    Returns
    -------
    None

    See Also
    --------
    single_task_trainer from wrappers.py
    """
    basedirs = [
        os.path.join(opt.datadir, dataset)
        for dataset in opt.datasets
    ]

    # One full training run per scarcity level (second task kept full size,
    # first task downsampled by 1/2^scarcity -- see --scarcities help).
    for scarcity in opt.scarcities:
        print(f'experiment w scarcity {scarcity}')
        train_dloader = genDataLoader(
            [f'{basedir}/Train' for basedir in basedirs],  # choose randomly
            [scarcity, 0],  # downsample
            center_fractions = opt.centerfracs,
            accelerations = opt.accelerations,
            shuffle = True,
            num_workers= opt.numworkers,
        )
        # Validation uses the mean acceleration/centerfrac for comparability.
        val_dloader = genDataLoader(
            [f'{basedir}/Val' for basedir in basedirs],
            [0, 0],  # no downsampling
            center_fractions = [np.mean(opt.centerfracs)],
            accelerations = [int(np.mean(opt.accelerations))],
            shuffle = False,  # no shuffling to allow visualization
            num_workers= opt.numworkers,
        )
        print('generated dataloaders')

        # other inputs to STL wrapper
        device = torch.device(opt.device if torch.cuda.is_available() else "cpu")
        varnet = STL_VarNet(opt.numblocks).to(device)

        # load weights if doing transfer learning
        if opt.weightsdir:
            varnet.load_state_dict(torch.load(
                opt.weightsdir,
                map_location = device,
                )
            )

        optimizer = torch.optim.Adam(varnet.parameters(),lr = opt.lr)
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=200, gamma=0.5)

        print('start training')
        # genDataLoader presumably returns a (dataloader, ratios-dict) pair,
        # indexed [0]/[1] below -- TODO confirm against dloader.py.
        # NOTE(review): writer_tensorboard is a module-level global created in
        # the __main__ block; this function assumes it exists.
        single_task_trainer(
            train_dloader[0], val_dloader[0],
            train_dloader[1], val_dloader[1],  # ratios dicts
            varnet, device, writer_tensorboard,
            optimizer, scheduler,
            opt,
        )
if __name__ == '__main__':
    # run / model name
    # NOTE(review): model_name includes the 'strat_' marker but run_name does
    # not -- confirm the asymmetry is intentional.
    run_name = f"runs/{opt.experimentname}_{opt.network}_{'_'.join(opt.datasets)}/"
    model_name = f"models/{opt.experimentname}_" + \
        f"{'strat_' if opt.stratified else ''}" + \
        f"{opt.network}_{'_'.join(opt.datasets)}/"
    if not os.path.isdir(model_name):
        os.makedirs(model_name)
    # NOTE(review): the open() on run_name below relies on SummaryWriter
    # having created the run_name directory -- confirm.
    writer_tensorboard = SummaryWriter(log_dir = run_name)

    # write json files to models and runs directories; for future reference
    with open(
        os.path.join(run_name,'parameters.json'), 'w'
    ) as parameter_file:
        json.dump(vars(opt), parameter_file)
    with open(
        os.path.join(model_name,'parameters.json'), 'w'
    ) as parameter_file:
        json.dump(vars(opt), parameter_file)

    main(opt)

    writer_tensorboard.flush()
    writer_tensorboard.close()
| """Docstring for stl.py
This is the user-facing module for STL training.
Run this module from the command line, and pass in the
appropriate arguments to define the model and data.
"""
import argparse
import json
import os
import numpy as np
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
from dloader import genDataLoader
from wrappers import single_task_trainer
"""
=========== Model ============
"""
from models import STL_VarNet
"""
=========== command line parser ============
"""
# command line argument parser
parser = argparse.ArgumentParser(
    description = 'define parameters and roots for STL training'
)

# hyperparameters
parser.add_argument(
    '--epochs', default=100, type=int,
    help='number of epochs to run'
)
parser.add_argument(
    '--lr', default=0.0002, type=float,
    help='learning rate'
)
parser.add_argument(
    '--gradaccumulation', default=1, type=int,
    help='how many iterations per gradient accumulation; cannot be less than 1'
)
parser.add_argument(
    '--gradaverage', default=0, type=int,
    help="""if true, will average accumulated grads before optimizer.step
    if false, gradaccumulation will occur without averaging (i.e. no hooks)
    value does not matter if gradaccumulation is equal to 1"""
)

# model training
parser.add_argument(
    '--numblocks', default=12, type=int,
    help='number of unrolled blocks in total'
)
parser.add_argument(
    '--network', default='varnet',
    help='type of network ie unet or varnet'
)
parser.add_argument(
    '--weightsdir', default=None,
    help="""for transfer learning, give directory for loading weights;
    i.e. '/mnt/dense/vliu/summer_runs_models/models/STL_baselines/STL_nojoint_varnet_div_coronal_pd/N=481_l1.pt'
    default to None because we're not usually doing transfer
    """
)
parser.add_argument(
    '--device', default='cuda:2',
    help='cuda:2 device default'
)

# dataset properties
parser.add_argument(
    '--datadir', default='/mnt/dense/vliu/summer_dset/',
    help='data root directory; where are datasets contained'
)
parser.add_argument(
    '--mixeddata', default=1, type=int,
    help="""If true, the model trained on mixed data;
    almost always true except for STL trained on single task;
    0 for False; 1 for True"""
)
parser.add_argument(
    '--datasets', nargs='+',
    help='names of one or two sets of data files i.e. div_coronal_pd_fs div_coronal_pd; input the downsampled dataset first',
    required = True
)
parser.add_argument(
    '--bothdatasets', default = [
        'div_coronal_pd_fs', 'div_coronal_pd',
    ],
    nargs='+',
    help="""names of both datasets i.e. div_coronal_pd_fs div_coronal_pd;
    different from opt.datasets if opt.datasets only has one dataset;
    used to determine tensorboard MR images;
    this is annoying but allows for MR image to match across runs
    input the downsampled dataset first"""
)
parser.add_argument(
    '--scarcities', default=[0, 1, 2, 3], type=int, nargs='+',
    help='number of samples in second task will be decreased by 1/2^N; i.e. 0 1 2'
)
parser.add_argument(
    '--accelerations', default=[5, 6, 7], type=int, nargs='+',
    help='list of undersampling factor of k-space for training; validation is average acceleration '
)
# BUG FIX: center fractions are floats (defaults 0.05-0.07); with the old
# type=int, any value supplied on the command line (e.g. "0.05") would be
# rejected by argparse's int conversion.
parser.add_argument(
    '--centerfracs', default=[0.05, 0.06, 0.07], type=float, nargs='+',
    help='list of center fractions sampled of k-space for training; val is average centerfracs'
)
parser.add_argument(
    '--stratified', default=0, type=int,
    help="""if true, stratifies the dataloader"""
)
parser.add_argument(
    '--stratifymethod', default='upsample',
    help="""
    one of [upsample, downsample] for
    scarce, abundant dataset, respectively
    does not matter if --stratified is false"""
)
parser.add_argument(
    '--numworkers', default=16, type=int,
    help='number of workers for PyTorch dataloader'
)

# save / display data
parser.add_argument(
    '--experimentname', default='unnamed_experiment',
    help='experiment name i.e. STL or MTAN_pareto etc.'
)
parser.add_argument(
    '--verbose', default=1, type=int,
    help="""if true, prints to console average costs / metrics"""
)
parser.add_argument(
    '--tensorboard', default=1, type=int,
    help='if true, creates TensorBoard'
)
parser.add_argument(
    '--savefreq', default=10, type=int,
    help='how many epochs per saved recon image'
)

# parse at import time; --datasets is required, so running without it exits
opt = parser.parse_args()
"""
=========== Runs ============
"""
def main(opt):
    """Calls wrappers.py for training

    Creates data loaders, initializes model, and defines learning parameters.
    Trains STL from command line.

    Parameters
    ----------
    opt : argparse.ArgumentParser
        Refer to help documentation above.

    Returns
    -------
    None

    See Also
    --------
    single_task_trainer from wrappers.py
    """
    basedirs = [
        os.path.join(opt.datadir, dataset)
        for dataset in opt.datasets
    ]

    # One full training run per scarcity level (second task kept full size,
    # first task downsampled by 1/2^scarcity -- see --scarcities help).
    for scarcity in opt.scarcities:
        print(f'experiment w scarcity {scarcity}')
        train_dloader = genDataLoader(
            [f'{basedir}/Train' for basedir in basedirs],  # choose randomly
            [scarcity, 0],  # downsample
            center_fractions = opt.centerfracs,
            accelerations = opt.accelerations,
            shuffle = True,
            num_workers= opt.numworkers,
        )
        # Validation uses the mean acceleration/centerfrac for comparability.
        val_dloader = genDataLoader(
            [f'{basedir}/Val' for basedir in basedirs],
            [0, 0],  # no downsampling
            center_fractions = [np.mean(opt.centerfracs)],
            accelerations = [int(np.mean(opt.accelerations))],
            shuffle = False,  # no shuffling to allow visualization
            num_workers= opt.numworkers,
        )
        print('generated dataloaders')

        # other inputs to STL wrapper
        device = torch.device(opt.device if torch.cuda.is_available() else "cpu")
        varnet = STL_VarNet(opt.numblocks).to(device)

        # load weights if doing transfer learning
        if opt.weightsdir:
            varnet.load_state_dict(torch.load(
                opt.weightsdir,
                map_location = device,
                )
            )

        optimizer = torch.optim.Adam(varnet.parameters(),lr = opt.lr)
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=200, gamma=0.5)

        print('start training')
        # genDataLoader presumably returns a (dataloader, ratios-dict) pair,
        # indexed [0]/[1] below -- TODO confirm against dloader.py.
        # NOTE(review): writer_tensorboard is a module-level global created in
        # the __main__ block; this function assumes it exists.
        single_task_trainer(
            train_dloader[0], val_dloader[0],
            train_dloader[1], val_dloader[1],  # ratios dicts
            varnet, device, writer_tensorboard,
            optimizer, scheduler,
            opt,
        )
if __name__ == '__main__':
    # run / model name
    # NOTE(review): model_name includes the 'strat_' marker but run_name does
    # not -- confirm the asymmetry is intentional.
    run_name = f"runs/{opt.experimentname}_{opt.network}_{'_'.join(opt.datasets)}/"
    model_name = f"models/{opt.experimentname}_" + \
        f"{'strat_' if opt.stratified else ''}" + \
        f"{opt.network}_{'_'.join(opt.datasets)}/"
    if not os.path.isdir(model_name):
        os.makedirs(model_name)
    # NOTE(review): the open() on run_name below relies on SummaryWriter
    # having created the run_name directory -- confirm.
    writer_tensorboard = SummaryWriter(log_dir = run_name)

    # write json files to models and runs directories; for future reference
    with open(
        os.path.join(run_name,'parameters.json'), 'w'
    ) as parameter_file:
        json.dump(vars(opt), parameter_file)
    with open(
        os.path.join(model_name,'parameters.json'), 'w'
    ) as parameter_file:
        json.dump(vars(opt), parameter_file)

    main(opt)

    writer_tensorboard.flush()
    writer_tensorboard.close()
| en | 0.701136 | Docstring for stl.py This is the user-facing module for STL training. Run this module from the command line, and pass in the appropriate arguments to define the model and data. =========== Model ============ =========== command line parser ============ # command line argument parser # hyperparameters if true, will average accumulated grads before optimizer.step if false, gradaccumulation will occur without averaging (i.e. no hooks) value does not matter if gradaccumulation is equal to 1 # model training for transfer learning, give directory for loading weights; i.e. '/mnt/dense/vliu/summer_runs_models/models/STL_baselines/STL_nojoint_varnet_div_coronal_pd/N=481_l1.pt' default to None because we're not usually doing transfer # dataset properties If true, the model trained on mixed data; almost always true except for STL trained on single task; 0 for False; 1 for True names of both datasets i.e. div_coronal_pd_fs div_coronal_pd; different from opt.datasets if opt.datasets only has one dataset; used to determine tensorboard MR images; this is annoying but allows for MR image to match across runs input the downsampled dataset first if true, stratifies the dataloader one of [upsample, downsample] for scarce, abundant dataset, respectively does not matter if --stratified is false # save / display data if true, prints to console average costs / metrics =========== Runs ============ Calls wrappers.py for training Creates data loaders, initializes model, and defines learning parameters. Trains STL from command line. Parameters ---------- opt : argparse.ArgumentParser Refer to help documentation above. Returns ------- None See Also -------- single_task_trainer from wrappers.py # choose randomly # downsample # no downsampling # no shuffling to allow visualization # other inputs to STL wrapper # load weights if doing transfer learning # ratios dicts # run / model name # write json files to models and runs directories; for future reference | 2.608317 | 3 |
accounts/urls.py | Cep08751610/ANALYTICS | 0 | 6612968 | <gh_stars>0
from django.urls import path
from . import views
# URL namespace: reverse these as 'accounts:user_login' / 'accounts:user_logout'.
app_name = "accounts"

# Spanish-named login/logout endpoints mapped to this app's auth views.
urlpatterns = [
    path('login-usuario/', views.user_login, name="user_login"),
    path('logout-usuario/', views.user_logout, name="user_logout"),
] | from django.urls import path
from . import views
# URL namespace: reverse these as 'accounts:user_login' / 'accounts:user_logout'.
app_name = "accounts"

# Spanish-named login/logout endpoints mapped to this app's auth views.
urlpatterns = [
    path('login-usuario/', views.user_login, name="user_login"),
    path('logout-usuario/', views.user_logout, name="user_logout"),
] | none | 1 | 1.759883 | 2 |
mongo_test/utils/models/cluster.py | Vuong02011996/data_base_test | 0 | 6612969 | <filename>mongo_test/utils/models/cluster.py<gh_stars>0
from mongoengine import (
DateTimeField,
Document,
LazyReferenceField,
)
from datetime import datetime
class Cluster(Document):
    """mongoengine document stored in the 'clusters' collection."""

    # Lazy reference to an Identity document: presumably resolved only on
    # access rather than eagerly dereferenced -- confirm mongoengine version.
    identity = LazyReferenceField("Identity")
    # Creation time in UTC; utcnow is passed uncalled (a callable default),
    # so it is not frozen to a single import-time value.
    created_at = DateTimeField(default=datetime.utcnow, required=True)

    meta = {"collection": "clusters"}
| <filename>mongo_test/utils/models/cluster.py<gh_stars>0
from mongoengine import (
DateTimeField,
Document,
LazyReferenceField,
)
from datetime import datetime
class Cluster(Document):
    """mongoengine document stored in the 'clusters' collection."""

    # Lazy reference to an Identity document: presumably resolved only on
    # access rather than eagerly dereferenced -- confirm mongoengine version.
    identity = LazyReferenceField("Identity")
    # Creation time in UTC; utcnow is passed uncalled (a callable default),
    # so it is not frozen to a single import-time value.
    created_at = DateTimeField(default=datetime.utcnow, required=True)

    meta = {"collection": "clusters"}
| none | 1 | 1.834981 | 2 | |
asyncio/plugin_percpu.py | nicolargo/glancesarena | 0 | 6612970 | #!/usr/bin/env python3
from plugin import GlancesPlugin
class PerCpu(GlancesPlugin):
    """CPU (per core) plugin

    Stat example:
    [{'cpu_percent': 14.9, 'user': 13.9, 'nice': 0.0, 'system': 1.0,
      'idle': 85.1, 'iowait': 0.0, 'irq': 0.0, 'softirq': 0.0,
      'steal': 0.0, 'guest': 0.0, 'guest_nice': 0.0}, ... ]
    """

    def __init__(self):
        super(PerCpu, self).__init__()
        # Init the stats: psutil functions called per refresh; percpu=True
        # yields one dict per logical core. interval=0.0 makes the calls
        # non-blocking (values are relative to the previous call) —
        # NOTE(review): per psutil docs the very first sample is meaningless;
        # confirm the base class handles that.
        self.args['psutil_fct'] = [{'name': 'cpu_percent', 'args': {'percpu': True, 'interval': 0.0}},
                                   {'name': 'cpu_times_percent', 'args': {'percpu': True, 'interval': 0.0}}]
        # Init the view layout: five columns (user / system / idle / iowait /
        # steal), each with a header line and a "{field}" value line.
        self.args['view_layout'] = {
            # We will iterate the second line (index of first line is 0),
            # i.e. the value line is repeated once per core.
            'line_to_iter': 1,
            'columns': [
                # First column
                {
                    'lines': [['user'],
                              ['{user}']],
                },
                # Second column
                {
                    'lines': [['system'],
                              ['{system}']],
                },
                # Third column
                {
                    'lines': [['idle'],
                              ['{idle}']],
                },
                # Fourth column
                {
                    'lines': [['iowait'],
                              ['{iowait}']],
                },
                # Fifth column
                {
                    'lines': [['steal'],
                              ['{steal}']],
                }
            ]
        }


# Module-level singleton instance — presumably discovered by the plugin
# loader by name; TODO(review): confirm against the loader code.
percpu = PerCpu()
| #!/usr/bin/env python3
from plugin import GlancesPlugin
class PerCpu(GlancesPlugin):
"""CPU (per core) plugin
Stat example:
[{'cpu_percent': 14.9, 'user': 13.9, 'nice': 0.0, 'system': 1.0,
'idle': 85.1, 'iowait': 0.0, 'irq': 0.0, 'softirq': 0.0,
'steal': 0.0, 'guest': 0.0, 'guest_nice': 0.0}, ... ]
"""
def __init__(self):
super(PerCpu, self).__init__()
# Init the stats
self.args['psutil_fct'] = [{'name': 'cpu_percent', 'args': {'percpu': True, 'interval': 0.0}},
{'name': 'cpu_times_percent', 'args': {'percpu': True, 'interval': 0.0}}]
# Init the view layout
# user system idle iowait steal
self.args['view_layout'] = {
# We will iterate the second line (index of first line is 0)
'line_to_iter': 1,
'columns': [
# First column
{
'lines': [['user'],
['{user}']],
},
# Second column
{
'lines': [['system'],
['{system}']],
},
# Third column
{
'lines': [['idle'],
['{idle}']],
},
# Fourth column
{
'lines': [['iowait'],
['{iowait}']],
},
# Fifth column
{
'lines': [['steal'],
['{steal}']],
}
]
}
percpu = PerCpu()
| en | 0.375866 | #!/usr/bin/env python3 CPU (per core) plugin Stat example: [{'cpu_percent': 14.9, 'user': 13.9, 'nice': 0.0, 'system': 1.0, 'idle': 85.1, 'iowait': 0.0, 'irq': 0.0, 'softirq': 0.0, 'steal': 0.0, 'guest': 0.0, 'guest_nice': 0.0}, ... ] # Init the stats # Init the view layout # user system idle iowait steal # We will iterate the second line (index of first line is 0) # First column # Second column # Third column # Fourth column # Fifth column | 2.411967 | 2 |
idl/Typedef.py | spiricn/libIDL | 0 | 6612971 | <filename>idl/Typedef.py
from idl.Type import Type
class Typedef(Type):
    """IDL typedef: a named type alias registered as a Type of kind TYPEDEF."""

    def __init__(self, module, desc):
        # Register under the aliased name; desc.typeName comes from the parsed
        # typedef descriptor produced by the IDL parser.
        Type.__init__(self, module, Type.TYPEDEF, desc.typeName)
| <filename>idl/Typedef.py
from idl.Type import Type
class Typedef(Type):
def __init__(self, module, desc):
Type.__init__(self, module, Type.TYPEDEF, desc.typeName)
| none | 1 | 2.046517 | 2 | |
src/preproc.py | pengbohua/age-gender | 2 | 6612972 | <filename>src/preproc.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
import tensorflow as tf
import json
RESIZE_HEIGHT = 256
RESIZE_WIDTH = 256
tf.app.flags.DEFINE_string('fold_dir', '/home/dpressel/dev/work/AgeGenderDeepLearning/Folds/train_val_txt_files_per_fold/test_fold_is_0',
'Fold directory')
tf.app.flags.DEFINE_string('data_dir', '/data/xdata/age-gender/aligned',
'Data directory')
tf.app.flags.DEFINE_string('output_dir', '/home/dpressel/dev/work/AgeGenderDeepLearning/Folds/tf/test_fold_is_0',
'Output directory')
tf.app.flags.DEFINE_string('train_list', 'age_train.txt',
'Training list')
tf.app.flags.DEFINE_string('valid_list', 'age_val.txt',
'Test list')
tf.app.flags.DEFINE_integer('train_shards', 10,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('valid_shards', 2,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 2,
'Number of threads to preprocess the images.')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
    """Wrap an int (or list of ints) as an int64 Feature proto."""
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def _bytes_feature(value):
    """Wrapper for inserting a single bytes value as a Feature proto."""
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, height, width):
    """Build an Example proto for an example.

    Args:
      filename: string, path to an image file, e.g., '/path/to/example.JPG'
      image_buffer: string, JPEG encoding of RGB image
      label: integer, identifier for the ground truth for the network
      height: integer, image height in pixels
      width: integer, image width in pixels

    Returns:
      Example proto
    """
    # Only the basename is stored so records are independent of local paths.
    feature_map = {
        'image/class/label': _int64_feature(label),
        'image/filename': _bytes_feature(str.encode(os.path.basename(filename))),
        'image/encoded': _bytes_feature(image_buffer),
        'image/height': _int64_feature(height),
        'image/width': _int64_feature(width),
    }
    return tf.train.Example(features=tf.train.Features(feature=feature_map))
class ImageCoder(object):
    """Helper class that provides TensorFlow image coding utilities.

    Builds a small TF1 graph once (PNG->JPEG conversion and a
    decode->resize->re-encode pipeline) and reuses a single Session for
    every image, which is far cheaper than creating ops per image.
    """

    def __init__(self):
        # Create a single Session to run all image coding calls.
        self._sess = tf.Session()

        # Initializes function that converts PNG to JPEG data.
        self._png_data = tf.placeholder(dtype=tf.string)
        image = tf.image.decode_png(self._png_data, channels=3)
        self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)

        # Initializes function that decodes RGB JPEG data, resizes it to the
        # fixed RESIZE_HEIGHT x RESIZE_WIDTH shape, and re-encodes as JPEG.
        self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
        self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
        cropped = tf.image.resize_images(self._decode_jpeg, [RESIZE_HEIGHT, RESIZE_WIDTH])
        # resize_images returns floats; cast back to uint8 before re-encoding.
        cropped = tf.cast(cropped, tf.uint8)
        self._recoded = tf.image.encode_jpeg(cropped, format='rgb', quality=100)

    def png_to_jpeg(self, image_data):
        """Convert raw PNG bytes to JPEG bytes."""
        return self._sess.run(self._png_to_jpeg,
                              feed_dict={self._png_data: image_data})

    def resample_jpeg(self, image_data):
        """Decode JPEG bytes, resize to the fixed shape, return JPEG bytes."""
        image = self._sess.run(self._recoded,
                               feed_dict={self._decode_jpeg_data: image_data})
        return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
return '.png' in filename
def _process_image(filename, coder):
    """Process a single image file.

    Args:
      filename: string, path to an image file e.g., '/path/to/example.JPG'.
      coder: instance of ImageCoder to provide TensorFlow image coding utils.

    Returns:
      image_buffer: string, JPEG encoding of RGB image.
      height: integer, image height in pixels.
      width: integer, image width in pixels.
    """
    # Read the image file.
    with tf.gfile.FastGFile(filename, 'rb') as f:
        image_data = f.read()

    # Convert any PNG to JPEG's for consistency.
    if _is_png(filename):
        print('Converting PNG to JPEG for %s' % filename)
        image_data = coder.png_to_jpeg(image_data)

    # Decode, resize to RESIZE_HEIGHT x RESIZE_WIDTH, and re-encode as JPEG.
    image = coder.resample_jpeg(image_data)

    # Dimensions are the fixed post-resize shape, not the original size.
    return image, RESIZE_HEIGHT, RESIZE_WIDTH
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
                               labels, num_shards):
    """Processes and saves list of images as TFRecord in 1 thread.

    Args:
      coder: instance of ImageCoder to provide TensorFlow image coding utils.
      thread_index: integer, unique batch to run index is within [0, len(ranges)).
      ranges: list of pairs of integers specifying ranges of each batches to
        analyze in parallel.
      name: string, unique identifier specifying the data set
      filenames: list of strings; each string is a path to an image file
      labels: list of integer; each integer identifies the ground truth
      num_shards: integer number of shards for this data set.
    """
    # Each thread produces N shards where N = int(num_shards / num_threads).
    # For instance, if num_shards = 128, and the num_threads = 2, then the first
    # thread would produce shards [0, 64).
    num_threads = len(ranges)
    assert not num_shards % num_threads
    num_shards_per_batch = int(num_shards / num_threads)

    # Evenly split this thread's [start, end) file range across its shards.
    shard_ranges = np.linspace(ranges[thread_index][0],
                               ranges[thread_index][1],
                               num_shards_per_batch + 1).astype(int)
    num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]

    counter = 0
    for s in xrange(num_shards_per_batch):
        # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
        shard = thread_index * num_shards_per_batch + s
        output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
        output_file = os.path.join(FLAGS.output_dir, output_filename)
        writer = tf.python_io.TFRecordWriter(output_file)

        shard_counter = 0
        files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
        for i in files_in_shard:
            filename = filenames[i]
            label = int(labels[i])

            image_buffer, height, width = _process_image(filename, coder)

            example = _convert_to_example(filename, image_buffer, label,
                                          height, width)
            writer.write(example.SerializeToString())
            shard_counter += 1
            counter += 1

            # Periodic progress logging (every 1000 images in this thread).
            if not counter % 1000:
                print('%s [thread %d]: Processed %d of %d images in thread batch.' %
                      (datetime.now(), thread_index, counter, num_files_in_thread))
                sys.stdout.flush()

        writer.close()
        print('%s [thread %d]: Wrote %d images to %s' %
              (datetime.now(), thread_index, shard_counter, output_file))
        sys.stdout.flush()
        shard_counter = 0
    print('%s [thread %d]: Wrote %d images to %d shards.' %
          (datetime.now(), thread_index, counter, num_files_in_thread))
    sys.stdout.flush()
def _process_image_files(name, filenames, labels, num_shards):
    """Process and save list of images as TFRecord of Example protos.

    Args:
      name: string, unique identifier specifying the data set
      filenames: list of strings; each string is a path to an image file
      labels: list of integer; each integer identifies the ground truth
      num_shards: integer number of shards for this data set.
    """
    assert len(filenames) == len(labels)

    # Break all images into batches with a [ranges[i][0], ranges[i][1]].
    # np.int was removed in NumPy 1.24; the builtin int is the equivalent.
    spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(int)
    ranges = []
    for i in xrange(len(spacing) - 1):
        ranges.append([spacing[i], spacing[i + 1]])

    # Launch a thread for each batch.
    print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
    sys.stdout.flush()

    # Create a mechanism for monitoring when all threads are finished.
    coord = tf.train.Coordinator()

    # A single shared ImageCoder; its Session is thread-safe for run() calls.
    coder = ImageCoder()

    threads = []
    for thread_index in xrange(len(ranges)):
        args = (coder, thread_index, ranges, name, filenames, labels, num_shards)
        t = threading.Thread(target=_process_image_files_batch, args=args)
        t.start()
        threads.append(t)

    # Wait for all the threads to terminate.
    coord.join(threads)
    print('%s: Finished writing all %d images in data set.' %
          (datetime.now(), len(filenames)))
    sys.stdout.flush()
def _find_image_files(list_file, data_dir):
    """Build shuffled, parallel lists of image paths and label strings.

    Args:
      list_file: path to a text file with one "<relative_path> <label>"
        pair per line.
      data_dir: root directory the relative paths are joined to.

    Returns:
      (filenames, labels): parallel lists shuffled with a fixed seed so the
      ordering is repeatable across runs. Entries whose image file does not
      exist on disk are silently skipped.
    """
    print('Determining list of input files and labels from %s.' % list_file)
    files_labels = [l.strip().split(' ') for l in tf.gfile.FastGFile(
        list_file, 'r').readlines()]
    labels = []
    filenames = []

    # Construct the list of JPEG files and labels, skipping missing files.
    # (The unused 'label_index' background-class counter was removed.)
    for path, label in files_labels:
        jpeg_file_path = '%s/%s' % (data_dir, path)
        if os.path.exists(jpeg_file_path):
            filenames.append(jpeg_file_path)
            labels.append(label)

    unique_labels = set(labels)

    # Shuffle the ordering of all image files in order to guarantee
    # random ordering of the images with respect to label in the
    # saved TFRecord files. Make the randomization repeatable.
    shuffled_index = list(range(len(filenames)))
    random.seed(12345)
    random.shuffle(shuffled_index)
    filenames = [filenames[i] for i in shuffled_index]
    labels = [labels[i] for i in shuffled_index]

    print('Found %d JPEG files across %d labels inside %s.' %
          (len(filenames), len(unique_labels), data_dir))
    return filenames, labels
def _process_dataset(name, filename, directory, num_shards):
    """Process a complete data set and save it as a TFRecord.

    Args:
      name: string, unique identifier specifying the data set.
      filename: string, path to the list file of "<path> <label>" pairs.
      directory: string, root path to the data set.
      num_shards: integer number of shards for this data set.

    Returns:
      (count, unique_labels): number of examples written and the set of
      distinct label strings encountered.
    """
    filenames, labels = _find_image_files(filename, directory)
    _process_image_files(name, filenames, labels, num_shards)
    unique_labels = set(labels)
    return len(labels), unique_labels
def main(unused_argv):
    """Convert train/validation folds to sharded TFRecords and write md.json."""
    # Shard counts must divide evenly across worker threads.
    assert not FLAGS.train_shards % FLAGS.num_threads, (
        'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
    assert not FLAGS.valid_shards % FLAGS.num_threads, (
        'Please make the FLAGS.num_threads commensurate with '
        'FLAGS.valid_shards')
    print('Saving results to %s' % FLAGS.output_dir)

    # Idiomatic existence check (was: "os.path.exists(...) is False").
    if not os.path.exists(FLAGS.output_dir):
        print('creating %s' % FLAGS.output_dir)
        os.makedirs(FLAGS.output_dir)

    # Run it!
    valid, valid_outcomes = _process_dataset('validation', '%s/%s' % (FLAGS.fold_dir, FLAGS.valid_list), FLAGS.data_dir,
                                             FLAGS.valid_shards)
    train, train_outcomes = _process_dataset('train', '%s/%s' % (FLAGS.fold_dir, FLAGS.train_list), FLAGS.data_dir,
                                             FLAGS.train_shards)

    # Labels seen in training but never in validation are "unattested".
    if len(valid_outcomes) != len(valid_outcomes | train_outcomes):
        print('Warning: unattested labels in training data [%s]' % (', '.join((valid_outcomes | train_outcomes) - valid_outcomes)))

    # Persist run metadata for downstream training code.
    output_file = os.path.join(FLAGS.output_dir, 'md.json')
    md = {'num_valid_shards': FLAGS.valid_shards,
          'num_train_shards': FLAGS.train_shards,
          'valid_counts': valid,
          'train_counts': train,
          'timestamp': str(datetime.now()),
          'nlabels': len(train_outcomes)}
    with open(output_file, 'w') as f:
        json.dump(md, f)
if __name__ == '__main__':
tf.app.run()
| <filename>src/preproc.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
import tensorflow as tf
import json
RESIZE_HEIGHT = 256
RESIZE_WIDTH = 256
tf.app.flags.DEFINE_string('fold_dir', '/home/dpressel/dev/work/AgeGenderDeepLearning/Folds/train_val_txt_files_per_fold/test_fold_is_0',
'Fold directory')
tf.app.flags.DEFINE_string('data_dir', '/data/xdata/age-gender/aligned',
'Data directory')
tf.app.flags.DEFINE_string('output_dir', '/home/dpressel/dev/work/AgeGenderDeepLearning/Folds/tf/test_fold_is_0',
'Output directory')
tf.app.flags.DEFINE_string('train_list', 'age_train.txt',
'Training list')
tf.app.flags.DEFINE_string('valid_list', 'age_val.txt',
'Test list')
tf.app.flags.DEFINE_integer('train_shards', 10,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('valid_shards', 2,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 2,
'Number of threads to preprocess the images.')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
example = tf.train.Example(features=tf.train.Features(feature={
'image/class/label': _int64_feature(label),
'image/filename': _bytes_feature(str.encode(os.path.basename(filename))),
'image/encoded': _bytes_feature(image_buffer),
'image/height': _int64_feature(height),
'image/width': _int64_feature(width)
}))
return example
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
cropped = tf.image.resize_images(self._decode_jpeg, [RESIZE_HEIGHT, RESIZE_WIDTH])
cropped = tf.cast(cropped, tf.uint8)
self._recoded = tf.image.encode_jpeg(cropped, format='rgb', quality=100)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def resample_jpeg(self, image_data):
image = self._sess.run(self._recoded, #self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
return '.png' in filename
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
with tf.gfile.FastGFile(filename, 'rb') as f:
image_data = f.read()
# Convert any PNG to JPEG's for consistency.
if _is_png(filename):
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
# Decode the RGB JPEG.
image = coder.resample_jpeg(image_data)
return image, RESIZE_HEIGHT, RESIZE_WIDTH
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
labels, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
thread_index: integer, unique batch to run index is within [0, len(ranges)).
ranges: list of pairs of integers specifying ranges of each batches to
analyze in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
labels: list of integer; each integer identifies the ground truth
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in xrange(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_dir, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = int(labels[i])
image_buffer, height, width = _process_image(filename, coder)
example = _convert_to_example(filename, image_buffer, label,
height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
def _process_image_files(name, filenames, labels, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
labels: list of integer; each integer identifies the ground truth
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(labels)
# Break all images into batches with a [ranges[i][0], ranges[i][1]].
spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)
ranges = []
threads = []
for i in xrange(len(spacing) - 1):
ranges.append([spacing[i], spacing[i+1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
coder = ImageCoder()
threads = []
for thread_index in xrange(len(ranges)):
args = (coder, thread_index, ranges, name, filenames, labels, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(list_file, data_dir):
print('Determining list of input files and labels from %s.' % list_file)
files_labels = [l.strip().split(' ') for l in tf.gfile.FastGFile(
list_file, 'r').readlines()]
labels = []
filenames = []
# Leave label index 0 empty as a background class.
label_index = 1
# Construct the list of JPEG files and labels.
for path, label in files_labels:
jpeg_file_path = '%s/%s' % (data_dir, path)
if os.path.exists(jpeg_file_path):
filenames.append(jpeg_file_path)
labels.append(label)
unique_labels = set(labels)
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
shuffled_index = list(range(len(filenames)))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(unique_labels), data_dir))
return filenames, labels
def _process_dataset(name, filename, directory, num_shards):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
labels_file: string, path to the labels file.
"""
filenames, labels = _find_image_files(filename, directory)
_process_image_files(name, filenames, labels, num_shards)
unique_labels = set(labels)
return len(labels), unique_labels
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.valid_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.valid_shards')
print('Saving results to %s' % FLAGS.output_dir)
if os.path.exists(FLAGS.output_dir) is False:
print('creating %s' % FLAGS.output_dir)
os.makedirs(FLAGS.output_dir)
# Run it!
valid, valid_outcomes = _process_dataset('validation', '%s/%s' % (FLAGS.fold_dir, FLAGS.valid_list), FLAGS.data_dir,
FLAGS.valid_shards)
train, train_outcomes = _process_dataset('train', '%s/%s' % (FLAGS.fold_dir, FLAGS.train_list), FLAGS.data_dir,
FLAGS.train_shards)
if len(valid_outcomes) != len(valid_outcomes | train_outcomes):
print('Warning: unattested labels in training data [%s]' % (', '.join((valid_outcomes | train_outcomes) - valid_outcomes)))
output_file = os.path.join(FLAGS.output_dir, 'md.json')
md = { 'num_valid_shards': FLAGS.valid_shards,
'num_train_shards': FLAGS.train_shards,
'valid_counts': valid,
'train_counts': train,
'timestamp': str(datetime.now()),
'nlabels': len(train_outcomes) }
with open(output_file, 'w') as f:
json.dump(md, f)
if __name__ == '__main__':
tf.app.run()
| en | 0.717407 | Wrapper for inserting int64 features into Example proto. Wrapper for inserting bytes features into Example proto. Build an Example proto for an example. Args: filename: string, path to an image file, e.g., '/path/to/example.JPG' image_buffer: string, JPEG encoding of RGB image label: integer, identifier for the ground truth for the network height: integer, image height in pixels width: integer, image width in pixels Returns: Example proto Helper class that provides TensorFlow image coding utilities. # Create a single Session to run all image coding calls. # Initializes function that converts PNG to JPEG data. # Initializes function that decodes RGB JPEG data. #self._decode_jpeg, Determine if a file contains a PNG format image. Args: filename: string, path of the image file. Returns: boolean indicating if the image is a PNG. Process a single image file. Args: filename: string, path to an image file e.g., '/path/to/example.JPG'. coder: instance of ImageCoder to provide TensorFlow image coding utils. Returns: image_buffer: string, JPEG encoding of RGB image. height: integer, image height in pixels. width: integer, image width in pixels. # Read the image file. # Convert any PNG to JPEG's for consistency. # Decode the RGB JPEG. Processes and saves list of images as TFRecord in 1 thread. Args: coder: instance of ImageCoder to provide TensorFlow image coding utils. thread_index: integer, unique batch to run index is within [0, len(ranges)). ranges: list of pairs of integers specifying ranges of each batches to analyze in parallel. name: string, unique identifier specifying the data set filenames: list of strings; each string is a path to an image file labels: list of integer; each integer identifies the ground truth num_shards: integer number of shards for this data set. # Each thread produces N shards where N = int(num_shards / num_threads). 
# For instance, if num_shards = 128, and the num_threads = 2, then the first # thread would produce shards [0, 64). # Generate a sharded version of the file name, e.g. 'train-00002-of-00010' Process and save list of images as TFRecord of Example protos. Args: name: string, unique identifier specifying the data set filenames: list of strings; each string is a path to an image file labels: list of integer; each integer identifies the ground truth num_shards: integer number of shards for this data set. # Break all images into batches with a [ranges[i][0], ranges[i][1]]. # Launch a thread for each batch. # Create a mechanism for monitoring when all threads are finished. # Wait for all the threads to terminate. # Leave label index 0 empty as a background class. # Construct the list of JPEG files and labels. # Shuffle the ordering of all image files in order to guarantee # random ordering of the images with respect to label in the # saved TFRecord files. Make the randomization repeatable. Process a complete data set and save it as a TFRecord. Args: name: string, unique identifier specifying the data set. directory: string, root path to the data set. num_shards: integer number of shards for this data set. labels_file: string, path to the labels file. # Run it! | 1.999387 | 2 |
venv/lib/python3.8/site-packages/cryptography/hazmat/backends/openssl/dh.py | GiulianaPola/select_repeats | 2 | 6612973 | /home/runner/.cache/pip/pool/21/ba/d4/9081c03433cfa7a8c6f9469405b08168172c6eff9f6aae0bf3ab9ee7fb | /home/runner/.cache/pip/pool/21/ba/d4/9081c03433cfa7a8c6f9469405b08168172c6eff9f6aae0bf3ab9ee7fb | none | 1 | 0.831379 | 1 | |
local_cache.py | minh-t-nguyen/googlecrisismap | 40 | 6612974 | #!/usr/bin/python
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at: http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distrib-
# uted under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# specific language governing permissions and limitations under the License.
"""An application cache that uses only local RAM."""
import copy
import threading
import time
SWEEP_INTERVAL_SECONDS = 60
class _CacheEntry(object):
"""Entry to be stored in LocalCache."""
def __init__(self, value, expiry):
"""Cache Entry."""
self._value = copy.deepcopy(value)
self._expiry = expiry
@property
def value(self):
return copy.deepcopy(self._value)
@property
def expiry(self):
return self._expiry
class LocalCache(object):
"""A simple RAM cache that is similar to cache.py's Cache.
This is the backing store for cache.py's local cache, but can be used
directly as well. It isn't quite a drop-in replacement for cache.py, but is
close. Unlike cache.py:
- it is single tier and doesn't use memcache to share values between processes
- it doesn't take a namespace because the values are stored in the LocalCache
object instead of in the global memcache. If you need another namespace
just create another LocalCache.
- it doesn't have ull as that doesn't make sense in the local context.
- it doesn't support rate limiting or cache refreshing, as anything you'd want
to do that for, you should probably just use cache.py/memcache.
- it doesn't support make_value for Get, though that wouldn't be too hard to
implement when it's needed.
- it doesn't support Add. If you need Add you're probably trying to build a
lock and are better off using a real python threading.Lock.
"""
def __init__(self, ttl=0):
"""Constructor for LocalCache.
Args:
ttl: How long values should stay in cache. Default (0) is don't expire.
"""
self._cache = {} # key => _CacheEntry
self._ttl = ttl
self._sweep_lock = threading.Lock() # lock held while sweeping _cache
self._next_sweep_time = 0
def Clear(self):
"""Clear the state of this cache. For use in tests only."""
self._cache.clear()
def _Sweep(self):
"""Walk through all cache entries and delete any that are expired."""
now = time.time()
next_sweep_time_snapshot = self._next_sweep_time
if now >= next_sweep_time_snapshot and self._sweep_lock.acquire(False):
# Only one thread can advance next_sweep_time; that thread does the sweep.
try:
if self._next_sweep_time == next_sweep_time_snapshot:
# This thread got the lock first; proceed to sweep the cache.
self._next_sweep_time = now + SWEEP_INTERVAL_SECONDS
for key_json, entry in self._cache.items():
if 0 < entry.expiry < now:
# Use pop() instead of del because the item can be concurrently
# removed by Cache.Delete(), which doesn't hold _sweep_lock.
self._cache.pop(key_json, None)
finally:
self._sweep_lock.release()
def Get(self, key):
"""Get the value referenced by key. Returns None if it doesn't exist."""
v = self._cache.get(key)
if v and (v.expiry == 0 or time.time() < v.expiry):
return v.value
return None
def Set(self, key, value, ttl=None, expiry=None):
"""Set the key/value pair with the specified expiry.
The ttl and expiry are mutually exclusive. If you use neither, the cache
level ttl will be used. A ttl or expiry of 0 means don't expire.
Args:
key: The cache key.
value: The value to store in the cache. Must be picklable.
ttl: How long to keep this value, relative time in seconds.
expiry: When to expiry this value, absolute timestamp in seconds.
Returns:
True if it was stored, False otherwise.
Raises:
ValueError: ttl and expiry are mutually exclusive. ttl should be < 1 year.
"""
if ttl and expiry:
raise ValueError('Received ttl and expiry. Please only use one.')
if ttl > 365*86400:
raise ValueError('ttl > 1 year is likely intended as an expiry, not ttl.')
now = time.time()
if expiry is None:
if ttl is None:
ttl = self._ttl
expiry = ttl + now if ttl > 0 else 0
if expiry == 0 or now < expiry:
self._cache[key] = _CacheEntry(value, expiry)
self._Sweep()
return True
return False
def Delete(self, key):
  """Remove the entry for key; silently ignore a missing key."""
  try:
    del self._cache[key]
  except KeyError:
    pass
def Add(self, key, value, expiry):  # pylint:disable=unused-argument
  # pylint: disable=g-doc-args
  """Would atomically add an element to the cache if it was implemented.

  Implementing this correctly without locks seems hard or even impossible.
  It would be easy to use self._cache.setdefault, but that ignores the case of
  an expired entry. Both threads might try to remove the old entry but one can
  succeed to remove the old and add the new just in time for the second thread
  to remove the new and also think it successfully added a new entry. You
  therefore need to hold the sweep_lock to do this correctly. You could argue
  that this is correct and the value was just expired early, but that makes it
  less useful as a lock, which is the usual use of Add. Arguably Set should
  also use the lock, but that seems less important and then adds extra
  overhead. I'm leaving it NotImplemented until it's needed to avoid the
  complexity of locks. Given that this is local anyway it's probably better
  to just use a real python threading.Lock.
  """
  # Deliberately unimplemented -- see the rationale in the docstring above.
  raise NotImplementedError
| #!/usr/bin/python
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at: http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distrib-
# uted under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# specific language governing permissions and limitations under the License.
"""An application cache that uses only local RAM."""
import copy
import threading
import time
SWEEP_INTERVAL_SECONDS = 60
class _CacheEntry(object):
  """Entry to be stored in LocalCache.

  The payload is deep-copied on the way in and on the way out, so callers
  and the cache can never mutate each other's copy of the data.
  """

  def __init__(self, value, expiry):
    self._value = copy.deepcopy(value)
    self._expiry = expiry

  @property
  def expiry(self):
    """Absolute expiry timestamp in seconds; 0 means never expire."""
    return self._expiry

  @property
  def value(self):
    """A fresh deep copy of the stored payload."""
    return copy.deepcopy(self._value)
class LocalCache(object):
  """A simple RAM cache that is similar to cache.py's Cache.

  This is the backing store for cache.py's local cache, but can be used
  directly as well. It isn't quite a drop-in replacement for cache.py, but is
  close. Unlike cache.py:
  - it is single tier and doesn't use memcache to share values between processes
  - it doesn't take a namespace because the values are stored in the LocalCache
    object instead of in the global memcache. If you need another namespace
    just create another LocalCache.
  - it doesn't have ull as that doesn't make sense in the local context.
  - it doesn't support rate limiting or cache refreshing, as anything you'd want
    to do that for, you should probably just use cache.py/memcache.
  - it doesn't support make_value for Get, though that wouldn't be too hard to
    implement when it's needed.
  - it doesn't support Add. If you need Add you're probably trying to build a
    lock and are better off using a real python threading.Lock.
  """

  def __init__(self, ttl=0):
    """Constructor for LocalCache.

    Args:
      ttl: How long values should stay in cache. Default (0) is don't expire.
    """
    self._cache = {}  # key => _CacheEntry
    self._ttl = ttl
    self._sweep_lock = threading.Lock()  # lock held while sweeping _cache
    self._next_sweep_time = 0

  def Clear(self):
    """Clear the state of this cache. For use in tests only."""
    self._cache.clear()

  def _Sweep(self):
    """Walk through all cache entries and delete any that are expired."""
    now = time.time()
    next_sweep_time_snapshot = self._next_sweep_time
    if now >= next_sweep_time_snapshot and self._sweep_lock.acquire(False):
      # Only one thread can advance next_sweep_time; that thread does the sweep.
      try:
        if self._next_sweep_time == next_sweep_time_snapshot:
          # This thread got the lock first; proceed to sweep the cache.
          self._next_sweep_time = now + SWEEP_INTERVAL_SECONDS
          # Snapshot the items with list(): on Python 3, removing keys while
          # iterating a live dict view raises RuntimeError, so the sweep would
          # crash the first time it actually found an expired entry.
          for key_json, entry in list(self._cache.items()):
            if 0 < entry.expiry < now:
              # Use pop() instead of del because the item can be concurrently
              # removed by Cache.Delete(), which doesn't hold _sweep_lock.
              self._cache.pop(key_json, None)
      finally:
        self._sweep_lock.release()

  def Get(self, key):
    """Get the value referenced by key. Returns None if it doesn't exist."""
    v = self._cache.get(key)
    if v and (v.expiry == 0 or time.time() < v.expiry):
      return v.value
    return None

  def Set(self, key, value, ttl=None, expiry=None):
    """Set the key/value pair with the specified expiry.

    The ttl and expiry are mutually exclusive. If you use neither, the cache
    level ttl will be used. A ttl or expiry of 0 means don't expire.

    Args:
      key: The cache key.
      value: The value to store in the cache. Must be picklable.
      ttl: How long to keep this value, relative time in seconds.
      expiry: When to expiry this value, absolute timestamp in seconds.

    Returns:
      True if it was stored, False otherwise.

    Raises:
      ValueError: ttl and expiry are mutually exclusive. ttl should be < 1 year.
    """
    if ttl and expiry:
      raise ValueError('Received ttl and expiry. Please only use one.')
    # ttl defaults to None; comparing None against an int raises TypeError on
    # Python 3, so only run the sanity check when a ttl was actually supplied.
    if ttl is not None and ttl > 365*86400:
      raise ValueError('ttl > 1 year is likely intended as an expiry, not ttl.')
    now = time.time()
    if expiry is None:
      if ttl is None:
        ttl = self._ttl
      expiry = ttl + now if ttl > 0 else 0
    if expiry == 0 or now < expiry:
      self._cache[key] = _CacheEntry(value, expiry)
      self._Sweep()
      return True
    return False

  def Delete(self, key):
    """Delete the entry referenced by key, if it exists."""
    self._cache.pop(key, None)

  def Add(self, key, value, expiry):  # pylint:disable=unused-argument
    # pylint: disable=g-doc-args
    """Would atomically add an element to the cache if it was implemented.

    Implementing this correctly without locks seems hard or even impossible.
    It would be easy to use self._cache.setdefault, but that ignores the case of
    an expired entry. Both threads might try to remove the old entry but one can
    succeed to remove the old and add the new just in time for the second thread
    to remove the new and also think it successfully added a new entry. You
    therefore need to hold the sweep_lock to do this correctly. You could argue
    that this is correct and the value was just expired early, but that makes it
    less useful as a lock, which is the usual use of Add. Arguably Set should
    also use the lock, but that seems less important and then adds extra
    overhead. I'm leaving it NotImplemented until it's needed to avoid the
    complexity of locks. Given that this is local anyway it's probably better
    to just use a real python threading.Lock.
    """
    raise NotImplementedError
| en | 0.915503 | #!/usr/bin/python # Copyright 2012 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at: http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distrib- # uted under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES # OR CONDITIONS OF ANY KIND, either express or implied. See the License for # specific language governing permissions and limitations under the License. An application cache that uses only local RAM. Entry to be stored in LocalCache. Cache Entry. A simple RAM cache that is similar to cache.py's Cache. This is the backing store for cache.py's local cache, but can be used directly as well. It isn't quite a drop-in replacement for cache.py, but is close. Unlike cache.py: - it is single tier and doesn't use memcache to share values between processes - it doesn't take a namespace because the values are stored in the LocalCache object instead of in the global memcache. If you need another namespace just create another LocalCache. - it doesn't have ull as that doesn't make sense in the local context. - it doesn't support rate limiting or cache refreshing, as anything you'd want to do that for, you should probably just use cache.py/memcache. - it doesn't support make_value for Get, though that wouldn't be too hard to implement when it's needed. - it doesn't support Add. If you need Add you're probably trying to build a lock and are better off using a real python threading.Lock. Constructor for LocalCache. Args: ttl: How long values should stay in cache. Default (0) is don't expire. # key => _CacheEntry # lock held while sweeping _cache Clear the state of this cache. For use in tests only. Walk through all cache entries and delete any that are expired. 
# Only one thread can advance next_sweep_time; that thread does the sweep. # This thread got the lock first; proceed to sweep the cache. # Use pop() instead of del because the item can be concurrently # removed by Cache.Delete(), which doesn't hold _sweep_lock. Get the value referenced by key. Returns None if it doesn't exist. Set the key/value pair with the specified expiry. The ttl and expiry are mutually exclusive. If you use neither, the cache level ttl will be used. A ttl or expiry of 0 means don't expire. Args: key: The cache key. value: The value to store in the cache. Must be picklable. ttl: How long to keep this value, relative time in seconds. expiry: When to expiry this value, absolute timestamp in seconds. Returns: True if it was stored, False otherwise. Raises: ValueError: ttl and expiry are mutually exclusive. ttl should be < 1 year. Delete the entry referenced by key, if it exists. # pylint:disable=unused-argument # pylint: disable=g-doc-args Would atomically add an element to the cache if it was implemented. Implementing this correctly without locks seems hard or even impossible. It would be easy to use self._cache.setdefault, but that ignores the case of an expired entry. Both threads might try to remove the old entry but one can succeed to remove the old and add the new just in time for the second thread to remove the new and also think it successfully added a new entry. You therefore need to hold the sweep_lock to do this correctly. You could argue that this is correct and the value was just expired early, but that makes it less useful as a lock, which is the usual use of Add. Arguably Set should also use the lock, but that seems less important and then adds extra overhead. I'm leaving it NotImplemented until it's needed to avoid the complexity of locks. Given that this is local anyway it's probably better to just use a real python threading.Lock. | 3.154179 | 3 |
cronman/scheduler/files.py | ryancheley/django-cronman | 17 | 6612975 | <gh_stars>10-100
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
from __future__ import unicode_literals
import os
class BaseCronSchedulerFile(object):
    """Base class for Cron Scheduler file wrappers.

    Subclasses provide ``name``; the wrapped file lives directly inside the
    scheduler's data directory.
    """

    name = None  # relative file name; must be set in subclass

    def __init__(self, data_dir):
        self.path = os.path.join(data_dir, self.name)

    def create(self):
        """Create the file (empty) at ``self.path``."""
        with open(self.path, "w"):
            pass

    def delete(self):
        """Delete the file; raises OSError if it does not exist."""
        os.unlink(self.path)

    def exists(self):
        """Return True if the file exists."""
        return os.path.exists(self.path)
class CronSchedulerLockFile(BaseCronSchedulerFile):
    """Lock file for Cron Scheduler.

    Stored as ``scheduler.lock`` in the scheduler's data directory.
    NOTE(review): presence of this file presumably disables/pauses the
    scheduler -- confirm against the scheduler's main loop.
    """
    name = "scheduler.lock"
class CronSchedulerResumeFile(BaseCronSchedulerFile):
    """Resume file for Cron Scheduler.

    Stored as ``scheduler.resume`` in the scheduler's data directory.
    NOTE(review): presence presumably tells the scheduler to resume
    previously interrupted work -- confirm against the scheduler logic.
    """
    name = "scheduler.resume"
| # -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
from __future__ import unicode_literals
import os
class BaseCronSchedulerFile(object):
"""Base class for Cron Scheduler file wrappers"""
name = None # must be set in subclass
def __init__(self, data_dir):
self.path = os.path.join(data_dir, self.name)
def create(self):
"""Creates the lock file"""
open(self.path, "w").close()
def delete(self):
"""Deletes the lock file"""
os.unlink(self.path)
def exists(self):
"""Checks if the lock file exists"""
return os.path.exists(self.path)
class CronSchedulerLockFile(BaseCronSchedulerFile):
"""Lock file for Cron Scheduler"""
name = "scheduler.lock"
class CronSchedulerResumeFile(BaseCronSchedulerFile):
"""Resume file for Cron Scheduler"""
name = "scheduler.resume" | en | 0.676395 | # -*- coding: utf-8 -*- # vi:si:et:sw=4:sts=4:ts=4 Base class for Cron Scheduler file wrappers # must be set in subclass Creates the lock file Deletes the lock file Checks if the lock file exists Lock file for Cron Scheduler Resume file for Cron Scheduler | 2.613751 | 3 |
scripts/create_fluseverity_figs_v5/S_corrCoef_2wkPeriod_altbaseline_v5.py | eclee25/flu-SDI-exploratory-age | 3 | 6612976 | #!/usr/bin/python
##############################################
###Python template
###Author: <NAME>
###Date: 1/15/15
###Function: correlation coefficient between benchmark and zRR vs. moving 2 week window for SDI data for 7 week summer baseline
###Import data: SQL_export/OR_allweeks_outpatient.csv, SQL_export/totalpop.csv, My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv
###Command Line: python S_corrCoef_2wkPeriod_v5.py
##############################################

### notes ###
# Incidence per 100,000 is normalized by total population by second calendar year of the flu season
# NOTE(review): this is a Python 2 script -- it uses print statements and
# concatenates the lists returned by range() with "+".

### packages/modules ###
import csv
import matplotlib.pyplot as plt
from scipy.stats import pearsonr
import numpy as np
from collections import defaultdict
import random as rnd

## local modules ##
import functions_v5 as fxn

rnd.seed(10)  # fixed seed so the shuffled null distributions are reproducible

### data structures ###
### functions ###
### data files ###
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv','r')
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r')
pop = csv.reader(popin, delimiter=',')
ixin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/CDC_Source/Import_Data/cdc_severity_index.csv','r')
ixin.readline()  # skip the header row of the severity index file
ix = csv.reader(ixin, delimiter=',')

### called/local plotting parameters ###
ps = fxn.pseasons
fw = fxn.gp_fluweeks
sl = fxn.gp_seasonlabels
colvec = fxn.gp_colors
wklab = fxn.gp_weeklabels
norm = fxn.gp_normweeks
fs = 24  # axis label font size
fssml = 16  # smaller font size (not used in this script)
lw = fxn.gp_linewidth

# custom xticks for window period
# Each tick label is the zero-padded pair of consecutive epi weeks in the
# 2-week window, e.g. "4041" for weeks 40 and 41 (season runs wk40 -> wk39).
wk1 = range(40,54) + range(1,39)
first_wk = [('0'+str(wk))[-2:] for wk in wk1]
wk2 = range(41,54) + range(1,40)
sec_wk = [('0'+str(wk))[-2:] for wk in wk2]
window_xticks = [fir+sec for fir, sec in zip(first_wk, sec_wk)]
nswaps = 250  # number of shuffled replicates used to build the null distribution

### program ###

# import benchmark
# d_benchmark[seasonnum] = CDC benchmark index value
d_benchmark = fxn.benchmark_import(ix, 8) # no ILINet
benchmarks = [d_benchmark[s] for s in ps]

###################################
### 7 week summer baseline ###
# dict_wk[wk] = seasonnum
# dict_totIncid53ls[s] = [incid rate per 100000 wk40,... incid rate per 100000 wk 39] (unadjusted ILI incidence)
# dict_totIncidAdj53ls[s] = [adjusted incid rate per 100000 wk 40, ...adj incid wk 39] (total population adjusted for coverage and ILI care-seeking behavior)
# dict_RR53ls[s] = [RR wk 40,... RR wk 39] (children and adults adjusted for SDI data coverage and ILI care-seeking behavior)
# dict_zRR53ls[s] = [zRR wk 40,... zRR wk 39] (children and adults adjusted for SDI data coverage and ILI care-seeking behavior)
d_wk, d_pop, d_totILI53ls, d_totILIadj53ls, d_ageILIadj_season = fxn.week_OR_processing(incid, pop)
d_totIncid53ls, d_totIncidAdj53ls, d_RR53ls, d_zRR53ls = fxn.week_RR_processing_part2_altbaseline(d_pop, d_totILI53ls, d_totILIadj53ls, d_ageILIadj_season)

# preparation of values for Pearson R calculation
d_window_zRRma = fxn.zRR_movingAverage_windows(d_zRR53ls, 2)

# calculate Pearson's correlation coefficient between zRR moving average and benchmark for each window period
benchmark_zRRma_corr = [pearsonr(d_window_zRRma[w], benchmarks)[0] for w in sorted(d_window_zRRma)]

# debug prints for manual inspection of the values behind the plot
print [np.mean(d_zRR53ls[s][:2]) for s in ps]
print d_window_zRRma[0]
print benchmarks

# create null hypothesis through shuffling
# Each replicate shuffles a copy of the per-season values within a window,
# breaking any real association with the benchmarks.
dict_iter_nullCorr = defaultdict(list)
for i in range(nswaps):
    null_corr = [pearsonr(fxn.returnShuffled(d_window_zRRma[w][:]), benchmarks)[0] for w in sorted(d_window_zRRma)] # create list copy to shuffle
    dict_iter_nullCorr[i] = null_corr

fig1 = plt.figure()
ax1 = fig1.add_subplot(1,1,1)
for i in range(nswaps):
    ax1.plot(range(52), dict_iter_nullCorr[i], color='grey', alpha=0.4, linewidth=1) # null line
# observed correlations: faded (alpha=0.4) for the first 7 windows, solid after
ax1.plot(range(7), benchmark_zRRma_corr[:7], marker='o', color='black', alpha=0.4, linestyle='solid', linewidth=lw)
ax1.plot(range(6, 52), benchmark_zRRma_corr[6:], marker='o', color='black', linestyle='solid', linewidth=lw)
ax1.set_ylabel(r'Pearson R: $\beta$ & $\sigma(t)$ (2-wk mean)', fontsize=fs)
ax1.set_xlabel('Window Period', fontsize=fs)
plt.xticks(range(52)[::5], window_xticks[::5])
ax1.set_xlim([0,53])
ax1.set_ylim([-0.5,1.0])
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs_v5/exploratory/corrCoef_window_summerBL_wNull.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# plt.show()
| #!/usr/bin/python
##############################################
###Python template
###Author: <NAME>
###Date: 1/15/15
###Function: correlation coefficient between benchmark and zRR vs. moving 2 week window for SDI data for 7 week summer baseline
###Import data: SQL_export/OR_allweeks_outpatient.csv, SQL_export/totalpop.csv, My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv
###Command Line: python S_corrCoef_2wkPeriod_v5.py
##############################################
### notes ###
# Incidence per 100,000 is normalized by total population by second calendar year of the flu season
### packages/modules ###
import csv
import matplotlib.pyplot as plt
from scipy.stats import pearsonr
import numpy as np
from collections import defaultdict
import random as rnd
## local modules ##
import functions_v5 as fxn
rnd.seed(10)
### data structures ###
### functions ###
### data files ###
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_allweeks_outpatient.csv','r')
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/totalpop_age.csv', 'r')
pop = csv.reader(popin, delimiter=',')
ixin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/CDC_Source/Import_Data/cdc_severity_index.csv','r')
ixin.readline()
ix = csv.reader(ixin, delimiter=',')
### called/local plotting parameters ###
ps = fxn.pseasons
fw = fxn.gp_fluweeks
sl = fxn.gp_seasonlabels
colvec = fxn.gp_colors
wklab = fxn.gp_weeklabels
norm = fxn.gp_normweeks
fs = 24
fssml = 16
lw = fxn.gp_linewidth
# custom xticks for window period
wk1 = range(40,54) + range(1,39)
first_wk = [('0'+str(wk))[-2:] for wk in wk1]
wk2 = range(41,54) + range(1,40)
sec_wk = [('0'+str(wk))[-2:] for wk in wk2]
window_xticks = [fir+sec for fir, sec in zip(first_wk, sec_wk)]
nswaps = 250
### program ###
# import benchmark
# d_benchmark[seasonnum] = CDC benchmark index value
d_benchmark = fxn.benchmark_import(ix, 8) # no ILINet
benchmarks = [d_benchmark[s] for s in ps]
###################################
### 7 week summer baseline ###
# dict_wk[wk] = seasonnum
# dict_totIncid53ls[s] = [incid rate per 100000 wk40,... incid rate per 100000 wk 39] (unadjusted ILI incidence)
# dict_totIncidAdj53ls[s] = [adjusted incid rate per 100000 wk 40, ...adj incid wk 39] (total population adjusted for coverage and ILI care-seeking behavior)
# dict_RR53ls[s] = [RR wk 40,... RR wk 39] (children and adults adjusted for SDI data coverage and ILI care-seeking behavior)
# dict_zRR53ls[s] = [zRR wk 40,... zRR wk 39] (children and adults adjusted for SDI data coverage and ILI care-seeking behavior)
d_wk, d_pop, d_totILI53ls, d_totILIadj53ls, d_ageILIadj_season = fxn.week_OR_processing(incid, pop)
d_totIncid53ls, d_totIncidAdj53ls, d_RR53ls, d_zRR53ls = fxn.week_RR_processing_part2_altbaseline(d_pop, d_totILI53ls, d_totILIadj53ls, d_ageILIadj_season)
# preparation of values for Pearson R calculation
d_window_zRRma = fxn.zRR_movingAverage_windows(d_zRR53ls, 2)
# calculate Pearson's correlation coefficient between zRR moving average and benchmark for each window period
benchmark_zRRma_corr = [pearsonr(d_window_zRRma[w], benchmarks)[0] for w in sorted(d_window_zRRma)]
print [np.mean(d_zRR53ls[s][:2]) for s in ps]
print d_window_zRRma[0]
print benchmarks
# create null hypothesis through shuffling
dict_iter_nullCorr = defaultdict(list)
for i in range(nswaps):
null_corr = [pearsonr(fxn.returnShuffled(d_window_zRRma[w][:]), benchmarks)[0] for w in sorted(d_window_zRRma)] # create list copy to shuffle
dict_iter_nullCorr[i] = null_corr
fig1 = plt.figure()
ax1 = fig1.add_subplot(1,1,1)
for i in range(nswaps):
ax1.plot(range(52), dict_iter_nullCorr[i], color='grey', alpha=0.4, linewidth=1) # null line
ax1.plot(range(7), benchmark_zRRma_corr[:7], marker='o', color='black', alpha=0.4, linestyle='solid', linewidth=lw)
ax1.plot(range(6, 52), benchmark_zRRma_corr[6:], marker='o', color='black', linestyle='solid', linewidth=lw)
ax1.set_ylabel(r'Pearson R: $\beta$ & $\sigma(t)$ (2-wk mean)', fontsize=fs)
ax1.set_xlabel('Window Period', fontsize=fs)
plt.xticks(range(52)[::5], window_xticks[::5])
ax1.set_xlim([0,53])
ax1.set_ylim([-0.5,1.0])
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs_v5/exploratory/corrCoef_window_summerBL_wNull.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# plt.show()
| en | 0.575007 | #!/usr/bin/python ############################################## ###Python template ###Author: <NAME> ###Date: 1/15/15 ###Function: correlation coefficient between benchmark and zRR vs. moving 2 week window for SDI data for 7 week summer baseline ###Import data: SQL_export/OR_allweeks_outpatient.csv, SQL_export/totalpop.csv, My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv ###Command Line: python S_corrCoef_2wkPeriod_v5.py ############################################## ### notes ### # Incidence per 100,000 is normalized by total population by second calendar year of the flu season ### packages/modules ### ## local modules ## ### data structures ### ### functions ### ### data files ### ### called/local plotting parameters ### # custom xticks for window period ### program ### # import benchmark # d_benchmark[seasonnum] = CDC benchmark index value # no ILINet ################################### ### 7 week summer baseline ### # dict_wk[wk] = seasonnum # dict_totIncid53ls[s] = [incid rate per 100000 wk40,... incid rate per 100000 wk 39] (unadjusted ILI incidence) # dict_totIncidAdj53ls[s] = [adjusted incid rate per 100000 wk 40, ...adj incid wk 39] (total population adjusted for coverage and ILI care-seeking behavior) # dict_RR53ls[s] = [RR wk 40,... RR wk 39] (children and adults adjusted for SDI data coverage and ILI care-seeking behavior) # dict_zRR53ls[s] = [zRR wk 40,... zRR wk 39] (children and adults adjusted for SDI data coverage and ILI care-seeking behavior) # preparation of values for Pearson R calculation # calculate Pearson's correlation coefficient between zRR moving average and benchmark for each window period # create null hypothesis through shuffling # create list copy to shuffle # null line # plt.show() | 1.88676 | 2 |
Beecrowd/Python/1002.py | felipemsalles/Programming-Studies | 0 | 6612977 | <filename>Beecrowd/Python/1002.py
# 1002
# Beecrowd/URI problem 1002: read a radius from stdin and print the area of
# the circle, A = pi * r**2, using the literal constant pi = 3.14159.
raio = float(input())  # radius ("raio" is Portuguese for radius)
area = 3.14159 * (raio ** 2)
print('A=%.4f' % area)  # output format: "A=" followed by the area to 4 decimals
# radius (English) == raio (Portuguese)
# 1002
raio = float(input())
area = 3.14159 * (raio ** 2)
print('A=%.4f' % area)
# radius (English) == raio (Portuguese) | en | 0.554992 | # 1002 # radius (English) == raio (Portuguese) | 3.486475 | 3 |
src/txkube/testing/_testcase.py | LeastAuthority/txkube | 14 | 6612978 | <gh_stars>10-100
# Copyright Least Authority Enterprises.
# See LICENSE for details.
"""
xUnit TestCase for txkube testing.
"""
from os import environ
from hypothesis import (
HealthCheck,
settings,
)
from fixtures import CompoundFixture
from testtools import TestCase as TesttoolsTestCase
from testtools.twistedsupport import AsynchronousDeferredRunTest
from twisted.python.failure import Failure
from ._eliot import CaptureEliotLogs
def _setup_hypothesis():
    """Register the "ci" Hypothesis profile and load the selected profile."""
    # CI worker speed varies a lot from run to run, so timing-based checks
    # (the too_slow health check and the per-example deadline) would fail
    # intermittently for environmental reasons.  The "ci" profile disables
    # both; the active profile is chosen via TXKUBE_HYPOTHESIS_PROFILE.
    settings.register_profile(
        "ci",
        suppress_health_check=[HealthCheck.too_slow],
        deadline=None,
    )
    chosen = environ.get("TXKUBE_HYPOTHESIS_PROFILE", "default")
    settings.load_profile(chosen)
_setup_hypothesis()
class AsynchronousDeferredRunTest(AsynchronousDeferredRunTest):
    """
    An asynchronous runner supporting Eliot.
    """
    # NOTE: intentionally shadows the imported
    # testtools.twistedsupport.AsynchronousDeferredRunTest; this subclass
    # replaces it for the rest of this module.
    def _get_log_fixture(self):
        """
        Add ``CaptureEliotLogs`` to the log fixtures which receive special
        treatment so as to be "cleaned up" in the timeout case.

        This ensures eliot logs are reported when tests time out - something
        that will not happen using the normal ``useFixture`` API.

        See <https://bugs.launchpad.net/testtools/+bug/897196>.
        """
        # At call time the class name refers to this subclass, so super()
        # resolves to the imported base class being shadowed.
        return CompoundFixture([
            super(AsynchronousDeferredRunTest, self)._get_log_fixture(),
            CaptureEliotLogs(),
        ])
class TestCase(TesttoolsTestCase):
    """
    A base class for test cases which automatically uses the
    ``CaptureEliotLogs`` fixture.
    """
    # expectThat and Hypothesis don't communicate well about when the
    # test has failed. Give them a little help. These two Hypothesis
    # hooks will check for a flag that testtools sets when it thinks
    # the test has failed and turn it into something Hypothesis can
    # recognize.
    def setup_example(self):
        """Hypothesis hook: clear the testtools failure flag before each example."""
        try:
            # TesttoolsTestCase starts without this attribute set at all. Get
            # us back to that state. It won't be set at all on the first
            # setup_example call, nor if the previous run didn't have a failed
            # expectation.
            del self.force_failure
        except AttributeError:
            pass

    def teardown_example(self, ignored):
        """Hypothesis hook: raise a test failure if an expectation failed."""
        if getattr(self, "force_failure", False):
            # If testtools things it's time to stop, translate this into a
            # test failure exception that Hypothesis can see. This lets
            # Hypothesis know when it has found a falsifying example. Without
            # it, Hypothesis can't see which of its example runs caused
            # problems.
            self.fail("expectation failed")
def assertNoResult(case, d):
    """
    Assert that ``d`` does not have a result at this point.

    NOTE(review): this attaches a callback via ``addBoth``, which in Twisted
    takes over the Deferred's current result -- confirm callers expect that.
    """
    collected = []
    d.addBoth(collected.append)
    if not collected:
        return
    outcome = collected[0]
    if isinstance(outcome, Failure):
        outcome.raiseException()
    else:
        case.fail("Got {} but expected no result".format(outcome))
| # Copyright Least Authority Enterprises.
# See LICENSE for details.
"""
xUnit TestCase for txkube testing.
"""
from os import environ
from hypothesis import (
HealthCheck,
settings,
)
from fixtures import CompoundFixture
from testtools import TestCase as TesttoolsTestCase
from testtools.twistedsupport import AsynchronousDeferredRunTest
from twisted.python.failure import Failure
from ._eliot import CaptureEliotLogs
def _setup_hypothesis():
settings.register_profile(
"ci",
suppress_health_check=[
# CPU resources available to CI builds typically varies
# significantly from run to run making it difficult to determine
# if "too slow" data generation is a result of the code or the
# execution environment. Prevent these checks from
# (intermittently) failing tests that are otherwise fine.
HealthCheck.too_slow,
],
# By the same reasoning as above, disable the deadline check.
deadline=None,
)
settings.load_profile(environ.get("TXKUBE_HYPOTHESIS_PROFILE", "default"))
_setup_hypothesis()
class AsynchronousDeferredRunTest(AsynchronousDeferredRunTest):
"""
An asynchronous runner supporting Eliot.
"""
def _get_log_fixture(self):
"""
Add ``CaptureEliotLogs`` to the log fixtures which receive special
treatment so as to be "cleaned up" in the timeout case.
This ensures eliot logs are reported when tests time out - something
that will not happen using the normal ``useFixture`` API.
See <https://bugs.launchpad.net/testtools/+bug/897196>.
"""
return CompoundFixture([
super(AsynchronousDeferredRunTest, self)._get_log_fixture(),
CaptureEliotLogs(),
])
class TestCase(TesttoolsTestCase):
"""
A base class for test cases which automatically uses the
``CaptureEliotLogs`` fixture.
"""
# expectThat and Hypothesis don't communicate well about when the
# test has failed. Give them a little help. These two Hypothesis
# hooks will check for a flag that testtools sets when it thinks
# the test has failed and turn it into something Hypothesis can
# recognize.
def setup_example(self):
try:
# TesttoolsTestCase starts without this attribute set at all. Get
# us back to that state. It won't be set at all on the first
# setup_example call, nor if the previous run didn't have a failed
# expectation.
del self.force_failure
except AttributeError:
pass
def teardown_example(self, ignored):
if getattr(self, "force_failure", False):
# If testtools things it's time to stop, translate this into a
# test failure exception that Hypothesis can see. This lets
# Hypothesis know when it has found a falsifying example. Without
# it, Hypothesis can't see which of its example runs caused
# problems.
self.fail("expectation failed")
def assertNoResult(case, d):
"""
Assert that ``d`` does not have a result at this point.
"""
result = []
d.addBoth(result.append)
if result:
if isinstance(result[0], Failure):
result[0].raiseException()
else:
case.fail("Got {} but expected no result".format(result[0])) | en | 0.91648 | # Copyright Least Authority Enterprises. # See LICENSE for details. xUnit TestCase for txkube testing. # CPU resources available to CI builds typically varies # significantly from run to run making it difficult to determine # if "too slow" data generation is a result of the code or the # execution environment. Prevent these checks from # (intermittently) failing tests that are otherwise fine. # By the same reasoning as above, disable the deadline check. An asynchronous runner supporting Eliot. Add ``CaptureEliotLogs`` to the log fixtures which receive special treatment so as to be "cleaned up" in the timeout case. This ensures eliot logs are reported when tests time out - something that will not happen using the normal ``useFixture`` API. See <https://bugs.launchpad.net/testtools/+bug/897196>. A base class for test cases which automatically uses the ``CaptureEliotLogs`` fixture. # expectThat and Hypothesis don't communicate well about when the # test has failed. Give them a little help. These two Hypothesis # hooks will check for a flag that testtools sets when it thinks # the test has failed and turn it into something Hypothesis can # recognize. # TesttoolsTestCase starts without this attribute set at all. Get # us back to that state. It won't be set at all on the first # setup_example call, nor if the previous run didn't have a failed # expectation. # If testtools things it's time to stop, translate this into a # test failure exception that Hypothesis can see. This lets # Hypothesis know when it has found a falsifying example. Without # it, Hypothesis can't see which of its example runs caused # problems. Assert that ``d`` does not have a result at this point. | 1.988013 | 2 |
users/models.py | rijkerd/Smart-Class | 4 | 6612979 | <filename>users/models.py
import uuid
from django.contrib.auth.models import AbstractUser
from django.db import models
# TODO: Setup email verification
# TODO: Setup user roles and permissions IE: Student and Teacher permissions
class User(AbstractUser):
    """Project user model: Django's AbstractUser with a UUID primary key."""

    # Random UUID primary key instead of the default auto-increment integer;
    # editable=False hides it from forms/admin.
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)

    class Meta:
        # Default queryset ordering: ascending last_login.
        ordering = ['last_login']

    def __str__(self):
        # String representation used by the admin and logs.
        return self.email
| <filename>users/models.py
import uuid
from django.contrib.auth.models import AbstractUser
from django.db import models
# TODO: Setup email verification
# TODO: Setup user roles and permissions IE: Student and Teacher permissions
class User(AbstractUser):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
class Meta:
ordering = ['last_login']
def __str__(self):
return self.email
| en | 0.703032 | # TODO: Setup email verification # TODO: Setup user roles and permissions IE: Student and Teacher permissions | 2.531046 | 3 |
Util/util.py | InnovAnon-Inc/sceadar | 0 | 6612980 | <reponame>InnovAnon-Inc/sceadar
class Util:
@staticmethod
#def range (vector): return max (vector) - min (vector) + 1
def range (min_value, max_value): return max_value - min_value + 1
@staticmethod
def approxEqual(x, y, tolerance=0.001):
return abs (x-y) <= 0.5 * tolerance * (x + y)
@staticmethod
def rangeIntersection (min1, max1, min2, max2):
if max1 < min2: return 1
if max2 < min1: return 2
if min1 < min2 and max1 < max2: return 3
if min2 < min1 and max2 < max1: return 4
if min1 < min2 and max2 < max1: return 5
if min2 < min1 and max1 < max2: return 6
return 0 | class Util:
@staticmethod
#def range (vector): return max (vector) - min (vector) + 1
def range (min_value, max_value): return max_value - min_value + 1
@staticmethod
def approxEqual(x, y, tolerance=0.001):
return abs (x-y) <= 0.5 * tolerance * (x + y)
@staticmethod
def rangeIntersection (min1, max1, min2, max2):
if max1 < min2: return 1
if max2 < min1: return 2
if min1 < min2 and max1 < max2: return 3
if min2 < min1 and max2 < max1: return 4
if min1 < min2 and max2 < max1: return 5
if min2 < min1 and max1 < max2: return 6
return 0 | fr | 0.30896 | #def range (vector): return max (vector) - min (vector) + 1 | 3.303801 | 3 |
src/onixcheck/data/__init__.py | titusz/onixcheck | 23 | 6612981 | # -*- coding: utf-8 -*-
from os.path import abspath, dirname, join
ROOT = abspath(dirname(__file__))
VALID_ONIX2_REF = join(ROOT, 'valid_onix2_ref.xml')
VALID_ONIX3_REF = join(ROOT, 'valid_onix3_ref.xml')
VALID_ONIX3_SHORT = join(ROOT, 'valid_onix3_short.xml')
VALID_ONIX3_REF_NS = join(ROOT, 'valid_onix3_ref_ns.xml')
INVALID_ONIX3_REF = join(ROOT, 'invalid_onix3_ref.xml')
INVALID_ONIX_ROOT = join(ROOT, 'invalid_onix_root.xml')
VALID_GOOGLE_SAMPLE = join(ROOT, 'Google Sample ONIX 2.1.xml')
VALID_GOOGLE_ONIX_30_SAMPLE = join(ROOT, 'valid_google_onix_3.xml')
INVALID_GOOGLE_ONIX_30_SAMPLE = join(ROOT, 'invalid_google_onix_3.xml')
WIN_CONSOLE_ISSUE = join(ROOT, 'win_console_issue.xml')
| # -*- coding: utf-8 -*-
from os.path import abspath, dirname, join
ROOT = abspath(dirname(__file__))
VALID_ONIX2_REF = join(ROOT, 'valid_onix2_ref.xml')
VALID_ONIX3_REF = join(ROOT, 'valid_onix3_ref.xml')
VALID_ONIX3_SHORT = join(ROOT, 'valid_onix3_short.xml')
VALID_ONIX3_REF_NS = join(ROOT, 'valid_onix3_ref_ns.xml')
INVALID_ONIX3_REF = join(ROOT, 'invalid_onix3_ref.xml')
INVALID_ONIX_ROOT = join(ROOT, 'invalid_onix_root.xml')
VALID_GOOGLE_SAMPLE = join(ROOT, 'Google Sample ONIX 2.1.xml')
VALID_GOOGLE_ONIX_30_SAMPLE = join(ROOT, 'valid_google_onix_3.xml')
INVALID_GOOGLE_ONIX_30_SAMPLE = join(ROOT, 'invalid_google_onix_3.xml')
WIN_CONSOLE_ISSUE = join(ROOT, 'win_console_issue.xml')
| en | 0.769321 | # -*- coding: utf-8 -*- | 1.862518 | 2 |
robot_pose_ekf/scripts/wtf.py | SNU-SF4/viwo | 0 | 6612982 | #!/usr/bin/env python
import rospy
from robot_pose_ekf.srv import GetStatus, GetStatusRequest
if __name__ == '__main__':
rospy.init_node('spawner', anonymous=True)
print ('looking for node robot_pose_ekf...')
rospy.wait_for_service('robot_pose_ekf/get_status')
s = rospy.ServiceProxy('robot_pose_ekf/get_status', GetStatus)
resp = s.call(GetStatusRequest())
print (resp.status)
| #!/usr/bin/env python
import rospy
from robot_pose_ekf.srv import GetStatus, GetStatusRequest
if __name__ == '__main__':
rospy.init_node('spawner', anonymous=True)
print ('looking for node robot_pose_ekf...')
rospy.wait_for_service('robot_pose_ekf/get_status')
s = rospy.ServiceProxy('robot_pose_ekf/get_status', GetStatus)
resp = s.call(GetStatusRequest())
print (resp.status)
| ru | 0.26433 | #!/usr/bin/env python | 2.205149 | 2 |
ctools/defragment_sharded_collection.py | graetzer/workscripts | 0 | 6612983 | <reponame>graetzer/workscripts<gh_stars>0
#!/usr/bin/env python3
#
import argparse
import asyncio
import logging
import math
import pymongo
import sys
from common import Cluster, yes_no
from copy import deepcopy
from pymongo import errors as pymongo_errors
from tqdm import tqdm
# Ensure that the caller is using python 3
if (sys.version_info[0] < 3):
raise Exception("Must be using Python 3")
class ShardedCollection:
def __init__(self, cluster, ns):
self.cluster = cluster
self.name = ns
self.ns = {'db': self.name.split('.', 1)[0], 'coll': self.name.split('.', 1)[1]}
self._direct_config_connection = None
async def init(self):
collection_entry = await self.cluster.configDb.collections.find_one({'_id': self.name})
self.uuid = collection_entry['uuid']
self.shard_key_pattern = collection_entry['key']
async def data_size_kb_from_shard(self, range):
data_size_response = await self.cluster.client[self.ns['db']].command({
'dataSize': self.name,
'keyPattern': self.shard_key_pattern,
'min': range[0],
'max': range[1],
'estimate': True
}, codec_options=self.cluster.client.codec_options)
# Round up the data size of the chunk to the nearest kilobyte
return math.ceil(max(float(data_size_response['size']), 1024.0) / 1024.0)
async def move_chunk(self, chunk, to):
await self.cluster.adminDb.command({
'moveChunk': self.name,
'bounds': [chunk['min'], chunk['max']],
'to': to
}, codec_options=self.cluster.client.codec_options)
async def merge_chunks(self, consecutive_chunks, unsafe_mode):
assert (len(consecutive_chunks) > 1)
if unsafe_mode == 'no':
await self.cluster.adminDb.command({
'mergeChunks': self.name,
'bounds': [consecutive_chunks[0]['min'], consecutive_chunks[-1]['max']]
}, codec_options=self.cluster.client.codec_options)
elif unsafe_mode == 'unsafe_direct_commit_against_configsvr':
if not self._direct_config_connection:
self._direct_config_connection = await self.cluster.make_direct_config_server_connection(
)
# TODO: Implement the unsafe_direct_commit_against_configsvr option
raise NotImplementedError(
'The unsafe_direct_commit_against_configsvr option is not yet implemented')
elif unsafe_mode == 'super_unsafe_direct_apply_ops_aginst_configsvr':
first_chunk = deepcopy(consecutive_chunks[0])
first_chunk['max'] = consecutive_chunks[-1]['max']
# TODO: Bump first_chunk['version'] to the collection version
first_chunk.pop('history', None)
first_chunk_update = [{
'op': 'u',
'b': False, # No upsert
'ns': 'config.chunks',
'o': first_chunk,
'o2': {
'_id': first_chunk['_id']
},
}]
remaining_chunks_delete = list(
map(lambda x: {
'op': 'd',
'ns': 'config.chunks',
'o': {
'_id': x['_id']
},
}, consecutive_chunks[1:]))
precondition = [
# TODO: Include the precondition
]
apply_ops_cmd = {
'applyOps': first_chunk_update + remaining_chunks_delete,
'preCondition': precondition,
}
if not self._direct_config_connection:
self._direct_config_connection = await self.cluster.make_direct_config_server_connection(
)
await self._direct_config_connection.admin.command(
apply_ops_cmd, codec_options=self.cluster.client.codec_options)
async def try_write_chunk_size(self, range, expected_owning_shard, size_to_write_kb):
try:
update_result = await self.cluster.configDb.chunks.update_one({
'ns': self.name,
'min': range[0],
'max': range[1],
'shard': expected_owning_shard
}, {'$set': {
'defrag_collection_est_size': size_to_write_kb
}})
if update_result.matched_count != 1:
raise Exception(
f"Chunk [{range[0]}, {range[1]}] wasn't updated: {update_result.raw_result}")
except Exception as ex:
logging.warning(f'Error {ex} occurred while writing the chunk size')
async def clear_chunk_size_estimations(self):
update_result = await self.cluster.configDb.chunks.update_many(
{'ns': self.name}, {'$unset': {
'defrag_collection_est_size': ''
}})
return update_result.modified_count
async def main(args):
cluster = Cluster(args.uri, asyncio.get_event_loop())
await cluster.check_is_mongos(warn_only=args.dryrun)
coll = ShardedCollection(cluster, args.ns)
await coll.init()
num_chunks = await cluster.configDb.chunks.count_documents({'ns': coll.name})
print(
f"""Collection {coll.name} has a shardKeyPattern of {coll.shard_key_pattern} and {num_chunks} chunks.
For optimisation and for dry runs will assume a chunk size of {args.phase_1_estimated_chunk_size_kb} KB."""
)
###############################################################################################
# Sanity checks (Read-Only): Ensure that the balancer and auto-splitter are stopped and that the
# MaxChunkSize has been configured appropriately
#
balancer_doc = await cluster.configDb.settings.find_one({'_id': 'balancer'})
if not args.dryrun and (balancer_doc is None or balancer_doc['mode'] != 'off'):
raise Exception("""The balancer must be stopped before running this script. Please run:
sh.stopBalancer()""")
auto_splitter_doc = await cluster.configDb.settings.find_one({'_id': 'autosplit'})
if not args.dryrun and (auto_splitter_doc is None or auto_splitter_doc['enabled']):
raise Exception(
"""The auto-splitter must be disabled before running this script. Please run:
db.getSiblingDB('config').settings.update({_id:'autosplit'}, {$set: {enabled: false}}, {upsert: true})"""
)
chunk_size_doc = await cluster.configDb.settings.find_one({'_id': 'chunksize'})
if chunk_size_doc is None or chunk_size_doc['value'] < 128:
if not args.dryrun:
raise Exception(
"""The MaxChunkSize must be configured to at least 128 MB before running this script. Please run:
db.getSiblingDB('config').settings.update({_id:'chunksize'}, {$set: {value: 128}}, {upsert: true})"""
)
else:
target_chunk_size_kb = args.dryrun
else:
target_chunk_size_kb = chunk_size_doc['value'] * 1024
if args.dryrun:
print(f"""Performing a dry run with target chunk size of {target_chunk_size_kb} KB.
No actual modifications to the cluster will occur.""")
else:
yes_no(
f'The next steps will perform an actual merge with target chunk size of {target_chunk_size_kb} KB.'
)
if args.phase_1_reset_progress:
yes_no(f'Previous defragmentation progress will be reset.')
num_cleared = await coll.clear_chunk_size_estimations()
print(f'Cleared {num_cleared} already processed chunks.')
###############################################################################################
# Initialisation (Read-Only): Fetch all chunks in memory and calculate the collection version
# in preparation for the subsequent write phase.
###############################################################################################
shard_to_chunks = {}
collectionVersion = None
with tqdm(total=num_chunks, unit=' chunks') as progress:
async for c in cluster.configDb.chunks.find({'ns': coll.name}, sort=[('min',
pymongo.ASCENDING)]):
shard_id = c['shard']
if collectionVersion is None:
collectionVersion = c['lastmod']
if c['lastmod'] > collectionVersion:
collectionVersion = c['lastmod']
if shard_id not in shard_to_chunks:
shard_to_chunks[shard_id] = {'chunks': [], 'num_merges_performed': 0}
shard = shard_to_chunks[shard_id]
shard['chunks'].append(c)
progress.update()
print(
f'Collection version is {collectionVersion} and chunks are spread over {len(shard_to_chunks)} shards'
)
###############################################################################################
#
# WRITE PHASES START FROM HERE ONWARDS
#
###############################################################################################
###############################################################################################
# PHASE 1 (Merge-only): The purpose of this phase is to merge as many chunks as possible without
# actually moving any data. It is intended to achieve the maximum number of merged chunks with
# the minimum possible intrusion to the ongoing CRUD workload due to refresh stalls.
#
# The stage is also resumable, because for every chunk/chunk range that it processes, it will
# persist a field called 'defrag_collection_est_size' on the chunk, which estimates its size as
# of the time the script ran. Resuming Phase 1 will skip over any chunks which already contain
# this field, because it indicates that previous execution already ran and performed all the
# possible merges.
#
# These are the parameters that control the operation of this phase and their purpose is
# explaned below:
max_merges_on_shards_at_less_than_collection_version = 1
max_merges_on_shards_at_collection_version = 10
# The way Phase 1 (merge-only) operates is by running:
#
# (1) Up to `max_merges_on_shards_at_less_than_collection_version` concurrent mergeChunks
# across all shards which are below the collection major version
# AND
# (2) Up to `max_merges_on_shards_at_collection_version` concurrent mergeChunks across all
# shards which are already on the collection major version
#
# Merges due to (1) will bring the respective shard's major version to that of the collection,
# which unfortunately is interpreted by the routers as "something routing-related changed" and
# will result in refresh and a stall on the critical CRUD path. Because of this, the script only
# runs one at a time of these by default. On the other hand, merges due to (2) only increment
# the minor version and will not cause stalls on the CRUD path, so these can run with higher
# concurrency.
#
# The expectation is that at the end of this phase, not all possible defragmentation would have
# been achieved, but the number of chunks on the cluster would have been significantly reduced
# in a way that would make Phase 2 much less invasive due to refreshes after moveChunk.
#
# For example in a collection with 1 million chunks, a refresh due to moveChunk could be
# expected to take up to a second. However with the number of chunks reduced to 500,000 due to
# Phase 1, the refresh time would be on the order of ~100-200msec.
###############################################################################################
sem_at_less_than_collection_version = asyncio.Semaphore(
max_merges_on_shards_at_less_than_collection_version)
sem_at_collection_version = asyncio.Semaphore(max_merges_on_shards_at_collection_version)
async def merge_chunks_on_shard(shard, collection_version, progress):
shard_entry = shard_to_chunks[shard]
shard_chunks = shard_entry['chunks']
if len(shard_chunks) == 0:
return
chunk_at_shard_version = max(shard_chunks, key=lambda c: c['lastmod'])
shard_version = chunk_at_shard_version['lastmod']
shard_is_at_collection_version = shard_version.time == collection_version.time
progress.write(f'{shard}: {shard_version}: ', end='')
if shard_is_at_collection_version:
progress.write('Merge will start without major version bump')
else:
progress.write('Merge will start with a major version bump')
consecutive_chunks = []
estimated_size_of_consecutive_chunks = 0
num_lock_busy_errors_encountered = 0
def lookahead(iterable):
"""Pass through all values from the given iterable, augmented by the
information if there are more values to come after the current one
(True), or if it is the last value (False).
"""
# Get an iterator and pull the first value.
it = iter(iterable)
last = next(it)
# Run the iterator to exhaustion (starting from the second value).
for val in it:
# Report the *previous* value (more to come).
yield last, True
last = val
# Report the last value.
yield last, False
for c, has_more in lookahead(shard_chunks):
progress.update()
if len(consecutive_chunks) == 0:
consecutive_chunks = [c]
estimated_size_of_consecutive_chunks = args.phase_1_estimated_chunk_size_kb
if not args.dryrun and not has_more and not 'defrag_collection_est_size' in consecutive_chunks[
0]:
chunk_range = [consecutive_chunks[0]['min'], consecutive_chunks[0]['max']]
data_size_kb = await coll.data_size_kb_from_shard(chunk_range)
await coll.try_write_chunk_size(chunk_range, shard, data_size_kb)
continue
merge_consecutive_chunks_without_size_check = False
if consecutive_chunks[-1]['max'] == c['min']:
consecutive_chunks.append(c)
estimated_size_of_consecutive_chunks += args.phase_1_estimated_chunk_size_kb
elif len(consecutive_chunks) == 1:
if not args.dryrun and not 'defrag_collection_est_size' in consecutive_chunks[0]:
chunk_range = [consecutive_chunks[0]['min'], consecutive_chunks[0]['max']]
data_size_kb = await coll.data_size_kb_from_shard(chunk_range)
await coll.try_write_chunk_size(chunk_range, shard, data_size_kb)
consecutive_chunks = [c]
estimated_size_of_consecutive_chunks = args.phase_1_estimated_chunk_size_kb
if not args.dryrun and not has_more and not 'defrag_collection_est_size' in consecutive_chunks[0]:
chunk_range = [consecutive_chunks[0]['min'], consecutive_chunks[0]['max']]
data_size_kb = await coll.data_size_kb_from_shard(chunk_range)
await coll.try_write_chunk_size(chunk_range, shard, data_size_kb)
continue
else:
merge_consecutive_chunks_without_size_check = True
# To proceed to this stage we must have at least 2 consecutive chunks as candidates to
# be merged
assert (len(consecutive_chunks) > 1)
# After we have collected a run of chunks whose estimated size is 90% of the maximum
# chunk size, invoke `dataSize` in order to determine whether we can merge them or if
# we should continue adding more chunks to be merged
if (estimated_size_of_consecutive_chunks < target_chunk_size_kb * 0.90
) and not merge_consecutive_chunks_without_size_check and has_more:
continue
merge_bounds = [consecutive_chunks[0]['min'], consecutive_chunks[-1]['max']]
# Determine the "exact" (not 100% exact because we use the 'estimate' option) size of
# the currently accumulated bounds via the `dataSize` command in order to decide
# whether this run should be merged or if we should continue adding chunks to it.
actual_size_of_consecutive_chunks = estimated_size_of_consecutive_chunks
if not args.dryrun:
actual_size_of_consecutive_chunks = await coll.data_size_kb_from_shard(merge_bounds)
if merge_consecutive_chunks_without_size_check or not has_more:
pass
elif actual_size_of_consecutive_chunks < target_chunk_size_kb * 0.75:
# If the actual range size is sill 25% less than the target size, continue adding
# consecutive chunks
estimated_size_of_consecutive_chunks = actual_size_of_consecutive_chunks
continue
elif actual_size_of_consecutive_chunks > target_chunk_size_kb * 1.10:
# TODO: If the actual range size is 10% more than the target size, use `splitVector`
# to determine a better merge/split sequence so as not to generate huge chunks which
# will have to be split later on
pass
# Perform the actual merge, obeying the configured concurrency
async with (sem_at_collection_version
if shard_is_at_collection_version else sem_at_less_than_collection_version):
if not args.dryrun:
try:
await coll.merge_chunks(consecutive_chunks,
args.phase_1_perform_unsafe_merge)
await coll.try_write_chunk_size(merge_bounds, shard,
actual_size_of_consecutive_chunks)
except pymongo_errors.OperationFailure as ex:
if ex.details['code'] == 46: # The code for LockBusy
num_lock_busy_errors_encountered += 1
if num_lock_busy_errors_encountered == 1:
logging.warning(
f"""Lock error occurred while trying to merge chunk range {merge_bounds}.
This indicates the presence of an older MongoDB version.""")
else:
raise
else:
progress.write(
f'Merging {len(consecutive_chunks)} consecutive chunks on {shard}: {merge_bounds}'
)
# Reset the accumulator so far. If we are merging due to
# merge_consecutive_chunks_without_size_check, need to make sure that we don't forget
# the current entry since it is not part of the run
if merge_consecutive_chunks_without_size_check:
consecutive_chunks = [c]
estimated_size_of_consecutive_chunks = args.phase_1_estimated_chunk_size_kb
else:
consecutive_chunks = []
estimated_size_of_consecutive_chunks = 0
shard_entry['num_merges_performed'] += 1
shard_is_at_collection_version = True
with tqdm(total=num_chunks, unit=' chunks') as progress:
tasks = []
for s in shard_to_chunks:
tasks.append(
asyncio.ensure_future(merge_chunks_on_shard(s, collectionVersion, progress)))
await asyncio.gather(*tasks)
###############################################################################################
# PHASE 2 (Move-and-merge): The purpose of this phase is to move chunks, which are not
# contiguous on a shard (and couldn't be merged by Phase 1) to a shard where they could be
# further merged to adjacent chunks.
#
# This stage relies on the 'defrag_collection_est_size' fields written to every chunk from
# Phase 1 in order to calculate the most optimal move strategy.
#
async def get_chunk_size(ch):
if 'defrag_collection_est_size' in ch:
return ch['defrag_collection_est_size']
chunk_range = [ch['min'], ch['max']]
data_size_kb = await coll.data_size_kb_from_shard(chunk_range)
await coll.try_write_chunk_size(chunk_range, shard, data_size_kb)
return data_size_kb
async def move_merge_chunks_by_size(shard, idealNumChunks, progress):
# TODO move down
if args.dryrun:
return
num_chunks = await cluster.configDb.chunks.count_documents({'ns': coll.name, 'shard': shard})
async for c in cluster.configDb.chunks.find({'ns': coll.name, 'shard': shard}):
progress.update()
# Abort if we have too few chunks already
if num_chunks + 1 < idealNumChunks:
return
center_size = await get_chunk_size(c)
if center_size > target_chunk_size_kb:
continue
left_chunk = await cluster.configDb.chunks.find_one({'ns':coll.name, 'max': c['min']})
if not (left_chunk is None):
left_size = await get_chunk_size(left_chunk)
new_size = left_size + center_size
if new_size < target_chunk_size_kb * 2 and center_size <= left_size:
# TODO abort if target shard has too much data already
merge_bounds = [left_chunk['min'], c['max']]
progress.write(f'Moving chunk from {c["shard"]} to {left_chunk["shard"]}, merging {merge_bounds}, new size: {new_size}')
await coll.move_chunk(c, left_chunk['shard'])
await coll.merge_chunks([left_chunk, c], args.phase_1_perform_unsafe_merge)
await coll.try_write_chunk_size(merge_bounds, left_chunk['shard'], new_size)
num_chunks -= 1
continue
right_chunk = await cluster.configDb.chunks.find_one({'ns':coll.name, 'min': c['max']})
if not (right_chunk is None):
right_size = await get_chunk_size(left_chunk)
new_size = right_size + center_size
if new_size < target_chunk_size_kb * 2 and center_size <= right_size:
# TODO abort if target shard has too much data already
merge_bounds = [c['min'], right_chunk['max']]
progress.write(f'Moving chunk from {c["shard"]} to {right_chunk["shard"]}, merging {merge_bounds}, new size: {new_size}')
await coll.move_chunk(c, right_chunk['shard'])
await coll.merge_chunks([c, right_chunk], args.phase_1_perform_unsafe_merge)
await coll.try_write_chunk_size(merge_bounds, right_chunk['shard'], new_size)
num_chunks -= 1
continue
# Update chunk contents to latest version
with tqdm(total=num_chunks, unit=' chunks') as progress:
num_shards = await cluster.configDb.shards.count_documents({})
async for s in cluster.configDb.shards.find({}):
num_chunks = await cluster.configDb.chunks.count_documents({'ns': coll.name})
ideal_num_Chunks = num_chunks / num_shards
print(f"""Processing shard {s["_id"]}""")
await move_merge_chunks_by_size(s["_id"], ideal_num_Chunks, progress)
if __name__ == "__main__":
argsParser = argparse.ArgumentParser(
description=
"""Tool to defragment a sharded cluster in a way which minimises the rate at which the major
shard version gets bumped in order to minimise the amount of stalls due to refresh.""")
argsParser.add_argument(
'uri', help='URI of the mongos to connect to in the mongodb://[user:password@]host format',
metavar='uri', type=str)
argsParser.add_argument(
'--dryrun', help=
"""Indicates whether the script should perform actual durable changes to the cluster or just
print the commands which will be executed. If specified, it needs to be passed a value
(in MB) which indicates the target chunk size to be used for the simulation in case the
cluster doesn't have the chunkSize setting enabled. Since some phases of the script
depend on certain state of the cluster to have been reached by previous phases, if this
mode is selected, the script will stop early.""", metavar='target_chunk_size',
type=lambda x: int(x) * 1024, required=False)
argsParser.add_argument('--ns', help="""The namespace on which to perform defragmentation""",
metavar='ns', type=str, required=True)
argsParser.add_argument(
'--phase_1_reset_progress',
help="""Applies only to Phase 1 and instructs the script to clear the chunk size estimation
and merge progress which may have been made by an earlier invocation""",
action='store_true')
argsParser.add_argument(
'--phase_1_estimated_chunk_size_mb',
help="""Applies only to Phase 1 and specifies the amount of data to estimate per chunk
(in MB) before invoking dataSize in order to obtain the exact size. This value is just an
optimisation under Phase 1 order to collect as large of a candidate range to merge as
possible before invoking dataSize on the entire candidate range. Otherwise, the script
would be invoking dataSize for every single chunk and blocking for the results, which
would reduce its parallelism.
The default is chosen as 40%% of 64MB, which states that we project that under the
current 64MB chunkSize default and the way the auto-splitter operates, the collection's
chunks are only about 40%% full.
For dry-runs, because dataSize is not invoked, this parameter is also used to simulate
the exact chunk size (i.e., instead of actually calling dataSize, the script pretends
that it returned phase_1_estimated_chunk_size_mb).
""", metavar='phase_1_estimated_chunk_size_mb', dest='phase_1_estimated_chunk_size_kb',
type=lambda x: int(x) * 1024, default=64 * 1024 * 0.40)
argsParser.add_argument(
'--phase_1_perform_unsafe_merge',
help="""Applies only to Phase 1 and instructs the script to directly write the merged chunks
to the config.chunks collection rather than going through the `mergeChunks` command.""",
metavar='phase_1_perform_unsafe_merge', type=str, default='no', choices=[
'no', 'unsafe_direct_commit_against_configsvr',
'super_unsafe_direct_apply_ops_aginst_configsvr'
])
args = argsParser.parse_args()
loop = asyncio.get_event_loop()
loop.run_until_complete(main(args))
| #!/usr/bin/env python3
#
import argparse
import asyncio
import logging
import math
import pymongo
import sys
from common import Cluster, yes_no
from copy import deepcopy
from pymongo import errors as pymongo_errors
from tqdm import tqdm
# Guard against being invoked with a Python 2 interpreter
major_version = sys.version_info[0]
if major_version < 3:
    raise Exception("Must be using Python 3")
class ShardedCollection:
    """Wrapper around one sharded collection's config metadata and the admin
    commands this tool uses to defragment it (dataSize / moveChunk /
    mergeChunks), plus helpers to persist per-chunk size estimates."""

    def __init__(self, cluster, ns):
        self.cluster = cluster
        self.name = ns
        # Split the full namespace ("db.collection") once; everything after the
        # first dot is the collection name
        db_name, coll_name = ns.split('.', 1)
        self.ns = {'db': db_name, 'coll': coll_name}
        # Lazily-created direct connection to the config server replica set,
        # only needed for the unsafe merge modes
        self._direct_config_connection = None

    async def init(self):
        """Fetch this collection's UUID and shard key pattern from
        config.collections."""
        entry = await self.cluster.configDb.collections.find_one({'_id': self.name})
        self.uuid = entry['uuid']
        self.shard_key_pattern = entry['key']

    async def data_size_kb_from_shard(self, range):
        """Return the estimated size in whole kilobytes (rounded up, never less
        than 1 KB) of the shard-key range `range` ([min, max]), obtained via
        the `dataSize` command with estimate mode."""
        data_size_cmd = {
            'dataSize': self.name,
            'keyPattern': self.shard_key_pattern,
            'min': range[0],
            'max': range[1],
            'estimate': True
        }
        response = await self.cluster.client[self.ns['db']].command(
            data_size_cmd, codec_options=self.cluster.client.codec_options)
        # Clamp to at least one kilobyte, then round up to a whole KB
        size_bytes = max(float(response['size']), 1024.0)
        return math.ceil(size_bytes / 1024.0)

    async def move_chunk(self, chunk, to):
        """Move `chunk` (a config.chunks document) to the shard named `to`."""
        move_cmd = {
            'moveChunk': self.name,
            'bounds': [chunk['min'], chunk['max']],
            'to': to
        }
        await self.cluster.adminDb.command(
            move_cmd, codec_options=self.cluster.client.codec_options)

    async def merge_chunks(self, consecutive_chunks, unsafe_mode):
        """Merge a run of at least two consecutive chunks into one.

        `unsafe_mode` selects the commit strategy: 'no' goes through the
        regular `mergeChunks` command; the other two modes write directly
        against the config server (only partially implemented).
        """
        assert (len(consecutive_chunks) > 1)

        lower_bound = consecutive_chunks[0]['min']
        upper_bound = consecutive_chunks[-1]['max']

        if unsafe_mode == 'no':
            merge_cmd = {
                'mergeChunks': self.name,
                'bounds': [lower_bound, upper_bound]
            }
            await self.cluster.adminDb.command(
                merge_cmd, codec_options=self.cluster.client.codec_options)
        elif unsafe_mode == 'unsafe_direct_commit_against_configsvr':
            if not self._direct_config_connection:
                self._direct_config_connection = await self.cluster.make_direct_config_server_connection(
                )

            # TODO: Implement the unsafe_direct_commit_against_configsvr option
            raise NotImplementedError(
                'The unsafe_direct_commit_against_configsvr option is not yet implemented')
        elif unsafe_mode == 'super_unsafe_direct_apply_ops_aginst_configsvr':
            # The first chunk's document is widened to cover the whole run ...
            merged_chunk = deepcopy(consecutive_chunks[0])
            merged_chunk['max'] = upper_bound
            # TODO: Bump merged_chunk['version'] to the collection version
            merged_chunk.pop('history', None)

            update_op = {
                'op': 'u',
                'b': False,  # No upsert
                'ns': 'config.chunks',
                'o': merged_chunk,
                'o2': {
                    '_id': merged_chunk['_id']
                },
            }
            # ... and every remaining chunk document in the run is deleted
            delete_ops = [{
                'op': 'd',
                'ns': 'config.chunks',
                'o': {
                    '_id': chunk['_id']
                },
            } for chunk in consecutive_chunks[1:]]

            precondition = [
                # TODO: Include the precondition
            ]

            apply_ops_cmd = {
                'applyOps': [update_op] + delete_ops,
                'preCondition': precondition,
            }

            if not self._direct_config_connection:
                self._direct_config_connection = await self.cluster.make_direct_config_server_connection(
                )

            await self._direct_config_connection.admin.command(
                apply_ops_cmd, codec_options=self.cluster.client.codec_options)

    async def try_write_chunk_size(self, range, expected_owning_shard, size_to_write_kb):
        """Best-effort persist of the estimated size (KB) onto the
        config.chunks document covering `range` on `expected_owning_shard`.
        Any failure is logged as a warning and swallowed."""
        query = {
            'ns': self.name,
            'min': range[0],
            'max': range[1],
            'shard': expected_owning_shard
        }
        update = {'$set': {
            'defrag_collection_est_size': size_to_write_kb
        }}
        try:
            update_result = await self.cluster.configDb.chunks.update_one(query, update)
            if update_result.matched_count != 1:
                raise Exception(
                    f"Chunk [{range[0]}, {range[1]}] wasn't updated: {update_result.raw_result}")
        except Exception as ex:
            logging.warning(f'Error {ex} occurred while writing the chunk size')

    async def clear_chunk_size_estimations(self):
        """Remove every persisted 'defrag_collection_est_size' estimate for
        this collection. Returns the number of chunk documents modified."""
        result = await self.cluster.configDb.chunks.update_many(
            {'ns': self.name}, {'$unset': {
                'defrag_collection_est_size': ''
            }})
        return result.modified_count
async def main(args):
cluster = Cluster(args.uri, asyncio.get_event_loop())
await cluster.check_is_mongos(warn_only=args.dryrun)
coll = ShardedCollection(cluster, args.ns)
await coll.init()
num_chunks = await cluster.configDb.chunks.count_documents({'ns': coll.name})
print(
f"""Collection {coll.name} has a shardKeyPattern of {coll.shard_key_pattern} and {num_chunks} chunks.
For optimisation and for dry runs will assume a chunk size of {args.phase_1_estimated_chunk_size_kb} KB."""
)
###############################################################################################
# Sanity checks (Read-Only): Ensure that the balancer and auto-splitter are stopped and that the
# MaxChunkSize has been configured appropriately
#
balancer_doc = await cluster.configDb.settings.find_one({'_id': 'balancer'})
if not args.dryrun and (balancer_doc is None or balancer_doc['mode'] != 'off'):
raise Exception("""The balancer must be stopped before running this script. Please run:
sh.stopBalancer()""")
auto_splitter_doc = await cluster.configDb.settings.find_one({'_id': 'autosplit'})
if not args.dryrun and (auto_splitter_doc is None or auto_splitter_doc['enabled']):
raise Exception(
"""The auto-splitter must be disabled before running this script. Please run:
db.getSiblingDB('config').settings.update({_id:'autosplit'}, {$set: {enabled: false}}, {upsert: true})"""
)
chunk_size_doc = await cluster.configDb.settings.find_one({'_id': 'chunksize'})
if chunk_size_doc is None or chunk_size_doc['value'] < 128:
if not args.dryrun:
raise Exception(
"""The MaxChunkSize must be configured to at least 128 MB before running this script. Please run:
db.getSiblingDB('config').settings.update({_id:'chunksize'}, {$set: {value: 128}}, {upsert: true})"""
)
else:
target_chunk_size_kb = args.dryrun
else:
target_chunk_size_kb = chunk_size_doc['value'] * 1024
if args.dryrun:
print(f"""Performing a dry run with target chunk size of {target_chunk_size_kb} KB.
No actual modifications to the cluster will occur.""")
else:
yes_no(
f'The next steps will perform an actual merge with target chunk size of {target_chunk_size_kb} KB.'
)
if args.phase_1_reset_progress:
yes_no(f'Previous defragmentation progress will be reset.')
num_cleared = await coll.clear_chunk_size_estimations()
print(f'Cleared {num_cleared} already processed chunks.')
###############################################################################################
# Initialisation (Read-Only): Fetch all chunks in memory and calculate the collection version
# in preparation for the subsequent write phase.
###############################################################################################

# Map of shard id -> {'chunks': [chunk docs in shard-key order], 'num_merges_performed': int}.
shard_to_chunks = {}
# Highest chunk version ('lastmod') seen across the namespace, i.e. the collection version.
collectionVersion = None
with tqdm(total=num_chunks, unit=' chunks') as progress:
    # Iterate config.chunks for this namespace sorted by 'min' so that each shard's 'chunks'
    # list ends up in shard-key order; the contiguity checks in Phase 1 rely on this.
    async for c in cluster.configDb.chunks.find({'ns': coll.name}, sort=[('min',
                                                                          pymongo.ASCENDING)]):
        shard_id = c['shard']
        if collectionVersion is None:
            collectionVersion = c['lastmod']
        if c['lastmod'] > collectionVersion:
            collectionVersion = c['lastmod']
        if shard_id not in shard_to_chunks:
            shard_to_chunks[shard_id] = {'chunks': [], 'num_merges_performed': 0}
        shard = shard_to_chunks[shard_id]
        shard['chunks'].append(c)
        progress.update()

print(
    f'Collection version is {collectionVersion} and chunks are spread over {len(shard_to_chunks)} shards'
)

###############################################################################################
#
# WRITE PHASES START FROM HERE ONWARDS
#
###############################################################################################

###############################################################################################
# PHASE 1 (Merge-only): The purpose of this phase is to merge as many chunks as possible without
# actually moving any data. It is intended to achieve the maximum number of merged chunks with
# the minimum possible intrusion to the ongoing CRUD workload due to refresh stalls.
#
# The stage is also resumable, because for every chunk/chunk range that it processes, it will
# persist a field called 'defrag_collection_est_size' on the chunk, which estimates its size as
# of the time the script ran. Resuming Phase 1 will skip over any chunks which already contain
# this field, because it indicates that previous execution already ran and performed all the
# possible merges.
#
# These are the parameters that control the operation of this phase and their purpose is
# explained below:
max_merges_on_shards_at_less_than_collection_version = 1
max_merges_on_shards_at_collection_version = 10

# The way Phase 1 (merge-only) operates is by running:
#
#   (1) Up to `max_merges_on_shards_at_less_than_collection_version` concurrent mergeChunks
#       across all shards which are below the collection major version
#       AND
#   (2) Up to `max_merges_on_shards_at_collection_version` concurrent mergeChunks across all
#       shards which are already on the collection major version
#
# Merges due to (1) will bring the respective shard's major version to that of the collection,
# which unfortunately is interpreted by the routers as "something routing-related changed" and
# will result in refresh and a stall on the critical CRUD path. Because of this, the script only
# runs one at a time of these by default. On the other hand, merges due to (2) only increment
# the minor version and will not cause stalls on the CRUD path, so these can run with higher
# concurrency.
#
# The expectation is that at the end of this phase, not all possible defragmentation would have
# been achieved, but the number of chunks on the cluster would have been significantly reduced
# in a way that would make Phase 2 much less invasive due to refreshes after moveChunk.
#
# For example in a collection with 1 million chunks, a refresh due to moveChunk could be
# expected to take up to a second. However with the number of chunks reduced to 500,000 due to
# Phase 1, the refresh time would be on the order of ~100-200msec.
###############################################################################################
sem_at_less_than_collection_version = asyncio.Semaphore(
    max_merges_on_shards_at_less_than_collection_version)
sem_at_collection_version = asyncio.Semaphore(max_merges_on_shards_at_collection_version)
async def merge_chunks_on_shard(shard, collection_version, progress):
    """Phase 1 worker: merge runs of consecutive chunks on a single shard.

    Walks `shard_to_chunks[shard]['chunks']` (already sorted by the shard-key 'min'),
    accumulates runs of contiguous chunks and merges each run once its estimated size
    approaches `target_chunk_size_kb`. Measured sizes are persisted on the chunk
    documents ('defrag_collection_est_size') via `coll.try_write_chunk_size` so an
    interrupted run can resume without re-measuring. Cross-shard concurrency is
    throttled by the two semaphores defined above, depending on whether merging on
    this shard requires a major version bump.
    """
    shard_entry = shard_to_chunks[shard]
    shard_chunks = shard_entry['chunks']
    if len(shard_chunks) == 0:
        return

    # The shard's version is the highest 'lastmod' among its chunks. If its major (time)
    # component already equals the collection version's, merges here only bump the minor
    # version and will not stall routers.
    chunk_at_shard_version = max(shard_chunks, key=lambda c: c['lastmod'])
    shard_version = chunk_at_shard_version['lastmod']
    shard_is_at_collection_version = shard_version.time == collection_version.time
    progress.write(f'{shard}: {shard_version}: ', end='')
    if shard_is_at_collection_version:
        progress.write('Merge will start without major version bump')
    else:
        progress.write('Merge will start with a major version bump')

    # Current run of contiguous chunks being accumulated, plus its size estimate (KB).
    consecutive_chunks = []
    estimated_size_of_consecutive_chunks = 0
    num_lock_busy_errors_encountered = 0

    def lookahead(iterable):
        """Pass through all values from the given iterable, augmented by the
        information if there are more values to come after the current one
        (True), or if it is the last value (False).
        """
        # Get an iterator and pull the first value.
        it = iter(iterable)
        last = next(it)
        # Run the iterator to exhaustion (starting from the second value).
        for val in it:
            # Report the *previous* value (more to come).
            yield last, True
            last = val
        # Report the last value.
        yield last, False

    for c, has_more in lookahead(shard_chunks):
        progress.update()

        if len(consecutive_chunks) == 0:
            # Start a new run with this chunk.
            consecutive_chunks = [c]
            estimated_size_of_consecutive_chunks = args.phase_1_estimated_chunk_size_kb

            # If this is the very last chunk and its size is not cached yet, persist it
            # so that Phase 2 does not need to measure it again.
            if not args.dryrun and not has_more and not 'defrag_collection_est_size' in consecutive_chunks[0]:
                chunk_range = [consecutive_chunks[0]['min'], consecutive_chunks[0]['max']]
                data_size_kb = await coll.data_size_kb_from_shard(chunk_range)
                await coll.try_write_chunk_size(chunk_range, shard, data_size_kb)
            continue

        merge_consecutive_chunks_without_size_check = False

        if consecutive_chunks[-1]['max'] == c['min']:
            # Contiguous with the current run: extend it.
            consecutive_chunks.append(c)
            estimated_size_of_consecutive_chunks += args.phase_1_estimated_chunk_size_kb
        elif len(consecutive_chunks) == 1:
            # Single-chunk run broken by a gap: persist its size (if not cached) and
            # restart the run from the current chunk.
            if not args.dryrun and not 'defrag_collection_est_size' in consecutive_chunks[0]:
                chunk_range = [consecutive_chunks[0]['min'], consecutive_chunks[0]['max']]
                data_size_kb = await coll.data_size_kb_from_shard(chunk_range)
                await coll.try_write_chunk_size(chunk_range, shard, data_size_kb)

            consecutive_chunks = [c]
            estimated_size_of_consecutive_chunks = args.phase_1_estimated_chunk_size_kb

            if not args.dryrun and not has_more and not 'defrag_collection_est_size' in consecutive_chunks[0]:
                chunk_range = [consecutive_chunks[0]['min'], consecutive_chunks[0]['max']]
                data_size_kb = await coll.data_size_kb_from_shard(chunk_range)
                await coll.try_write_chunk_size(chunk_range, shard, data_size_kb)
            continue
        else:
            # Multi-chunk run broken by a gap: merge what has been accumulated so far,
            # without waiting for the size threshold.
            merge_consecutive_chunks_without_size_check = True

        # To proceed to this stage we must have at least 2 consecutive chunks as candidates to
        # be merged
        assert (len(consecutive_chunks) > 1)

        # After we have collected a run of chunks whose estimated size is 90% of the maximum
        # chunk size, invoke `dataSize` in order to determine whether we can merge them or if
        # we should continue adding more chunks to be merged
        if (estimated_size_of_consecutive_chunks < target_chunk_size_kb * 0.90
            ) and not merge_consecutive_chunks_without_size_check and has_more:
            continue

        merge_bounds = [consecutive_chunks[0]['min'], consecutive_chunks[-1]['max']]

        # Determine the "exact" (not 100% exact because we use the 'estimate' option) size of
        # the currently accumulated bounds via the `dataSize` command in order to decide
        # whether this run should be merged or if we should continue adding chunks to it.
        actual_size_of_consecutive_chunks = estimated_size_of_consecutive_chunks
        if not args.dryrun:
            actual_size_of_consecutive_chunks = await coll.data_size_kb_from_shard(merge_bounds)

        if merge_consecutive_chunks_without_size_check or not has_more:
            pass
        elif actual_size_of_consecutive_chunks < target_chunk_size_kb * 0.75:
            # If the actual range size is still 25% less than the target size, continue adding
            # consecutive chunks
            estimated_size_of_consecutive_chunks = actual_size_of_consecutive_chunks
            continue
        elif actual_size_of_consecutive_chunks > target_chunk_size_kb * 1.10:
            # TODO: If the actual range size is 10% more than the target size, use `splitVector`
            # to determine a better merge/split sequence so as not to generate huge chunks which
            # will have to be split later on
            pass

        # Perform the actual merge, obeying the configured concurrency
        async with (sem_at_collection_version
                    if shard_is_at_collection_version else sem_at_less_than_collection_version):
            if not args.dryrun:
                try:
                    await coll.merge_chunks(consecutive_chunks,
                                            args.phase_1_perform_unsafe_merge)
                    await coll.try_write_chunk_size(merge_bounds, shard,
                                                    actual_size_of_consecutive_chunks)
                except pymongo_errors.OperationFailure as ex:
                    if ex.details['code'] == 46:  # The code for LockBusy
                        # Warn only once per shard to avoid log spam on old servers.
                        num_lock_busy_errors_encountered += 1
                        if num_lock_busy_errors_encountered == 1:
                            logging.warning(
                                f"""Lock error occurred while trying to merge chunk range {merge_bounds}.
This indicates the presence of an older MongoDB version.""")
                    else:
                        raise
            else:
                progress.write(
                    f'Merging {len(consecutive_chunks)} consecutive chunks on {shard}: {merge_bounds}'
                )

        # Reset the accumulator so far. If we are merging due to
        # merge_consecutive_chunks_without_size_check, need to make sure that we don't forget
        # the current entry since it is not part of the run
        if merge_consecutive_chunks_without_size_check:
            consecutive_chunks = [c]
            estimated_size_of_consecutive_chunks = args.phase_1_estimated_chunk_size_kb
        else:
            consecutive_chunks = []
            estimated_size_of_consecutive_chunks = 0

        shard_entry['num_merges_performed'] += 1
        # After the first merge the shard is guaranteed to be at the collection major version.
        shard_is_at_collection_version = True
# Launch the Phase 1 merge worker for every shard concurrently; the two semaphores defined
# above bound how many mergeChunks commands are actually in flight at any moment.
with tqdm(total=num_chunks, unit=' chunks') as progress:
    tasks = []
    for s in shard_to_chunks:
        tasks.append(
            asyncio.ensure_future(merge_chunks_on_shard(s, collectionVersion, progress)))
    await asyncio.gather(*tasks)
###############################################################################################
# PHASE 2 (Move-and-merge): The purpose of this phase is to move chunks, which are not
# contiguous on a shard (and couldn't be merged by Phase 1) to a shard where they could be
# further merged to adjacent chunks.
#
# This stage relies on the 'defrag_collection_est_size' fields written to every chunk from
# Phase 1 in order to calculate the most optimal move strategy.
#
###############################################################################################


async def get_chunk_size(ch):
    """Return the size estimate (in KB) for the chunk document `ch`.

    Prefers the 'defrag_collection_est_size' value cached on the chunk by Phase 1.
    Otherwise measures the chunk's range via dataSize, persists the result on the
    chunk document (so a resumed run will not re-measure), and returns it.
    """
    if 'defrag_collection_est_size' in ch:
        return ch['defrag_collection_est_size']

    chunk_range = [ch['min'], ch['max']]
    data_size_kb = await coll.data_size_kb_from_shard(chunk_range)
    # Bug fix: the original passed the free variable `shard`, which at this point in the
    # script is the {'chunks': ..., 'num_merges_performed': ...} dict leaked from the
    # initialisation loop above — not a shard id. Record the size under the chunk's own
    # owning shard instead, matching how Phase 1 calls try_write_chunk_size.
    await coll.try_write_chunk_size(chunk_range, ch['shard'], data_size_kb)
    return data_size_kb
async def move_merge_chunks_by_size(shard, idealNumChunks, progress):
    """Phase 2 worker: move small chunks on `shard` next to a neighbour and merge them.

    For every chunk on the shard that is below the target size, try the left (lower
    range) neighbour first, then the right one: if the combined size stays under twice
    the target and the neighbour is at least as large as the chunk, move the chunk to
    the neighbour's shard, merge the pair there and persist the new size estimate.
    Returns early once the shard is down to roughly the ideal number of chunks.
    """
    # TODO move down
    if args.dryrun:
        # Phase 2 performs real moveChunk/mergeChunks only; nothing to simulate.
        return

    num_chunks = await cluster.configDb.chunks.count_documents({'ns': coll.name, 'shard': shard})
    async for c in cluster.configDb.chunks.find({'ns': coll.name, 'shard': shard}):
        progress.update()

        # Abort if we have too few chunks already
        if num_chunks + 1 < idealNumChunks:
            return

        center_size = await get_chunk_size(c)
        if center_size > target_chunk_size_kb:
            # Already at/above target size; leave this chunk alone.
            continue

        # Prefer merging into the left (lower-range) neighbour.
        left_chunk = await cluster.configDb.chunks.find_one({'ns': coll.name, 'max': c['min']})
        if left_chunk is not None:
            left_size = await get_chunk_size(left_chunk)
            new_size = left_size + center_size
            if new_size < target_chunk_size_kb * 2 and center_size <= left_size:
                # TODO abort if target shard has too much data already
                merge_bounds = [left_chunk['min'], c['max']]
                progress.write(f'Moving chunk from {c["shard"]} to {left_chunk["shard"]}, merging {merge_bounds}, new size: {new_size}')
                await coll.move_chunk(c, left_chunk['shard'])
                await coll.merge_chunks([left_chunk, c], args.phase_1_perform_unsafe_merge)
                await coll.try_write_chunk_size(merge_bounds, left_chunk['shard'], new_size)
                num_chunks -= 1
                continue

        # Otherwise try the right (higher-range) neighbour.
        right_chunk = await cluster.configDb.chunks.find_one({'ns': coll.name, 'min': c['max']})
        if right_chunk is not None:
            # Bug fix: the original called get_chunk_size(left_chunk) here, which sized the
            # wrong chunk and raised a TypeError whenever left_chunk was None.
            right_size = await get_chunk_size(right_chunk)
            new_size = right_size + center_size
            if new_size < target_chunk_size_kb * 2 and center_size <= right_size:
                # TODO abort if target shard has too much data already
                merge_bounds = [c['min'], right_chunk['max']]
                progress.write(f'Moving chunk from {c["shard"]} to {right_chunk["shard"]}, merging {merge_bounds}, new size: {new_size}')
                await coll.move_chunk(c, right_chunk['shard'])
                await coll.merge_chunks([c, right_chunk], args.phase_1_perform_unsafe_merge)
                await coll.try_write_chunk_size(merge_bounds, right_chunk['shard'], new_size)
                num_chunks -= 1
                continue
# --- Phase 2 driver --------------------------------------------------------------------------
# Update chunk contents to latest version
with tqdm(total=num_chunks, unit=' chunks') as progress:
    num_shards = await cluster.configDb.shards.count_documents({})
    async for s in cluster.configDb.shards.find({}):
        # NOTE(review): this recounts the chunks of the *whole* collection (not just this
        # shard) on every iteration and rebinds the outer `num_chunks`; the per-shard ideal
        # is then total/num_shards. That reads like an even-balance target, but confirm it
        # is intentional rather than a missing {'shard': ...} filter.
        num_chunks = await cluster.configDb.chunks.count_documents({'ns': coll.name})
        ideal_num_Chunks = num_chunks / num_shards
        print(f"""Processing shard {s["_id"]}""")
        await move_merge_chunks_by_size(s["_id"], ideal_num_Chunks, progress)
if __name__ == "__main__":
    # Command-line entry point: parse arguments and run the async defragmentation driver.
    argsParser = argparse.ArgumentParser(
        description=
        """Tool to defragment a sharded cluster in a way which minimises the rate at which the major
shard version gets bumped in order to minimise the amount of stalls due to refresh.""")

    # Positional: mongos connection string.
    argsParser.add_argument(
        'uri', help='URI of the mongos to connect to in the mongodb://[user:password@]host format',
        metavar='uri', type=str)

    # --dryrun doubles as a flag and a value: when given, it carries the simulated target
    # chunk size, converted from MB to KB by the `type` callable below.
    argsParser.add_argument(
        '--dryrun', help=
        """Indicates whether the script should perform actual durable changes to the cluster or just
print the commands which will be executed. If specified, it needs to be passed a value
(in MB) which indicates the target chunk size to be used for the simulation in case the
cluster doesn't have the chunkSize setting enabled. Since some phases of the script
depend on certain state of the cluster to have been reached by previous phases, if this
mode is selected, the script will stop early.""", metavar='target_chunk_size',
        type=lambda x: int(x) * 1024, required=False)

    argsParser.add_argument('--ns', help="""The namespace on which to perform defragmentation""",
                            metavar='ns', type=str, required=True)

    argsParser.add_argument(
        '--phase_1_reset_progress',
        help="""Applies only to Phase 1 and instructs the script to clear the chunk size estimation
and merge progress which may have been made by an earlier invocation""",
        action='store_true')

    # Note: exposed in MB for the user, but stored in KB (dest=..._kb, type converts).
    argsParser.add_argument(
        '--phase_1_estimated_chunk_size_mb',
        help="""Applies only to Phase 1 and specifies the amount of data to estimate per chunk
(in MB) before invoking dataSize in order to obtain the exact size. This value is just an
optimisation under Phase 1 order to collect as large of a candidate range to merge as
possible before invoking dataSize on the entire candidate range. Otherwise, the script
would be invoking dataSize for every single chunk and blocking for the results, which
would reduce its parallelism.

The default is chosen as 40%% of 64MB, which states that we project that under the
current 64MB chunkSize default and the way the auto-splitter operates, the collection's
chunks are only about 40%% full.

For dry-runs, because dataSize is not invoked, this parameter is also used to simulate
the exact chunk size (i.e., instead of actually calling dataSize, the script pretends
that it returned phase_1_estimated_chunk_size_mb).
""", metavar='phase_1_estimated_chunk_size_mb', dest='phase_1_estimated_chunk_size_kb',
        type=lambda x: int(x) * 1024, default=64 * 1024 * 0.40)

    # NOTE(review): the 'aginst' typo in the last choice value is preserved on purpose —
    # changing it would break existing invocations that pass that literal value.
    argsParser.add_argument(
        '--phase_1_perform_unsafe_merge',
        help="""Applies only to Phase 1 and instructs the script to directly write the merged chunks
to the config.chunks collection rather than going through the `mergeChunks` command.""",
        metavar='phase_1_perform_unsafe_merge', type=str, default='no', choices=[
            'no', 'unsafe_direct_commit_against_configsvr',
            'super_unsafe_direct_apply_ops_aginst_configsvr'
        ])

    args = argsParser.parse_args()

    # NOTE(review): asyncio.get_event_loop() outside a running loop is deprecated since
    # Python 3.10; asyncio.run(main(args)) is the modern equivalent — confirm the target
    # Python version before changing.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main(args))
############################################################################################### ############################################################################################### # # WRITE PHASES START FROM HERE ONWARDS # ############################################################################################### ############################################################################################### # PHASE 1 (Merge-only): The purpose of this phase is to merge as many chunks as possible without # actually moving any data. It is intended to achieve the maximum number of merged chunks with # the minimum possible intrusion to the ongoing CRUD workload due to refresh stalls. # # The stage is also resumable, because for every chunk/chunk range that it processes, it will # persist a field called 'defrag_collection_est_size' on the chunk, which estimates its size as # of the time the script ran. Resuming Phase 1 will skip over any chunks which already contain # this field, because it indicates that previous execution already ran and performed all the # possible merges. # # These are the parameters that control the operation of this phase and their purpose is # explaned below: # The way Phase 1 (merge-only) operates is by running: # # (1) Up to `max_merges_on_shards_at_less_than_collection_version` concurrent mergeChunks # across all shards which are below the collection major version # AND # (2) Up to `max_merges_on_shards_at_collection_version` concurrent mergeChunks across all # shards which are already on the collection major version # # Merges due to (1) will bring the respective shard's major version to that of the collection, # which unfortunately is interpreted by the routers as "something routing-related changed" and # will result in refresh and a stall on the critical CRUD path. Because of this, the script only # runs one at a time of these by default. 
On the other hand, merges due to (2) only increment # the minor version and will not cause stalls on the CRUD path, so these can run with higher # concurrency. # # The expectation is that at the end of this phase, not all possible defragmentation would have # been achieved, but the number of chunks on the cluster would have been significantly reduced # in a way that would make Phase 2 much less invasive due to refreshes after moveChunk. # # For example in a collection with 1 million chunks, a refresh due to moveChunk could be # expected to take up to a second. However with the number of chunks reduced to 500,000 due to # Phase 1, the refresh time would be on the order of ~100-200msec. ############################################################################################### Pass through all values from the given iterable, augmented by the information if there are more values to come after the current one (True), or if it is the last value (False). # Get an iterator and pull the first value. # Run the iterator to exhaustion (starting from the second value). # Report the *previous* value (more to come). # Report the last value. # To proceed to this stage we must have at least 2 consecutive chunks as candidates to # be merged # After we have collected a run of chunks whose estimated size is 90% of the maximum # chunk size, invoke `dataSize` in order to determine whether we can merge them or if # we should continue adding more chunks to be merged # Determine the "exact" (not 100% exact because we use the 'estimate' option) size of # the currently accumulated bounds via the `dataSize` command in order to decide # whether this run should be merged or if we should continue adding chunks to it. 
# If the actual range size is sill 25% less than the target size, continue adding # consecutive chunks # TODO: If the actual range size is 10% more than the target size, use `splitVector` # to determine a better merge/split sequence so as not to generate huge chunks which # will have to be split later on # Perform the actual merge, obeying the configured concurrency # The code for LockBusy Lock error occurred while trying to merge chunk range {merge_bounds}. This indicates the presence of an older MongoDB version. # Reset the accumulator so far. If we are merging due to # merge_consecutive_chunks_without_size_check, need to make sure that we don't forget # the current entry since it is not part of the run ############################################################################################### # PHASE 2 (Move-and-merge): The purpose of this phase is to move chunks, which are not # contiguous on a shard (and couldn't be merged by Phase 1) to a shard where they could be # further merged to adjacent chunks. # # This stage relies on the 'defrag_collection_est_size' fields written to every chunk from # Phase 1 in order to calculate the most optimal move strategy. # # TODO move down # Abort if we have too few chunks already # TODO abort if target shard has too much data already # TODO abort if target shard has too much data already # Update chunk contents to latest version Processing shard {s["_id"]} Tool to defragment a sharded cluster in a way which minimises the rate at which the major shard version gets bumped in order to minimise the amount of stalls due to refresh. Indicates whether the script should perform actual durable changes to the cluster or just print the commands which will be executed. If specified, it needs to be passed a value (in MB) which indicates the target chunk size to be used for the simulation in case the cluster doesn't have the chunkSize setting enabled. 
Since some phases of the script depend on certain state of the cluster to have been reached by previous phases, if this mode is selected, the script will stop early. The namespace on which to perform defragmentation Applies only to Phase 1 and instructs the script to clear the chunk size estimation and merge progress which may have been made by an earlier invocation Applies only to Phase 1 and specifies the amount of data to estimate per chunk (in MB) before invoking dataSize in order to obtain the exact size. This value is just an optimisation under Phase 1 order to collect as large of a candidate range to merge as possible before invoking dataSize on the entire candidate range. Otherwise, the script would be invoking dataSize for every single chunk and blocking for the results, which would reduce its parallelism. The default is chosen as 40%% of 64MB, which states that we project that under the current 64MB chunkSize default and the way the auto-splitter operates, the collection's chunks are only about 40%% full. For dry-runs, because dataSize is not invoked, this parameter is also used to simulate the exact chunk size (i.e., instead of actually calling dataSize, the script pretends that it returned phase_1_estimated_chunk_size_mb). Applies only to Phase 1 and instructs the script to directly write the merged chunks to the config.chunks collection rather than going through the `mergeChunks` command. | 2.104757 | 2 |
refreshDB.py | sartho/GreenAnt | 0 | 6612984 | from Myna import db
from Myna.models import User
u=User.query.filter_by(username='admin').first()
print (u.id)
db.session.delete(u)
db.session.commit() | from Myna import db
from Myna.models import User
u=User.query.filter_by(username='admin').first()
print (u.id)
db.session.delete(u)
db.session.commit() | none | 1 | 2.183167 | 2 | |
cogs/roles.py | Drowrin/Weeabot | 5 | 6612985 | from collections import defaultdict
import discord
from discord.ext import commands
import checks
from cogs.requestsystem import request
from Weeabot import Weeabot
class Roles:
def __init__(self, bot: Weeabot):
self.bot = bot
async def check_config(self, ctx):
if ctx.message.server.id not in self.bot.server_configs:
self.bot.server_configs[ctx.message.server.id] = {}
if 'hidden_channels' not in self.bot.server_configs[ctx.message.server.id]:
self.bot.server_configs[ctx.message.server.id]['hidden_channels'] = {}
async def get_roles_list(self, ctx):
await self.check_config(ctx)
await self.update_roles(ctx)
roles_list = defaultdict(list)
for chan, r in self.bot.server_configs[ctx.message.server.id]["hidden_channels"].items():
chann = ctx.message.server.get_channel(chan)
for role in r:
roles_list[role].append(chann)
return roles_list
async def update_roles(self, ctx):
for chan_id, r in self.bot.server_configs[ctx.message.server.id]['hidden_channels'].items():
rs = [t[0].id for t in ctx.message.server.get_channel(chan_id).overwrites if t[1].read_messages]
self.bot.server_configs[ctx.message.server.id]['hidden_channels'][chan_id] = rs
self.bot.dump_server_configs()
@commands.command(pass_context=True)
@checks.is_server_owner()
async def hide(self, ctx):
await self.check_config(ctx)
await self.bot.edit_channel_permissions(
channel=ctx.message.channel,
target=ctx.message.server.default_role,
overwrite=discord.PermissionOverwrite(read_messages=False)
)
self.bot.server_configs[ctx.message.server.id]['hidden_channels'][ctx.message.channel.id] = []
await self.update_roles(ctx)
@commands.command(pass_context=True)
@checks.is_server_owner()
async def unhide(self, ctx):
await self.check_config(ctx)
for t in ctx.message.channel.overwrites:
await self.bot.delete_channel_permissions(
channel=ctx.message.channel,
target=t[0]
)
del self.bot.server_configs[ctx.message.server.id]['hidden_channels'][ctx.message.channel.id]
await self.update_roles(ctx)
@commands.command(pass_context=True)
@request()
@checks.is_server_owner()
async def make_channel(self, ctx, channel_name, role_name):
await self.check_config(ctx)
try:
everyone_perms = discord.PermissionOverwrite(read_messages=False)
everyone = discord.ChannelPermissions(target=ctx.message.server.default_role, overwrite=everyone_perms)
can_read = discord.PermissionOverwrite(read_messages=True)
new_role = await self.bot.create_role(ctx.message.server, name=role_name)
channel = await self.bot.create_channel(ctx.message.server, channel_name, everyone, (new_role, can_read))
await self.bot.add_roles(ctx.message.author, new_role)
self.bot.server_configs[ctx.message.server.id]['hidden_channels'][channel.id] = [new_role.id]
except discord.errors.HTTPException:
await self.bot.say("Invalid name or that name is taken. Names must be alphanumeric.")
@commands.command(pass_context=True)
async def roles(self, ctx):
roles = await self.get_roles_list(ctx)
e: discord.Embed = discord.Embed()
for role, channels in roles.items():
try:
role_name = commands.RoleConverter(ctx, role).convert().name
message = '\n'.join([f'__{channel.name}__\n\t{channel.topic}' for channel in channels])
e.add_field(name=role_name, value=message, inline=False)
except commands.BadArgument:
pass
await self.bot.say('**Opt-in Roles**', embed=e)
@commands.command(pass_context=True)
async def makeme(self, ctx, *, role: discord.Role):
roles = await self.get_roles_list(ctx)
if role.id not in roles:
await self.bot.say("Sorry, that role isn't an opt-in role.")
return
await self.bot.add_roles(ctx.message.author, role)
def setup(bot):
bot.add_cog(Roles(bot))
| from collections import defaultdict
import discord
from discord.ext import commands
import checks
from cogs.requestsystem import request
from Weeabot import Weeabot
class Roles:
def __init__(self, bot: Weeabot):
self.bot = bot
async def check_config(self, ctx):
if ctx.message.server.id not in self.bot.server_configs:
self.bot.server_configs[ctx.message.server.id] = {}
if 'hidden_channels' not in self.bot.server_configs[ctx.message.server.id]:
self.bot.server_configs[ctx.message.server.id]['hidden_channels'] = {}
async def get_roles_list(self, ctx):
await self.check_config(ctx)
await self.update_roles(ctx)
roles_list = defaultdict(list)
for chan, r in self.bot.server_configs[ctx.message.server.id]["hidden_channels"].items():
chann = ctx.message.server.get_channel(chan)
for role in r:
roles_list[role].append(chann)
return roles_list
async def update_roles(self, ctx):
for chan_id, r in self.bot.server_configs[ctx.message.server.id]['hidden_channels'].items():
rs = [t[0].id for t in ctx.message.server.get_channel(chan_id).overwrites if t[1].read_messages]
self.bot.server_configs[ctx.message.server.id]['hidden_channels'][chan_id] = rs
self.bot.dump_server_configs()
@commands.command(pass_context=True)
@checks.is_server_owner()
async def hide(self, ctx):
await self.check_config(ctx)
await self.bot.edit_channel_permissions(
channel=ctx.message.channel,
target=ctx.message.server.default_role,
overwrite=discord.PermissionOverwrite(read_messages=False)
)
self.bot.server_configs[ctx.message.server.id]['hidden_channels'][ctx.message.channel.id] = []
await self.update_roles(ctx)
@commands.command(pass_context=True)
@checks.is_server_owner()
async def unhide(self, ctx):
await self.check_config(ctx)
for t in ctx.message.channel.overwrites:
await self.bot.delete_channel_permissions(
channel=ctx.message.channel,
target=t[0]
)
del self.bot.server_configs[ctx.message.server.id]['hidden_channels'][ctx.message.channel.id]
await self.update_roles(ctx)
@commands.command(pass_context=True)
@request()
@checks.is_server_owner()
async def make_channel(self, ctx, channel_name, role_name):
await self.check_config(ctx)
try:
everyone_perms = discord.PermissionOverwrite(read_messages=False)
everyone = discord.ChannelPermissions(target=ctx.message.server.default_role, overwrite=everyone_perms)
can_read = discord.PermissionOverwrite(read_messages=True)
new_role = await self.bot.create_role(ctx.message.server, name=role_name)
channel = await self.bot.create_channel(ctx.message.server, channel_name, everyone, (new_role, can_read))
await self.bot.add_roles(ctx.message.author, new_role)
self.bot.server_configs[ctx.message.server.id]['hidden_channels'][channel.id] = [new_role.id]
except discord.errors.HTTPException:
await self.bot.say("Invalid name or that name is taken. Names must be alphanumeric.")
@commands.command(pass_context=True)
async def roles(self, ctx):
roles = await self.get_roles_list(ctx)
e: discord.Embed = discord.Embed()
for role, channels in roles.items():
try:
role_name = commands.RoleConverter(ctx, role).convert().name
message = '\n'.join([f'__{channel.name}__\n\t{channel.topic}' for channel in channels])
e.add_field(name=role_name, value=message, inline=False)
except commands.BadArgument:
pass
await self.bot.say('**Opt-in Roles**', embed=e)
@commands.command(pass_context=True)
async def makeme(self, ctx, *, role: discord.Role):
roles = await self.get_roles_list(ctx)
if role.id not in roles:
await self.bot.say("Sorry, that role isn't an opt-in role.")
return
await self.bot.add_roles(ctx.message.author, role)
def setup(bot):
bot.add_cog(Roles(bot))
| none | 1 | 2.326673 | 2 | |
google-cloud-sdk/lib/googlecloudsdk/command_lib/meta/cache_util.py | bopopescu/searchparty | 0 | 6612986 | <reponame>bopopescu/searchparty
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The meta cache command library support."""
from googlecloudsdk.api_lib.util import apis_util
from googlecloudsdk.calliope import parser_completer
from googlecloudsdk.calliope import walker
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import module_util
from googlecloudsdk.core import resources
from googlecloudsdk.core.cache import exceptions as cache_exceptions
from googlecloudsdk.core.cache import file_cache
from googlecloudsdk.core.cache import resource_cache
_CACHE_RI_DEFAULT = 'resource://'
class Error(exceptions.Error):
"""Base cache exception."""
class NoTablesMatched(Error):
"""No table names matched the patterns."""
def GetCache(name, create=False):
"""Returns the cache given a cache indentfier name.
Args:
name: The cache name to operate on. May be prefixed by "resource://" for
resource cache names or "file://" for persistent file cache names. If
only the prefix is specified then the default cache name for that prefix
is used.
create: Creates the persistent cache if it exists if True.
Raises:
CacheNotFound: If the cache does not exist.
Returns:
The cache object.
"""
types = {
'file': file_cache.Cache,
'resource': resource_cache.ResourceCache,
}
def _OpenCache(cache_class, name):
try:
return cache_class(name, create=create)
except cache_exceptions.Error as e:
raise Error(e)
if name:
for cache_id, cache_class in types.iteritems():
if name.startswith(cache_id + '://'):
name = name[len(cache_id) + 3:]
if not name:
name = None
return _OpenCache(cache_class, name)
return _OpenCache(resource_cache.Cache, name)
def AddCacheFlag(parser):
"""Adds the persistent cache flag to the parser."""
parser.add_argument(
'--cache',
metavar='CACHE_NAME',
default='resource://',
help=('The cache name to operate on. May be prefixed by '
'"resource://" for resource cache names. If only the prefix is '
'specified then the default cache name for that prefix is used.'))
class _CompleterModule(object):
def __init__(self, module_path, collection, api_version):
self.module_path = module_path
self.collection = collection
self.api_version = api_version
self.attachments = []
self._attachments_dict = {}
class _CompleterAttachment(object):
def __init__(self, command):
self.command = command
self.arguments = []
class _CompleterModuleGenerator(walker.Walker):
"""Constructs a CLI command dict tree."""
def __init__(self, cli):
super(_CompleterModuleGenerator, self).__init__(cli)
self._modules_dict = {}
def Visit(self, command, parent, is_group):
"""Visits each command in the CLI command tree to construct the module list.
Args:
command: group/command CommandCommon info.
parent: The parent Visit() return value, None at the top level.
is_group: True if command is a group, otherwise its is a command.
Returns:
The subtree module list.
"""
args = command.ai
for arg in sorted(args.flag_args + args.positional_args):
try:
completer_class = arg.completer
except AttributeError:
continue
collection = None
api_version = None
if isinstance(completer_class, parser_completer.ArgumentCompleter):
completer_class = completer_class.completer_class
module_path = module_util.GetModulePath(completer_class)
if isinstance(completer_class, type):
try:
completer = completer_class()
try:
collection = completer.collection
except AttributeError:
pass
try:
api_version = completer.api_version
except AttributeError:
pass
except (apis_util.UnknownAPIError,
resources.InvalidCollectionException) as e:
collection = u'ERROR: {}'.format(e)
if arg.option_strings:
name = arg.option_strings[0]
else:
name = arg.dest.replace('_', '-')
module = self._modules_dict.get(module_path)
if not module:
module = _CompleterModule(
collection=collection,
api_version=api_version,
module_path=module_path,
)
self._modules_dict[module_path] = module
command_path = ' '.join(command.GetPath())
# pylint: disable=protected-access
attachment = module._attachments_dict.get(command_path)
if not attachment:
attachment = _CompleterAttachment(command_path)
module._attachments_dict[command_path] = attachment
module.attachments.append(attachment)
attachment.arguments.append(name)
return self._modules_dict
def ListAttachedCompleters(cli):
"""Returns the list of all attached CompleterModule objects in cli."""
return _CompleterModuleGenerator(cli).Walk().values()
| # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The meta cache command library support."""
from googlecloudsdk.api_lib.util import apis_util
from googlecloudsdk.calliope import parser_completer
from googlecloudsdk.calliope import walker
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import module_util
from googlecloudsdk.core import resources
from googlecloudsdk.core.cache import exceptions as cache_exceptions
from googlecloudsdk.core.cache import file_cache
from googlecloudsdk.core.cache import resource_cache
_CACHE_RI_DEFAULT = 'resource://'
class Error(exceptions.Error):
"""Base cache exception."""
class NoTablesMatched(Error):
"""No table names matched the patterns."""
def GetCache(name, create=False):
"""Returns the cache given a cache indentfier name.
Args:
name: The cache name to operate on. May be prefixed by "resource://" for
resource cache names or "file://" for persistent file cache names. If
only the prefix is specified then the default cache name for that prefix
is used.
create: Creates the persistent cache if it exists if True.
Raises:
CacheNotFound: If the cache does not exist.
Returns:
The cache object.
"""
types = {
'file': file_cache.Cache,
'resource': resource_cache.ResourceCache,
}
def _OpenCache(cache_class, name):
try:
return cache_class(name, create=create)
except cache_exceptions.Error as e:
raise Error(e)
if name:
for cache_id, cache_class in types.iteritems():
if name.startswith(cache_id + '://'):
name = name[len(cache_id) + 3:]
if not name:
name = None
return _OpenCache(cache_class, name)
return _OpenCache(resource_cache.Cache, name)
def AddCacheFlag(parser):
"""Adds the persistent cache flag to the parser."""
parser.add_argument(
'--cache',
metavar='CACHE_NAME',
default='resource://',
help=('The cache name to operate on. May be prefixed by '
'"resource://" for resource cache names. If only the prefix is '
'specified then the default cache name for that prefix is used.'))
class _CompleterModule(object):
def __init__(self, module_path, collection, api_version):
self.module_path = module_path
self.collection = collection
self.api_version = api_version
self.attachments = []
self._attachments_dict = {}
class _CompleterAttachment(object):
def __init__(self, command):
self.command = command
self.arguments = []
class _CompleterModuleGenerator(walker.Walker):
"""Constructs a CLI command dict tree."""
def __init__(self, cli):
super(_CompleterModuleGenerator, self).__init__(cli)
self._modules_dict = {}
def Visit(self, command, parent, is_group):
"""Visits each command in the CLI command tree to construct the module list.
Args:
command: group/command CommandCommon info.
parent: The parent Visit() return value, None at the top level.
is_group: True if command is a group, otherwise its is a command.
Returns:
The subtree module list.
"""
args = command.ai
for arg in sorted(args.flag_args + args.positional_args):
try:
completer_class = arg.completer
except AttributeError:
continue
collection = None
api_version = None
if isinstance(completer_class, parser_completer.ArgumentCompleter):
completer_class = completer_class.completer_class
module_path = module_util.GetModulePath(completer_class)
if isinstance(completer_class, type):
try:
completer = completer_class()
try:
collection = completer.collection
except AttributeError:
pass
try:
api_version = completer.api_version
except AttributeError:
pass
except (apis_util.UnknownAPIError,
resources.InvalidCollectionException) as e:
collection = u'ERROR: {}'.format(e)
if arg.option_strings:
name = arg.option_strings[0]
else:
name = arg.dest.replace('_', '-')
module = self._modules_dict.get(module_path)
if not module:
module = _CompleterModule(
collection=collection,
api_version=api_version,
module_path=module_path,
)
self._modules_dict[module_path] = module
command_path = ' '.join(command.GetPath())
# pylint: disable=protected-access
attachment = module._attachments_dict.get(command_path)
if not attachment:
attachment = _CompleterAttachment(command_path)
module._attachments_dict[command_path] = attachment
module.attachments.append(attachment)
attachment.arguments.append(name)
return self._modules_dict
def ListAttachedCompleters(cli):
"""Returns the list of all attached CompleterModule objects in cli."""
return _CompleterModuleGenerator(cli).Walk().values() | en | 0.737321 | # Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. The meta cache command library support. Base cache exception. No table names matched the patterns. Returns the cache given a cache indentfier name. Args: name: The cache name to operate on. May be prefixed by "resource://" for resource cache names or "file://" for persistent file cache names. If only the prefix is specified then the default cache name for that prefix is used. create: Creates the persistent cache if it exists if True. Raises: CacheNotFound: If the cache does not exist. Returns: The cache object. Adds the persistent cache flag to the parser. Constructs a CLI command dict tree. Visits each command in the CLI command tree to construct the module list. Args: command: group/command CommandCommon info. parent: The parent Visit() return value, None at the top level. is_group: True if command is a group, otherwise its is a command. Returns: The subtree module list. # pylint: disable=protected-access Returns the list of all attached CompleterModule objects in cli. | 2.000838 | 2 |
thehivebackup/backup.py | IFX-CDC/thehivebackup | 3 | 6612987 | import datetime
import json
import os
from multiprocessing import Pool
import urllib3
from thehive4py.api import TheHiveApi
from thehive4py.query import Between
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class Backupper:
def __init__(self, backupdir: str, url: str, api_key: str, org: str = None, verify: bool = True):
url = url.strip("/")
self.api = TheHiveApi(url, api_key, organisation=org)
if not verify:
self.api.cert = False
self.backupdir = f'{backupdir}-{int(datetime.datetime.utcnow().timestamp())}'
os.makedirs(self.backupdir, exist_ok=True)
self.case_file = os.path.join(self.backupdir, 'cases.jsonl')
self.alert_file = os.path.join(self.backupdir, 'alerts.jsonl')
def get_file(self, attachment_id: str):
os.makedirs(os.path.join(self.backupdir, 'attachments'), exist_ok=True)
response = self.api.download_attachment(attachment_id)
with open(os.path.join(self.backupdir, 'attachments', attachment_id), 'wb') as io:
io.write(response.content)
def backup_cases_all(self) -> [dict]:
cases = self.api.find_cases(query={}, sort=['-createdAt'], range='all').json()
self._backup_cases(cases)
def backup_cases_range(self, start, end) -> [dict]:
query = Between("createdAt", start, end)
cases = self.api.find_cases(query=query, sort=['-createdAt'], range='all').json()
self._backup_cases(cases)
def _backup_cases(self, cases: [dict]):
with open(self.case_file, 'w+', encoding='utf8') as io:
with Pool(processes=8) as pool:
for case in cases:
json.dump(case, io)
io.write('\n')
pool.map(self._backup_case, cases)
pool.close()
pool.join()
def _backup_case(self, case: dict):
self.backup_observables(case['id'])
self.backup_tasks(case['id'])
def backup_tasks(self, case_id: str) -> [dict]:
tasks = self.api.get_case_tasks(case_id=case_id).json()
if tasks:
case_path = os.path.join(self.backupdir, 'cases', case_id)
os.makedirs(case_path, exist_ok=True)
with open(os.path.join(case_path, 'tasks.jsonl'), 'w+', encoding='utf8') as io:
for task in tasks:
json.dump(task, io)
io.write('\n')
self.backup_logs(case_id, task['id'])
def backup_logs(self, case_id: str, task_id: str) -> [dict]:
logs = self.api.get_task_logs(task_id).json()
if logs:
task_path = os.path.join(self.backupdir, 'cases', case_id, 'tasks', task_id)
os.makedirs(task_path, exist_ok=True)
with open(os.path.join(task_path, 'logs.jsonl'), 'w+', encoding='utf8') as io:
for log in logs:
json.dump(log, io)
io.write('\n')
if 'attachment' in log:
self.get_file(log['attachment']['id'])
def backup_observables(self, case_id: str):
observables = self.api.get_case_observables(case_id).json()
if observables:
os.makedirs(os.path.join(self.backupdir, 'cases', case_id), exist_ok=True)
with open(os.path.join(self.backupdir, 'cases', case_id, 'observables.jsonl'), 'w+', encoding='utf8') as io:
for observable in observables:
json.dump(observable, io)
io.write('\n')
if 'attachment' in observable:
self.get_file(observable['attachment']['id'])
def backup_alerts_all(self):
alerts = self.api.find_alerts(query={}, sort=['-createdAt'], range='all').json()
self._backup_alerts(alerts)
def backup_alerts_range(self, start, end):
query = Between("createdAt", start, end)
alerts = self.api.find_alerts(query=query, sort=['-createdAt'], range='all').json()
self._backup_alerts(alerts)
def _backup_alerts(self, alerts):
with open(self.alert_file, 'w+', encoding='utf8') as io:
for alert in alerts:
json.dump(alert, io)
io.write('\n')
| import datetime
import json
import os
from multiprocessing import Pool
import urllib3
from thehive4py.api import TheHiveApi
from thehive4py.query import Between
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class Backupper:
def __init__(self, backupdir: str, url: str, api_key: str, org: str = None, verify: bool = True):
url = url.strip("/")
self.api = TheHiveApi(url, api_key, organisation=org)
if not verify:
self.api.cert = False
self.backupdir = f'{backupdir}-{int(datetime.datetime.utcnow().timestamp())}'
os.makedirs(self.backupdir, exist_ok=True)
self.case_file = os.path.join(self.backupdir, 'cases.jsonl')
self.alert_file = os.path.join(self.backupdir, 'alerts.jsonl')
def get_file(self, attachment_id: str):
os.makedirs(os.path.join(self.backupdir, 'attachments'), exist_ok=True)
response = self.api.download_attachment(attachment_id)
with open(os.path.join(self.backupdir, 'attachments', attachment_id), 'wb') as io:
io.write(response.content)
def backup_cases_all(self) -> [dict]:
cases = self.api.find_cases(query={}, sort=['-createdAt'], range='all').json()
self._backup_cases(cases)
def backup_cases_range(self, start, end) -> [dict]:
query = Between("createdAt", start, end)
cases = self.api.find_cases(query=query, sort=['-createdAt'], range='all').json()
self._backup_cases(cases)
def _backup_cases(self, cases: [dict]):
with open(self.case_file, 'w+', encoding='utf8') as io:
with Pool(processes=8) as pool:
for case in cases:
json.dump(case, io)
io.write('\n')
pool.map(self._backup_case, cases)
pool.close()
pool.join()
def _backup_case(self, case: dict):
self.backup_observables(case['id'])
self.backup_tasks(case['id'])
def backup_tasks(self, case_id: str) -> [dict]:
tasks = self.api.get_case_tasks(case_id=case_id).json()
if tasks:
case_path = os.path.join(self.backupdir, 'cases', case_id)
os.makedirs(case_path, exist_ok=True)
with open(os.path.join(case_path, 'tasks.jsonl'), 'w+', encoding='utf8') as io:
for task in tasks:
json.dump(task, io)
io.write('\n')
self.backup_logs(case_id, task['id'])
def backup_logs(self, case_id: str, task_id: str) -> [dict]:
logs = self.api.get_task_logs(task_id).json()
if logs:
task_path = os.path.join(self.backupdir, 'cases', case_id, 'tasks', task_id)
os.makedirs(task_path, exist_ok=True)
with open(os.path.join(task_path, 'logs.jsonl'), 'w+', encoding='utf8') as io:
for log in logs:
json.dump(log, io)
io.write('\n')
if 'attachment' in log:
self.get_file(log['attachment']['id'])
def backup_observables(self, case_id: str):
observables = self.api.get_case_observables(case_id).json()
if observables:
os.makedirs(os.path.join(self.backupdir, 'cases', case_id), exist_ok=True)
with open(os.path.join(self.backupdir, 'cases', case_id, 'observables.jsonl'), 'w+', encoding='utf8') as io:
for observable in observables:
json.dump(observable, io)
io.write('\n')
if 'attachment' in observable:
self.get_file(observable['attachment']['id'])
def backup_alerts_all(self):
alerts = self.api.find_alerts(query={}, sort=['-createdAt'], range='all').json()
self._backup_alerts(alerts)
def backup_alerts_range(self, start, end):
query = Between("createdAt", start, end)
alerts = self.api.find_alerts(query=query, sort=['-createdAt'], range='all').json()
self._backup_alerts(alerts)
def _backup_alerts(self, alerts):
with open(self.alert_file, 'w+', encoding='utf8') as io:
for alert in alerts:
json.dump(alert, io)
io.write('\n')
| none | 1 | 2.455505 | 2 | |
Creating a Node.py | SimranSwain/Days_of_Code | 0 | 6612988 | class Node:
def __init__(self,data):
self.data = data
self.next = None
x = Node(10) #it will print data element of x node but will not print the adress field
y = Node(20)
print(x)
print(y)
x.next = y
print(x.next)
print(y.data)
print(y.next)
print(x.data)
print(x.next.data) #it is pointing to the data of next node
#print(y.next.data) #it is pointing to the data of next node y.next is none here..so it will give error
x.data = 50
print(x.data)
| class Node:
def __init__(self,data):
self.data = data
self.next = None
x = Node(10) #it will print data element of x node but will not print the adress field
y = Node(20)
print(x)
print(y)
x.next = y
print(x.next)
print(y.data)
print(y.next)
print(x.data)
print(x.next.data) #it is pointing to the data of next node
#print(y.next.data) #it is pointing to the data of next node y.next is none here..so it will give error
x.data = 50
print(x.data)
| en | 0.755727 | #it will print data element of x node but will not print the adress field #it is pointing to the data of next node #print(y.next.data) #it is pointing to the data of next node y.next is none here..so it will give error | 3.772895 | 4 |
__init__.py | roman-sitewits/fry-skill | 0 | 6612989 | <filename>__init__.py<gh_stars>0
from mycroft import MycroftSkill, intent_file_handler
class Fry(MycroftSkill):
def __init__(self):
MycroftSkill.__init__(self)
@intent_file_handler('fry.intent')
def handle_fry(self, message):
self.speak_dialog('fry')
def create_skill():
return Fry()
| <filename>__init__.py<gh_stars>0
from mycroft import MycroftSkill, intent_file_handler
class Fry(MycroftSkill):
def __init__(self):
MycroftSkill.__init__(self)
@intent_file_handler('fry.intent')
def handle_fry(self, message):
self.speak_dialog('fry')
def create_skill():
return Fry()
| none | 1 | 2.395928 | 2 | |
Exercicios/Exercicio050.py | RicardoMart922/estudo_Python | 0 | 6612990 | <filename>Exercicios/Exercicio050.py<gh_stars>0
# Faça um programa que calcule a soma entre todos os números ímpares que são múltiplos de três e que se encontram no intervalo de 1 até 500.
soma = 0
quantidade = 0
for i in range(1, 501, 2):
if i % 3 == 0:
soma += i
quantidade += 1
print('A soma de todos os {} números ímpares e múltiplos de 3 no intervalo de 1 até 500, foi {}'.format(quantidade, soma))
| <filename>Exercicios/Exercicio050.py<gh_stars>0
# Faça um programa que calcule a soma entre todos os números ímpares que são múltiplos de três e que se encontram no intervalo de 1 até 500.
soma = 0
quantidade = 0
for i in range(1, 501, 2):
if i % 3 == 0:
soma += i
quantidade += 1
print('A soma de todos os {} números ímpares e múltiplos de 3 no intervalo de 1 até 500, foi {}'.format(quantidade, soma))
| pt | 0.857874 | # Faça um programa que calcule a soma entre todos os números ímpares que são múltiplos de três e que se encontram no intervalo de 1 até 500. | 3.758523 | 4 |
algorithms_in_python/_9_priority_queues/examples/adaptable_pq.py | junteudjio/algorithms_in_python | 0 | 6612991 | <reponame>junteudjio/algorithms_in_python
from heap_pq import HeapPriorityQueue
__author__ = '<NAME>'
class AdaptablePQ(HeapPriorityQueue):
__slots__ = '_index'
def __init__(self, data=()):
self._data = [AdaptablePQ.Position(key, value, i) for i, (key, value) in enumerate(data)]
if len(self) > 1:
self._min_heapify()
class Position(HeapPriorityQueue._Item):
def __init__(self,key, value, index):
super(AdaptablePQ.Position, self).__init__(key, value)
self._index = index
class InvalidPositionException(Exception):
pass
def _swap(self, i, j):
super(AdaptablePQ, self)._swap(i, j)
#update the index attributes of the position i/j which are now changed to j/i by _swap(i,j)
self._data[i]._index = i
self._data[j]._index = j
def _bubble(self, i, position):
# i > 0 : because we don't need to upgrade if it's the root and we are also certain it has a parent
if i > 0 and position < self._data[self._parent(i)]:
self._upgrade(i)
else:
self._downgrade(i)
def add(self, key, value):
new_idx = len(self)
new_position = AdaptablePQ.Position(key, value, index=new_idx)
self._data.append(new_position)
self._upgrade(new_idx)
#return it to the client
return new_position
def update(self, position, new_key, new_value):
index = position._index
# check the correctness of the position
if 0 <= index < len(self) and self._data[index] is position:
self._data[index]._key, self._data[index]._value = new_key, new_value
# reset its position
self._bubble(index, position)
else:
raise AdaptablePQ.InvalidPositionException('Invalid Position')
def remove(self, position):
index = position._index
# check correctness of the position
if 0 <= index < len(self) and self._data[index] is position:
# swap element to remove with the last of the PQ/array
self._swap(index, len(self)-1)
# remove the last element which is the one we want to delete
element = self._data.pop()
# downgrade/upgrade back the previously last element to keep the heap-order condition
self._bubble(index, position)
return element._key, element._value
else:
raise AdaptablePQ.InvalidPositionException('Invalid Position')
if __name__ == '__main__':
pq = AdaptablePQ([(90, 'maxim'), (-1, 'minim')])
pq.add(6, 'blabla')
pq.add(2, 'bobo')
bibi = pq.add(4, 'bibi')
bubu = pq.add(3, 'bubu')
pq.add(5, 'bebe')
byby = pq.add(1, 'byby')
pq.add(3, 'beba')
print pq
print pq.min()
print pq.remove_min()
print pq
pq.remove(bibi)
print pq
pq.remove(byby)
print pq
pq.update(bubu, -2, 'BUBU')
print pq
| from heap_pq import HeapPriorityQueue
__author__ = '<NAME>'
class AdaptablePQ(HeapPriorityQueue):
__slots__ = '_index'
def __init__(self, data=()):
self._data = [AdaptablePQ.Position(key, value, i) for i, (key, value) in enumerate(data)]
if len(self) > 1:
self._min_heapify()
class Position(HeapPriorityQueue._Item):
def __init__(self,key, value, index):
super(AdaptablePQ.Position, self).__init__(key, value)
self._index = index
class InvalidPositionException(Exception):
pass
def _swap(self, i, j):
super(AdaptablePQ, self)._swap(i, j)
#update the index attributes of the position i/j which are now changed to j/i by _swap(i,j)
self._data[i]._index = i
self._data[j]._index = j
def _bubble(self, i, position):
# i > 0 : because we don't need to upgrade if it's the root and we are also certain it has a parent
if i > 0 and position < self._data[self._parent(i)]:
self._upgrade(i)
else:
self._downgrade(i)
def add(self, key, value):
new_idx = len(self)
new_position = AdaptablePQ.Position(key, value, index=new_idx)
self._data.append(new_position)
self._upgrade(new_idx)
#return it to the client
return new_position
def update(self, position, new_key, new_value):
index = position._index
# check the correctness of the position
if 0 <= index < len(self) and self._data[index] is position:
self._data[index]._key, self._data[index]._value = new_key, new_value
# reset its position
self._bubble(index, position)
else:
raise AdaptablePQ.InvalidPositionException('Invalid Position')
def remove(self, position):
index = position._index
# check correctness of the position
if 0 <= index < len(self) and self._data[index] is position:
# swap element to remove with the last of the PQ/array
self._swap(index, len(self)-1)
# remove the last element which is the one we want to delete
element = self._data.pop()
# downgrade/upgrade back the previously last element to keep the heap-order condition
self._bubble(index, position)
return element._key, element._value
else:
raise AdaptablePQ.InvalidPositionException('Invalid Position')
if __name__ == '__main__':
pq = AdaptablePQ([(90, 'maxim'), (-1, 'minim')])
pq.add(6, 'blabla')
pq.add(2, 'bobo')
bibi = pq.add(4, 'bibi')
bubu = pq.add(3, 'bubu')
pq.add(5, 'bebe')
byby = pq.add(1, 'byby')
pq.add(3, 'beba')
print pq
print pq.min()
print pq.remove_min()
print pq
pq.remove(bibi)
print pq
pq.remove(byby)
print pq
pq.update(bubu, -2, 'BUBU')
print pq | en | 0.941166 | #update the index attributes of the position i/j which are now changed to j/i by _swap(i,j) # i > 0 : because we don't need to upgrade if it's the root and we are also certain it has a parent #return it to the client # check the correctness of the position # reset its position # check correctness of the position # swap element to remove with the last of the PQ/array # remove the last element which is the one we want to delete # downgrade/upgrade back the previously last element to keep the heap-order condition | 3.338056 | 3 |
app/portal/providers/migrations/0003_auto_20190312_0023.py | Ecotrust/OH4S_Proteins | 0 | 6612992 | # Generated by Django 2.1.7 on 2019-03-12 00:23
from django.db import migrations, models
import phonenumber_field.modelfields
class Migration(migrations.Migration):
dependencies = [
('providers', '0002_auto_20190312_0000'),
]
operations = [
migrations.AlterField(
model_name='provider',
name='businessAddressLine1',
field=models.CharField(blank=True, default=None, max_length=255, null=True, verbose_name='Business Address Line 1'),
),
migrations.AlterField(
model_name='provider',
name='businessAddressZipCode',
field=models.CharField(blank=True, default=None, max_length=25, null=True, verbose_name='Business Address Zip Code'),
),
migrations.AlterField(
model_name='provider',
name='officePhone',
field=phonenumber_field.modelfields.PhoneNumberField(blank=True, default=None, max_length=128, null=True, verbose_name='Office Phone'),
),
]
| # Generated by Django 2.1.7 on 2019-03-12 00:23
from django.db import migrations, models
import phonenumber_field.modelfields
class Migration(migrations.Migration):
dependencies = [
('providers', '0002_auto_20190312_0000'),
]
operations = [
migrations.AlterField(
model_name='provider',
name='businessAddressLine1',
field=models.CharField(blank=True, default=None, max_length=255, null=True, verbose_name='Business Address Line 1'),
),
migrations.AlterField(
model_name='provider',
name='businessAddressZipCode',
field=models.CharField(blank=True, default=None, max_length=25, null=True, verbose_name='Business Address Zip Code'),
),
migrations.AlterField(
model_name='provider',
name='officePhone',
field=phonenumber_field.modelfields.PhoneNumberField(blank=True, default=None, max_length=128, null=True, verbose_name='Office Phone'),
),
]
| en | 0.766936 | # Generated by Django 2.1.7 on 2019-03-12 00:23 | 1.715426 | 2 |
biokit/network/__init__.py | cokelaer/biokit | 45 | 6612993 | """Utilities related to networks (e.g., protein)"""
from .complexes import *
| """Utilities related to networks (e.g., protein)"""
from .complexes import *
| en | 0.842529 | Utilities related to networks (e.g., protein) | 1.009145 | 1 |
src/api/datamanage/pro/dstan/models.py | Chromico/bk-base | 84 | 6612994 | <gh_stars>10-100
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import json
from django.db import models
from django.utils.translation import ugettext_lazy as _
from datamanage.pro.dstan.utils import JsonField
from common.transaction import meta_sync_register
from common.meta.models import MetaSyncSupport
from common.base_utils import model_to_dict
class DmStandardConfig(models.Model):
# 数据标准总表
id = models.AutoField(_('标准id'), primary_key=True)
standard_name = models.CharField(_('标准名称'), max_length=128)
description = models.TextField(_('标准描述'), blank=True, null=True)
category_id = models.IntegerField(_('所属分类'))
active = models.BooleanField(_('是否有效'), default=True)
created_by = models.CharField(_('创建者'), max_length=50)
created_at = models.DateTimeField(_('创建时间'), auto_now_add=True)
updated_by = models.CharField(_('创建者'), max_length=50, blank=True, null=True)
updated_at = models.DateTimeField(_('创建时间'), auto_now_add=True, blank=True, null=True)
def get_latest_online_version(self):
"""
获取最新的版本ID
"""
latest_version = DmStandardVersionConfig.objects.filter(
standard_id=self.id, standard_version_status='online'
).order_by('-id')[0]
return latest_version
class Meta:
db_table = 'dm_standard_config'
managed = False
app_label = 'dstan'
class DmStandardVersionConfig(models.Model):
    """Version table of a data standard (maps to ``dm_standard_version_config``)."""

    id = models.AutoField(_('版本id'), primary_key=True)
    # Id of the owning DmStandardConfig row (plain integer, not an FK field).
    standard_id = models.IntegerField(_('关联的dm_standard_config表id'))
    # Version label such as "v1.0", "v2.0".
    standard_version = models.CharField(_('标准版本号,例子:v1.0,v2.0...'), max_length=128)
    description = models.TextField(_('版本描述'), blank=True, null=True)
    # Lifecycle state of the version: developing / online / offline.
    standard_version_status = models.CharField(_('版本状态'), max_length=32)
    created_by = models.CharField(_('创建者'), max_length=50)
    created_at = models.DateTimeField(_('创建时间'), auto_now_add=True)
    # NOTE(review): labels below reuse "creator"/"create time", and auto_now_add
    # only stamps on insert — confirm auto_now was not intended.
    updated_by = models.CharField(_('创建者'), max_length=50, blank=True, null=True)
    updated_at = models.DateTimeField(_('创建时间'), auto_now_add=True, blank=True, null=True)

    class Meta:
        db_table = 'dm_standard_version_config'
        managed = False  # table is created and maintained outside Django migrations
        app_label = 'dstan'
class DmStandardContentConfig(MetaSyncSupport):
    """Content item of a standard version (maps to ``dm_standard_content_config``).

    Inherits ``MetaSyncSupport`` so changes can be propagated by the
    meta-sync framework (registered via ``meta_sync_register`` at module bottom).
    """

    id = models.AutoField(_('标准内容id'), primary_key=True)
    # Id of the owning DmStandardVersionConfig row.
    standard_version_id = models.IntegerField(_('关联的dm_standard_version_config表id'))
    standard_content_name = models.CharField(_('标准内容名称'), max_length=128)
    # parent_id = models.CharField(u'父表id,格式例子:[1,2]', max_length=256)
    # Parent content ids stored as a JSON list, e.g. [1, 2].
    parent_id = JsonField(_('约束条件,格式例子:[1,2]'))
    source_record_id = models.IntegerField(_('来源记录id'))
    # SQL template for this content item.
    standard_content_sql = models.TextField(_('标准模板sql'), blank=True, null=True)
    category_id = models.IntegerField(_('所属分类'))
    # Kind of content: "detaildata" or "indicator".
    standard_content_type = models.CharField(_('标准内容类型[detaildata/indicator]'), max_length=128)
    description = models.TextField(_('标准内容描述'), blank=True, null=True)
    # window_period = models.TextField(u'窗口类型,以秒为单位,json表达定义', blank=True, null=True)
    # Window definition (in seconds) expressed as JSON.
    window_period = JsonField(_('窗口类型,以秒为单位,json表达定义'))
    filter_cond = models.TextField(_('过滤条件'), blank=True, null=True)
    active = models.BooleanField(_('是否有效'), default=True)
    created_by = models.CharField(_('创建者'), max_length=50)
    created_at = models.DateTimeField(_('创建时间'), auto_now_add=True)
    updated_by = models.CharField(_('创建者'), max_length=50, blank=True, null=True)
    updated_at = models.DateTimeField(_('创建时间'), auto_now_add=True, blank=True, null=True)

    class Meta:
        db_table = 'dm_standard_content_config'
        managed = False
        app_label = 'dstan'

    def capture_values(self):
        """Return this row as a dict with the JsonField columns (``parent_id``,
        ``window_period``) re-serialized to JSON strings — presumably because the
        meta-sync layer stores these columns as raw text; confirm against
        ``MetaSyncSupport``.
        """
        val_dict = model_to_dict(self)
        parent_id = val_dict.get('parent_id')
        window_period = val_dict.get('window_period')
        if parent_id is not None:
            val_dict['parent_id'] = json.dumps(parent_id)
        if window_period is not None:
            val_dict['window_period'] = json.dumps(window_period)
        return val_dict
class DmDetaildataFieldConfig(models.Model):
    """Field detail of a "detaildata" standard content item
    (maps to ``dm_detaildata_field_config``).
    """

    id = models.AutoField(primary_key=True)
    # Id of the owning DmStandardContentConfig row.
    standard_content_id = models.IntegerField(_('关联的dm_standard_content_config的id'))
    source_record_id = models.IntegerField(_('来源记录id'))
    field_name = models.CharField(_('字段英文名'), max_length=128)
    # NOTE(review): several verbose_name labels below look copy-pasted from other
    # fields (field_alias, field_index, unit, constraint_id) — confirm the
    # intended labels before relying on them in the UI.
    field_alias = models.CharField(_('字段英文名'), max_length=128, blank=True, null=True)
    field_type = models.CharField(_('数据类型'), max_length=128)
    field_index = models.IntegerField(_('来源记录id'))
    unit = models.CharField(_('数据类型'), max_length=128, blank=True, null=True)
    description = models.TextField(_('备注'), blank=True, null=True)
    constraint_id = models.IntegerField(_('字段在数据集中的顺序'), blank=True, null=True)
    active = models.BooleanField(_('是否有效'), default=True)
    created_by = models.CharField(_('创建者'), max_length=50)
    created_at = models.DateTimeField(_('创建时间'), auto_now_add=True)
    updated_by = models.CharField(_('创建者'), max_length=50, blank=True, null=True)
    updated_at = models.DateTimeField(_('创建时间'), auto_now_add=True, blank=True, null=True)

    class Meta:
        db_table = 'dm_detaildata_field_config'
        managed = False
        app_label = 'dstan'
class DmIndicatorFieldConfig(models.Model):
    """Field detail of an atomic indicator standard content item
    (maps to ``dm_indicator_field_config``).
    """

    id = models.AutoField(primary_key=True)
    # Id of the owning DmStandardContentConfig row.
    standard_content_id = models.IntegerField(_('关联的dm_standard_content_config的id'))
    source_record_id = models.IntegerField(_('来源记录id'))
    field_name = models.CharField(_('字段英文名'), max_length=128)
    # NOTE(review): several verbose_name labels in this model look copy-pasted
    # (field_alias, field_index, unit), and the compute_model_id / constraint_id
    # labels appear to be swapped with each other — confirm.
    field_alias = models.CharField(_('字段英文名'), max_length=128, blank=True, null=True)
    field_type = models.CharField(_('数据类型'), max_length=128)
    field_index = models.IntegerField(_('来源记录id'))
    unit = models.CharField(_('数据类型'), max_length=128, blank=True, null=True)
    # Whether the field is a dimension (as opposed to a measure).
    is_dimension = models.BooleanField(_('是否维度:0:否;1:是;'), default=True)
    # Additivity: yes = fully additive; half = semi-additive; no = non-additive.
    add_type = models.CharField(_('可加性'), max_length=128, blank=True, null=True)
    compute_model_id = models.IntegerField(_('关联的值约束配置表id'))
    constraint_id = models.IntegerField(_('计算方式id'), blank=True, null=True)
    description = models.TextField(_('备注'), blank=True, null=True)
    active = models.BooleanField(_('是否有效'), default=True)
    created_by = models.CharField(_('创建者'), max_length=50)
    created_at = models.DateTimeField(_('创建时间'), auto_now_add=True)
    updated_by = models.CharField(_('创建者'), max_length=50, blank=True, null=True)
    updated_at = models.DateTimeField(_('创建时间'), auto_now_add=True, blank=True, null=True)

    class Meta:
        db_table = 'dm_indicator_field_config'
        managed = False
        app_label = 'dstan'
class DmConstraintConfig(MetaSyncSupport):
    """Supported value-constraint configuration (maps to ``dm_constraint_config``).

    Inherits ``MetaSyncSupport`` so changes can be propagated by the
    meta-sync framework (registered via ``meta_sync_register`` at module bottom).
    """

    id = models.AutoField(primary_key=True)
    constraint_name = models.CharField(_('约束名称'), max_length=128)
    # Constraint rule expressed as JSON.
    rule = JsonField(_('约束条件'))
    active = models.BooleanField(_('是否有效'), default=True)
    created_by = models.CharField(_('创建者'), max_length=50)
    created_at = models.DateTimeField(_('创建时间'), auto_now_add=True)
    updated_by = models.CharField(_('创建者'), max_length=50, blank=True, null=True)
    updated_at = models.DateTimeField(_('创建时间'), auto_now_add=True, blank=True, null=True)

    class Meta:
        db_table = 'dm_constraint_config'
        managed = False
        app_label = 'dstan'

    def capture_values(self):
        """Return this row as a dict with the ``rule`` JsonField re-serialized
        to a JSON string for the meta-sync layer.
        """
        val_dict = model_to_dict(self)
        rule = val_dict.get('rule')
        if rule is not None:
            val_dict['rule'] = json.dumps(rule)
        return val_dict
class DmDataTypeConfig(models.Model):
    """Data-type dictionary (maps to ``dm_data_type_config``)."""

    id = models.AutoField(primary_key=True)
    data_type_name = models.CharField(_('数据类型'), max_length=128)
    data_type_alias = models.CharField(_('数据类型名称'), max_length=128, default='')
    # Type group: numeric, string or time.
    data_type_group = models.CharField(_('类型'), max_length=128, default='')
    description = models.TextField(_('标准描述'), blank=True, null=True)
    created_by = models.CharField('created by', max_length=50)
    created_at = models.DateTimeField('create time', auto_now_add=True)
    updated_by = models.CharField('updated by', max_length=50)
    # NOTE(review): auto_now_add only stamps on insert; auto_now is likely
    # intended for an "update time" column — confirm.
    updated_at = models.DateTimeField('update time', auto_now_add=True)

    class Meta:
        db_table = 'dm_data_type_config'
        managed = False
        app_label = 'dstan'
class DmUnitConfig(models.Model):
    """Measurement-unit dictionary (maps to ``dm_unit_config``)."""

    id = models.AutoField(_('标准id'), primary_key=True)
    # Unit name (English) and its Chinese alias.
    name = models.CharField(_('单位英文名'), max_length=64)
    alias = models.CharField(_('单位中文名'), max_length=64)
    # Unit category name (English) and its Chinese alias.
    category_name = models.CharField(_('单位类目英文名'), max_length=64)
    category_alias = models.CharField(_('单位类目中文名'), max_length=64)
    description = models.TextField(_('描述'), blank=True, null=True)
    created_by = models.CharField('created by', max_length=50)
    created_at = models.DateTimeField('create time', auto_now_add=True)
    updated_by = models.CharField('updated by', max_length=50)
    # NOTE(review): auto_now_add only stamps on insert; auto_now is likely
    # intended for an "update time" column — confirm.
    updated_at = models.DateTimeField('update time', auto_now_add=True)

    class Meta:
        db_table = 'dm_unit_config'
        managed = False
        app_label = 'dstan'
class DmTaskDetailV1(MetaSyncSupport):
    """Detail row of a standardization task (maps to ``dm_task_detail``).

    Inherits ``MetaSyncSupport`` so changes can be propagated by the
    meta-sync framework (registered via ``meta_sync_register`` at module bottom).
    """

    id = models.AutoField(primary_key=True)
    task_id = models.IntegerField()
    task_content_id = models.IntegerField()
    standard_version_id = models.IntegerField()
    bk_biz_id = models.IntegerField()
    project_id = models.IntegerField()
    data_set_type = models.CharField(max_length=128)
    data_set_id = models.CharField(max_length=128)
    task_type = models.CharField(max_length=128)
    # Integer flag (default 1) rather than a BooleanField.
    active = models.IntegerField(default=1)
    created_by = models.CharField(max_length=50)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_by = models.CharField(max_length=50, null=True)
    # Unlike the other models in this module, this one uses auto_now so the
    # timestamp refreshes on every save.
    updated_at = models.DateTimeField(auto_now=True)

    class Meta:
        managed = False
        db_table = 'dm_task_detail'
        app_label = 'dstan'
# Hook each model into the meta-sync transaction framework.
for _model in (
    DmStandardConfig,
    DmStandardVersionConfig,
    DmStandardContentConfig,
    DmDetaildataFieldConfig,
    DmIndicatorFieldConfig,
    DmConstraintConfig,
    DmTaskDetailV1,
):
    meta_sync_register(_model)
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import json
from django.db import models
from django.utils.translation import ugettext_lazy as _
from datamanage.pro.dstan.utils import JsonField
from common.transaction import meta_sync_register
from common.meta.models import MetaSyncSupport
from common.base_utils import model_to_dict
class DmStandardConfig(models.Model):
    """Master table of data standards (maps to ``dm_standard_config``)."""

    id = models.AutoField(_('标准id'), primary_key=True)
    standard_name = models.CharField(_('标准名称'), max_length=128)
    description = models.TextField(_('标准描述'), blank=True, null=True)
    # Id of the category this standard belongs to (plain integer, not an FK field).
    category_id = models.IntegerField(_('所属分类'))
    # Soft-delete flag: False marks the standard as retired.
    active = models.BooleanField(_('是否有效'), default=True)
    created_by = models.CharField(_('创建者'), max_length=50)
    created_at = models.DateTimeField(_('创建时间'), auto_now_add=True)
    # NOTE(review): the two fields below reuse the "creator"/"create time" labels,
    # and auto_now_add only stamps the row on insert (auto_now may have been
    # intended for an "updated" column) — confirm.
    updated_by = models.CharField(_('创建者'), max_length=50, blank=True, null=True)
    updated_at = models.DateTimeField(_('创建时间'), auto_now_add=True, blank=True, null=True)

    def get_latest_online_version(self):
        """Return the newest ``DmStandardVersionConfig`` row of this standard
        whose status is ``online`` (highest version id wins).

        Raises:
            IndexError: if the standard has no online version.
        """
        latest_version = DmStandardVersionConfig.objects.filter(
            standard_id=self.id, standard_version_status='online'
        ).order_by('-id')[0]
        return latest_version

    class Meta:
        db_table = 'dm_standard_config'
        managed = False  # table is created and maintained outside Django migrations
        app_label = 'dstan'
class DmStandardVersionConfig(models.Model):
    """Version table of a data standard (maps to ``dm_standard_version_config``)."""

    id = models.AutoField(_('版本id'), primary_key=True)
    # Id of the owning DmStandardConfig row (plain integer, not an FK field).
    standard_id = models.IntegerField(_('关联的dm_standard_config表id'))
    # Version label such as "v1.0", "v2.0".
    standard_version = models.CharField(_('标准版本号,例子:v1.0,v2.0...'), max_length=128)
    description = models.TextField(_('版本描述'), blank=True, null=True)
    # Lifecycle state of the version: developing / online / offline.
    standard_version_status = models.CharField(_('版本状态'), max_length=32)
    created_by = models.CharField(_('创建者'), max_length=50)
    created_at = models.DateTimeField(_('创建时间'), auto_now_add=True)
    # NOTE(review): labels below reuse "creator"/"create time", and auto_now_add
    # only stamps on insert — confirm auto_now was not intended.
    updated_by = models.CharField(_('创建者'), max_length=50, blank=True, null=True)
    updated_at = models.DateTimeField(_('创建时间'), auto_now_add=True, blank=True, null=True)

    class Meta:
        db_table = 'dm_standard_version_config'
        managed = False  # table is created and maintained outside Django migrations
        app_label = 'dstan'
class DmStandardContentConfig(MetaSyncSupport):
    """Content item of a standard version (maps to ``dm_standard_content_config``).

    Inherits ``MetaSyncSupport`` so changes can be propagated by the
    meta-sync framework (registered via ``meta_sync_register`` at module bottom).
    """

    id = models.AutoField(_('标准内容id'), primary_key=True)
    # Id of the owning DmStandardVersionConfig row.
    standard_version_id = models.IntegerField(_('关联的dm_standard_version_config表id'))
    standard_content_name = models.CharField(_('标准内容名称'), max_length=128)
    # parent_id = models.CharField(u'父表id,格式例子:[1,2]', max_length=256)
    # Parent content ids stored as a JSON list, e.g. [1, 2].
    parent_id = JsonField(_('约束条件,格式例子:[1,2]'))
    source_record_id = models.IntegerField(_('来源记录id'))
    # SQL template for this content item.
    standard_content_sql = models.TextField(_('标准模板sql'), blank=True, null=True)
    category_id = models.IntegerField(_('所属分类'))
    # Kind of content: "detaildata" or "indicator".
    standard_content_type = models.CharField(_('标准内容类型[detaildata/indicator]'), max_length=128)
    description = models.TextField(_('标准内容描述'), blank=True, null=True)
    # window_period = models.TextField(u'窗口类型,以秒为单位,json表达定义', blank=True, null=True)
    # Window definition (in seconds) expressed as JSON.
    window_period = JsonField(_('窗口类型,以秒为单位,json表达定义'))
    filter_cond = models.TextField(_('过滤条件'), blank=True, null=True)
    active = models.BooleanField(_('是否有效'), default=True)
    created_by = models.CharField(_('创建者'), max_length=50)
    created_at = models.DateTimeField(_('创建时间'), auto_now_add=True)
    updated_by = models.CharField(_('创建者'), max_length=50, blank=True, null=True)
    updated_at = models.DateTimeField(_('创建时间'), auto_now_add=True, blank=True, null=True)

    class Meta:
        db_table = 'dm_standard_content_config'
        managed = False
        app_label = 'dstan'

    def capture_values(self):
        """Return this row as a dict with the JsonField columns (``parent_id``,
        ``window_period``) re-serialized to JSON strings — presumably because the
        meta-sync layer stores these columns as raw text; confirm against
        ``MetaSyncSupport``.
        """
        val_dict = model_to_dict(self)
        parent_id = val_dict.get('parent_id')
        window_period = val_dict.get('window_period')
        if parent_id is not None:
            val_dict['parent_id'] = json.dumps(parent_id)
        if window_period is not None:
            val_dict['window_period'] = json.dumps(window_period)
        return val_dict
class DmDetaildataFieldConfig(models.Model):
    """Field detail of a "detaildata" standard content item
    (maps to ``dm_detaildata_field_config``).
    """

    id = models.AutoField(primary_key=True)
    # Id of the owning DmStandardContentConfig row.
    standard_content_id = models.IntegerField(_('关联的dm_standard_content_config的id'))
    source_record_id = models.IntegerField(_('来源记录id'))
    field_name = models.CharField(_('字段英文名'), max_length=128)
    # NOTE(review): several verbose_name labels below look copy-pasted from other
    # fields (field_alias, field_index, unit, constraint_id) — confirm the
    # intended labels before relying on them in the UI.
    field_alias = models.CharField(_('字段英文名'), max_length=128, blank=True, null=True)
    field_type = models.CharField(_('数据类型'), max_length=128)
    field_index = models.IntegerField(_('来源记录id'))
    unit = models.CharField(_('数据类型'), max_length=128, blank=True, null=True)
    description = models.TextField(_('备注'), blank=True, null=True)
    constraint_id = models.IntegerField(_('字段在数据集中的顺序'), blank=True, null=True)
    active = models.BooleanField(_('是否有效'), default=True)
    created_by = models.CharField(_('创建者'), max_length=50)
    created_at = models.DateTimeField(_('创建时间'), auto_now_add=True)
    updated_by = models.CharField(_('创建者'), max_length=50, blank=True, null=True)
    updated_at = models.DateTimeField(_('创建时间'), auto_now_add=True, blank=True, null=True)

    class Meta:
        db_table = 'dm_detaildata_field_config'
        managed = False
        app_label = 'dstan'
class DmIndicatorFieldConfig(models.Model):
    """Field detail of an atomic indicator standard content item
    (maps to ``dm_indicator_field_config``).
    """

    id = models.AutoField(primary_key=True)
    # Id of the owning DmStandardContentConfig row.
    standard_content_id = models.IntegerField(_('关联的dm_standard_content_config的id'))
    source_record_id = models.IntegerField(_('来源记录id'))
    field_name = models.CharField(_('字段英文名'), max_length=128)
    # NOTE(review): several verbose_name labels in this model look copy-pasted
    # (field_alias, field_index, unit), and the compute_model_id / constraint_id
    # labels appear to be swapped with each other — confirm.
    field_alias = models.CharField(_('字段英文名'), max_length=128, blank=True, null=True)
    field_type = models.CharField(_('数据类型'), max_length=128)
    field_index = models.IntegerField(_('来源记录id'))
    unit = models.CharField(_('数据类型'), max_length=128, blank=True, null=True)
    # Whether the field is a dimension (as opposed to a measure).
    is_dimension = models.BooleanField(_('是否维度:0:否;1:是;'), default=True)
    # Additivity: yes = fully additive; half = semi-additive; no = non-additive.
    add_type = models.CharField(_('可加性'), max_length=128, blank=True, null=True)
    compute_model_id = models.IntegerField(_('关联的值约束配置表id'))
    constraint_id = models.IntegerField(_('计算方式id'), blank=True, null=True)
    description = models.TextField(_('备注'), blank=True, null=True)
    active = models.BooleanField(_('是否有效'), default=True)
    created_by = models.CharField(_('创建者'), max_length=50)
    created_at = models.DateTimeField(_('创建时间'), auto_now_add=True)
    updated_by = models.CharField(_('创建者'), max_length=50, blank=True, null=True)
    updated_at = models.DateTimeField(_('创建时间'), auto_now_add=True, blank=True, null=True)

    class Meta:
        db_table = 'dm_indicator_field_config'
        managed = False
        app_label = 'dstan'
class DmConstraintConfig(MetaSyncSupport):
    """Supported value-constraint configuration (maps to ``dm_constraint_config``).

    Inherits ``MetaSyncSupport`` so changes can be propagated by the
    meta-sync framework (registered via ``meta_sync_register`` at module bottom).
    """

    id = models.AutoField(primary_key=True)
    constraint_name = models.CharField(_('约束名称'), max_length=128)
    # Constraint rule expressed as JSON.
    rule = JsonField(_('约束条件'))
    active = models.BooleanField(_('是否有效'), default=True)
    created_by = models.CharField(_('创建者'), max_length=50)
    created_at = models.DateTimeField(_('创建时间'), auto_now_add=True)
    updated_by = models.CharField(_('创建者'), max_length=50, blank=True, null=True)
    updated_at = models.DateTimeField(_('创建时间'), auto_now_add=True, blank=True, null=True)

    class Meta:
        db_table = 'dm_constraint_config'
        managed = False
        app_label = 'dstan'

    def capture_values(self):
        """Return this row as a dict with the ``rule`` JsonField re-serialized
        to a JSON string for the meta-sync layer.
        """
        val_dict = model_to_dict(self)
        rule = val_dict.get('rule')
        if rule is not None:
            val_dict['rule'] = json.dumps(rule)
        return val_dict
class DmDataTypeConfig(models.Model):
    """Data-type dictionary (maps to ``dm_data_type_config``)."""

    id = models.AutoField(primary_key=True)
    data_type_name = models.CharField(_('数据类型'), max_length=128)
    data_type_alias = models.CharField(_('数据类型名称'), max_length=128, default='')
    # Type group: numeric, string or time.
    data_type_group = models.CharField(_('类型'), max_length=128, default='')
    description = models.TextField(_('标准描述'), blank=True, null=True)
    created_by = models.CharField('created by', max_length=50)
    created_at = models.DateTimeField('create time', auto_now_add=True)
    updated_by = models.CharField('updated by', max_length=50)
    # NOTE(review): auto_now_add only stamps on insert; auto_now is likely
    # intended for an "update time" column — confirm.
    updated_at = models.DateTimeField('update time', auto_now_add=True)

    class Meta:
        db_table = 'dm_data_type_config'
        managed = False
        app_label = 'dstan'
class DmUnitConfig(models.Model):
    """Measurement-unit dictionary (maps to ``dm_unit_config``)."""

    id = models.AutoField(_('标准id'), primary_key=True)
    # Unit name (English) and its Chinese alias.
    name = models.CharField(_('单位英文名'), max_length=64)
    alias = models.CharField(_('单位中文名'), max_length=64)
    # Unit category name (English) and its Chinese alias.
    category_name = models.CharField(_('单位类目英文名'), max_length=64)
    category_alias = models.CharField(_('单位类目中文名'), max_length=64)
    description = models.TextField(_('描述'), blank=True, null=True)
    created_by = models.CharField('created by', max_length=50)
    created_at = models.DateTimeField('create time', auto_now_add=True)
    updated_by = models.CharField('updated by', max_length=50)
    # NOTE(review): auto_now_add only stamps on insert; auto_now is likely
    # intended for an "update time" column — confirm.
    updated_at = models.DateTimeField('update time', auto_now_add=True)

    class Meta:
        db_table = 'dm_unit_config'
        managed = False
        app_label = 'dstan'
class DmTaskDetailV1(MetaSyncSupport):
    """Detail row of a standardization task (maps to ``dm_task_detail``).

    Inherits ``MetaSyncSupport`` so changes can be propagated by the
    meta-sync framework (registered via ``meta_sync_register`` at module bottom).
    """

    id = models.AutoField(primary_key=True)
    task_id = models.IntegerField()
    task_content_id = models.IntegerField()
    standard_version_id = models.IntegerField()
    bk_biz_id = models.IntegerField()
    project_id = models.IntegerField()
    data_set_type = models.CharField(max_length=128)
    data_set_id = models.CharField(max_length=128)
    task_type = models.CharField(max_length=128)
    # Integer flag (default 1) rather than a BooleanField.
    active = models.IntegerField(default=1)
    created_by = models.CharField(max_length=50)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_by = models.CharField(max_length=50, null=True)
    # Unlike the other models in this module, this one uses auto_now so the
    # timestamp refreshes on every save.
    updated_at = models.DateTimeField(auto_now=True)

    class Meta:
        managed = False
        db_table = 'dm_task_detail'
        app_label = 'dstan'
# Hook each model into the meta-sync transaction framework.
for _model in (
    DmStandardConfig,
    DmStandardVersionConfig,
    DmStandardContentConfig,
    DmDetaildataFieldConfig,
    DmIndicatorFieldConfig,
    DmConstraintConfig,
    DmTaskDetailV1,
):
    meta_sync_register(_model)
NumpyNeuralNetwork/nnn/utils/arguments.py | jiunbae/ITE4053 | 5 | 6612995 | <filename>NumpyNeuralNetwork/nnn/utils/arguments.py<gh_stars>1-10
import argparse
parser = argparse.ArgumentParser(description='ITE4053 Assignment 1'
'Neural Network implemented by numpy and TensorFlow')
parser.add_argument('--mode', type=str, default='tf',
choices=['np', 'tf'], required=True,
help='Select mode for assignment')
parser.add_argument('--epoch', type=int, default=5000,
help="Epoch size")
parser.add_argument('--size', type=int, default=128,
help="Dataset size")
parser.add_argument('--lr', type=float, default=.1,
help="Learning rate")
parser.add_argument('--optimizer', type=str, default='SGD',
choices=['SGD', 'Adam'],
help="Optimizer")
parser.add_argument('--loss', type=str, default='binary_crossentropy',
choices=['binary_crossentropy', 'mean_squared_error'],
help="Loss function")
parser.add_argument('--normal', action='store_true',
help='Use normalized dataset')
parser.add_argument('-l', '--layer', action='append', nargs='*',
help='Define Layer (input_dim, output_dim, activation)'
'Activation must be one of {sigmoid, relu}')
parser.add_argument('--save', type=str, default='parameters.npz',
help='Save parameter path (only works on np mod)')
parser.add_argument('--repeat', type=int, default=10,
help="Repeat train, valid")
parser.add_argument('--seed', type=int, default=2,
help="Manual seed")
parser.add_argument('--verbose', default=False, action='store_true',
help="More description")
arguments = parser.parse_args()
| <filename>NumpyNeuralNetwork/nnn/utils/arguments.py<gh_stars>1-10
import argparse
parser = argparse.ArgumentParser(description='ITE4053 Assignment 1'
'Neural Network implemented by numpy and TensorFlow')
parser.add_argument('--mode', type=str, default='tf',
choices=['np', 'tf'], required=True,
help='Select mode for assignment')
parser.add_argument('--epoch', type=int, default=5000,
help="Epoch size")
parser.add_argument('--size', type=int, default=128,
help="Dataset size")
parser.add_argument('--lr', type=float, default=.1,
help="Learning rate")
parser.add_argument('--optimizer', type=str, default='SGD',
choices=['SGD', 'Adam'],
help="Optimizer")
parser.add_argument('--loss', type=str, default='binary_crossentropy',
choices=['binary_crossentropy', 'mean_squared_error'],
help="Loss function")
parser.add_argument('--normal', action='store_true',
help='Use normalized dataset')
parser.add_argument('-l', '--layer', action='append', nargs='*',
help='Define Layer (input_dim, output_dim, activation)'
'Activation must be one of {sigmoid, relu}')
parser.add_argument('--save', type=str, default='parameters.npz',
help='Save parameter path (only works on np mod)')
parser.add_argument('--repeat', type=int, default=10,
help="Repeat train, valid")
parser.add_argument('--seed', type=int, default=2,
help="Manual seed")
parser.add_argument('--verbose', default=False, action='store_true',
help="More description")
arguments = parser.parse_args()
| none | 1 | 2.731968 | 3 | |
test/progs/02_valabs_OK_01.py | LeGmask/MrPython | 26 | 6612996 |
def valeur_absolue(x : float) -> float:
"""Retourne la valeur absolue de x.
"""
if x >= 0:
return x
else:
return -x
# Jeu de tests
assert valeur_absolue(3) == 3
assert valeur_absolue(-3) == 3
assert valeur_absolue(1.5 - 2.5) == valeur_absolue(2.5 - 1.5)
assert valeur_absolue(0) == 0
assert valeur_absolue(-0) == 0
|
def valeur_absolue(x : float) -> float:
"""Retourne la valeur absolue de x.
"""
if x >= 0:
return x
else:
return -x
# Jeu de tests
assert valeur_absolue(3) == 3
assert valeur_absolue(-3) == 3
assert valeur_absolue(1.5 - 2.5) == valeur_absolue(2.5 - 1.5)
assert valeur_absolue(0) == 0
assert valeur_absolue(-0) == 0
| fr | 0.806117 | Retourne la valeur absolue de x. # Jeu de tests | 3.641779 | 4 |
cvpysdk/index_server.py | Jayesh-Jain/SDK | 0 | 6612997 | <filename>cvpysdk/index_server.py
# --------------------------------------------------------------------------
# Copyright Commvault Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
"""File for performing index server related operations on the commcell
IndexServers, IndexServer and _Roles are 3 classes defined in this file
IndexServers: Class for representing all the index servers associated with the commcell
IndexServer: Class for a instance of a single index server of the commcell
_Roles: Class for storing all the cloud role details
IndexServers
============
__init__() -- initialize object of IndexServers class associated with
the commcell
__str() -- returns all the index servers of the commcell
__repr__() -- returns the string to represent the instance
__len__() -- returns the number of index servers associated
_get_index_servers() -- gets all the index server associated with the commcell
_response_not_success() -- raise exception when response is not 200
_get_all_roles() -- creates an instance of _Roles class
has() -- returns whether the index server is present or not
get() -- returns a IndexServer object for given cloud name
create() -- creates a index server within the commcell
delete() -- deletes a index server associated with commcell
update_roles_data() -- fetches the cloud roles data from commcell
get_properties() -- returns a dict of data of index server for the given
cloud name
refresh() -- refresh the index servers associated with commcell
IndexServers Attributes
-----------------------
**all_index_servers** -- returns the dictionary consisting of all the index
servers associated with the commcell and there details
**roles_data** -- returns the list of cloud roles details
IndexServer
===========
__init()__ -- initializes the object with the specified commcell
object, index server name and the cloud id
__repr__() -- returns the index server's name, the instance is
associated with
_get_cloud_id() -- gets the cloud id
_get_properties() -- gets all the properties of the index server
refresh() -- refresh all the properties of client
update_roles_data() -- fetches the cloud roles data from commcell
modify() -- to modify the index server node details
update_role() -- to update the roles assigned to cloud
IndexServer Attributes
----------------------
**properties** -- returns the properties of this index server
**roles_data** -- returns all the available cloud roles data
**host_name** -- returns the host name for the index server
**internal_cloud_name** -- returns the internal cloud name
**client_name** -- returns the client name for index server
**server_url** -- returns the content indexing server url
**type** -- returns the type of the index server
**base_port** -- returns the base port of this index server
**client_id** -- returns the client id for this index server
**roles** -- returns the array of roles installed
with the index server within the commcell
**cloud_id** -- returns the cloud id of the index server
**server_type** -- returns the server type of the index server
**engine_name** -- returns the engine name that is index server name
**index_server_client_id** -- returns the index server client id
_Roles
======
__init__() -- initializes the class with commcell object
refresh() -- refreshes the attributes
_get_all_roles() -- fetches the cloud roles data from commcell
get_role_id() -- returns role id for given role name
update_roles_data() -- fetches the cloud roles data from commcell
_Roles Attributes
-----------------
**roles_data** -- returns the list of details of all cloud roles
"""
from copy import deepcopy
from past.builtins import basestring
from .exception import SDKException
from .datacube.constants import IndexServerConstants
class IndexServers(object):
"""Class for representing all the index servers associated with the commcell."""
def __init__(self, commcell_object):
"""Initialize object of the IndexServers class.
Args:
commcell_object (object) -- instance of the Commcell class
Returns:
object - instance of the IndexServers class
"""
self._commcell_object = commcell_object
self._cvpysdk_object = commcell_object._cvpysdk_object
self._services = commcell_object._services
self._update_response_ = commcell_object._update_response_
self._all_index_servers = None
self._roles_obj = None
self.refresh()
def __str__(self):
"""Representation string consisting of all index servers of the commcell.
Returns:
str - string of all the index servers with different roles associated
with the commcell
"""
representation_string = '{:^5}\t{:^20}\n\n'.format('S. No.', 'IS Name')
index = 1
for index_server in self._all_index_servers:
representation_string += '{:^5}\t{:^20}\n'.format(
index, index_server['engineName'])
index += 1
return representation_string
def __repr__(self):
"""Representation string for the instance of the IndexServers class."""
return "IndexServers class instance for Commcell: '{0}'".format(
self._commcell_object.commserv_name
)
def __len__(self):
"""Returns the number of the index servers associated with the commcell"""
return len(self._all_index_servers)
def _response_not_success(self, response):
"""Helper method to raise exception when response is not 200 (ok)
Raises:
SDKException:
Response was not success
"""
raise SDKException(
'Response',
'101',
self._update_response_(
response.text))
def _get_index_servers(self):
"""Method to retrieve all the index server available on commcell.
Raises:
SDKException:
Failed to get the list of analytics engines
Response was not success
"""
flag, response = self._cvpysdk_object.make_request(
'GET', self._services['GET_ALL_INDEX_SERVERS'])
if flag:
if response.json() and 'listOfCIServer' in response.json():
for item in response.json()['listOfCIServer']:
if item['cloudID'] in self._all_index_servers:
self._all_index_servers[item['cloudID']]['version'].append(
item['version'])
else:
item['version'] = [item['version']]
self._all_index_servers[item['cloudID']] = item
else:
self._all_index_servers = {}
else:
self._response_not_success(response)
def _get_all_roles(self):
"""Creates an instance of _Roles class and adds it to the IndexServer class"""
self._roles_obj = _Roles(self._commcell_object)
@property
def all_index_servers(self):
"""Returns the details of all the index server for associated commcell.
Returns:
dict - dictionary consisting details of all the index servers
associated with commcell
Sample - {
<cloud_id_1> :
{
"engineName" : <property_value>,
"internalCloudName" : <property_value>,
...
},
<cloud_id_2> :
{
"engineName" : <property_value>,
"cloudID" : <property_value>,
...
}
}
"""
return self._all_index_servers
@property
def roles_data(self):
"""Returns the details of all the cloud roles data
Returns:
list - list of dictionary containing details of the cloud roles
"""
return self._roles_obj.roles_data
def refresh(self):
"""Refresh the properties of IndexServers class"""
self._all_index_servers = {}
self._get_index_servers()
if not self._roles_obj:
self._get_all_roles()
def update_roles_data(self):
"""Synchronises all the cloud roles details with the commcell"""
self._roles_obj.update_roles_data()
def get_properties(self, cloud_name):
"""Returns all details of a index server with the cloud name
Args:
cloud_name (str) -- cloud name of index server
Returns:
dict - dict consisting details of the index server
"""
for index_server in self._all_index_servers:
if self._all_index_servers[index_server]['engineName'] == cloud_name:
return self._all_index_servers[index_server]
raise SDKException('IndexServers', '102')
def has(self, cloud_name):
    """Checks whether an index server with the given engine name exists.

        Args:
            cloud_name  (str)   --  the engine name of the index server

        Returns:
            boolean -   True if an index server with the given engine name
            is associated with the commcell, False otherwise

        Raises:
            SDKException:
                Data type of the input(s) is not valid
    """
    if not isinstance(cloud_name, basestring):
        raise SDKException('IndexServers', '101')
    target = cloud_name.lower()
    # case-insensitive match against every cached engine name
    return any(
        details["engineName"].lower() == target
        for details in self._all_index_servers.values()
    )
def get(self, cloud_data):
    """Returns an IndexServer object if an index server is found.

        Args:
            cloud_data  (int/str)   --  cloud name or cloud ID of the index server

        Returns:
            object (IndexServer)    --  instance of the index server with
            the given engine name or cloud id

        Raises:
            SDKException:
                Index Server not found.

                Data type of the input(s) is not valid.
    """
    if isinstance(cloud_data, int):
        if cloud_data in self._all_index_servers:
            return IndexServer(
                self._commcell_object,
                self._all_index_servers[cloud_data]['engineName'],
                cloud_data)
        # BUGFIX: the exception was previously constructed but never raised,
        # so an unknown int cloud id fell through and raised the wrong
        # error code ('101' - invalid input) instead of '102' (not found).
        raise SDKException('IndexServers', '102')
    if isinstance(cloud_data, basestring):
        name = cloud_data.lower()
        for cloud_id in self._all_index_servers:
            if self._all_index_servers[cloud_id]['engineName'].lower() == name:
                return IndexServer(
                    self._commcell_object,
                    self._all_index_servers[cloud_id]['engineName'],
                    self._all_index_servers[cloud_id]['cloudID'])
        raise SDKException('IndexServers', '102')
    raise SDKException('IndexServers', '101')
def create(
        self,
        index_server_name,
        index_server_node_names,
        index_directory,
        index_server_roles,
        index_pool_name=None,
        is_cloud=False,
        cloud_param=None):
    """Creates an index server within the commcell.

        Args:
            index_server_name       (str)   --  name for the index server

            index_server_node_names (list)  --  client names for the index server nodes

            index_directory         (str)   --  index location for the index server

            index_server_roles      (list)  --  list of role names to be assigned

            index_pool_name         (str)   --  name of the index pool used by a
            cloud index server (only read when is_cloud is True)

            is_cloud                (bool)  --  if True, creates a cloud mode index server

            cloud_param             (list)  --  list of custom parameters to be merged
            into the request json for the index server meta info; any entry here
            overrides the default of the same name
                [
                    {
                        "name": <name>,
                        "value": <value>
                    }
                ]

        Raises:
            SDKException:
                Data type of the input(s) is not valid.

                Response was not success.

                Response was empty.
    """
    if not (isinstance(index_server_roles, list) and isinstance(index_server_node_names, list)
            and isinstance(index_server_name, basestring)):
        raise SDKException('IndexServers', '101')
    # default meta info; entries may be overridden/removed via cloud_param below
    cloud_meta_infos = {
        'INDEXLOCATION': index_directory,
        'REPLICATION': '1',
        'LANGUAGE': '0'
    }
    node_meta_infos = {
        'PORTNO': '20000',
        'JVMMAXMEMORY': '8191'
    }
    role_meta_infos = {}
    # deep copy so the shared constant template is never mutated
    req_json = deepcopy(IndexServerConstants.REQUEST_JSON)
    req_json['cloudInfoEntity'] = {
        'cloudName': index_server_name,
        'cloudDisplayName': index_server_name
    }
    if is_cloud:
        index_pool_obj = self._commcell_object.index_pools[index_pool_name]
        req_json['type'] = 5
        req_json['solrCloudInfo']['cloudPoolInfo'] = {
            'cloudId': int(index_pool_obj['pool_id'])
        }
        role_meta_infos['ISCLOUDMODE'] = '3'
        node_meta_infos['WEBSERVER'] = 'true'
    # one cloudNodes entry per node client, each carrying the node meta info
    for node_name in index_server_node_names:
        node_obj = self._commcell_object.clients[node_name]
        node_data = {
            "opType": IndexServerConstants.OPERATION_ADD,
            "nodeClientEntity": {
                "hostName": node_obj['hostname'],
                "clientId": int(node_obj['id']),
                "clientName": node_name
            },
            'nodeMetaInfos': []
        }
        for node_info in node_meta_infos:
            node_data['nodeMetaInfos'].append({
                'name': node_info,
                'value': node_meta_infos[node_info]
            })
        req_json['cloudNodes'].append(node_data)
    for role in index_server_roles:
        role_id = self._roles_obj.get_role_id(role)
        if not role_id:
            raise SDKException('IndexServers', '103')
        role_data = {
            "roleId": role_id,
            "roleName": role,
            "operationType": IndexServerConstants.OPERATION_ADD,
            'roleMetaInfos': []
        }
        for role_info in role_meta_infos:
            role_data['roleMetaInfos'].append({
                'name': role_info,
                'value': role_meta_infos[role_info]
            })
        req_json['solrCloudInfo']['roles'].append(role_data)
    if cloud_param:
        # caller-supplied params win over the defaults of the same name
        for param in cloud_param:
            if param['name'] in cloud_meta_infos:
                del cloud_meta_infos[param['name']]
            req_json['cloudMetaInfos'].append(param)
    for cloud_info in cloud_meta_infos:
        req_json['cloudMetaInfos'].append({
            'name': cloud_info,
            'value': cloud_meta_infos[cloud_info]
        })
    flag, response = self._cvpysdk_object.make_request(
        'POST', self._services['CLOUD_CREATE'], req_json)
    if flag:
        if response.json():
            error_code = response.json()['genericResp']['errorCode']
            error_string = response.json()['genericResp']['errorMessage']
            if error_code == 0:
                self.refresh()
            else:
                o_str = 'Failed to create Index Server. Error: "{0}"'.format(
                    error_string)
                raise SDKException('IndexServers', '102', o_str)
        else:
            raise SDKException('Response', '102')
    else:
        self._response_not_success(response)
def delete(self, cloud_name):
    """Deletes / removes an index server from the commcell.

        Args:
            cloud_name  (str)   --  cloud name of the index server
            to be removed from the commcell

        Raises:
            SDKException:
                Data type of the input(s) is not valid.

                Response was not success.

                Response was empty.
    """
    if not isinstance(cloud_name, basestring):
        raise SDKException('IndexServers', '101')
    # resolve the engine name to its cloud id before building the request
    cloud_id = self.get(cloud_name).cloud_id
    req_json = deepcopy(IndexServerConstants.REQUEST_JSON)
    req_json["opType"] = IndexServerConstants.OPERATION_DELETE
    req_json['cloudInfoEntity']['cloudId'] = cloud_id
    flag, response = self._cvpysdk_object.make_request(
        'POST', self._services['CLOUD_DELETE'], req_json
    )
    if flag:
        # success is signalled by a genericResp WITHOUT an errorCode key
        if response.json() and 'genericResp' in response.json() \
                and 'errorCode' not in response.json()['genericResp']:
            self.refresh()
            return
        if response.json() and 'genericResp' in response.json():
            raise SDKException(
                'Response', '102', response.json()['genericResp'].get(
                    'errorMessage', ''))
        raise SDKException('Response', '102')
    self._response_not_success(response)
class IndexServer(object):
    """Class for performing index server operations for a specific index server."""

    def __init__(self, commcell_obj, name, cloud_id=None):
        """Initialise the IndexServer class instance.

            Args:
                commcell_obj    (object)    --  instance of the Commcell class

                name            (str)       --  name of the index server

                cloud_id        (int)       --  cloud id of the index server

                    default: None

            Returns:
                object - instance of the IndexServer class
        """
        self._engine_name = name
        self._commcell_obj = commcell_obj
        self._cvpysdk_object = self._commcell_obj._cvpysdk_object
        self._services = self._commcell_obj._services
        # when the caller already knows the cloud id, skip the extra lookup
        # NOTE(review): _cloud_id is cached here but the cloud_id property
        # reads from _properties instead -- confirm this is intentional
        if cloud_id:
            self._cloud_id = cloud_id
        else:
            self._cloud_id = self._get_cloud_id()
        self._properties = None
        self._roles_obj = None
        self.refresh()

    def __repr__(self):
        """String representation of the instance of this class."""
        return 'IndexServer class instance for index server: "{0}"'.format(
            self._engine_name)

    def _get_cloud_id(self):
        """Gets the cloud id for the index server via the commcell lookup.

            Returns:
                int - cloud id for the index server
        """
        return self._commcell_obj.index_servers.get(self._engine_name).cloud_id

    def _get_properties(self):
        """Gets the properties of the index server from the commcell cache."""
        self._properties = self._commcell_obj.index_servers.get_properties(
            self._engine_name)

    def refresh(self):
        """Refreshes the index server properties; the roles helper is created once."""
        self._get_properties()
        if not self._roles_obj:
            self._roles_obj = _Roles(self._commcell_obj)

    def update_roles_data(self):
        """Synchronizes the cloud roles data with the commcell."""
        self._roles_obj.update_roles_data()

    def modify(self, index_location, node_name, node_params):
        """Modifies the properties of an index server node.

            Args:
                index_location  (str)   --  index server data directory

                node_name       (str)   --  index server node (client) name

                node_params     (list)  --  list of parameter dicts to be passed
                (the code iterates this and appends each entry to the node
                meta infos, so a list is expected here, not a dict)
                    [
                        {
                            "name" : <property_name>,
                            "value" : <property_value>
                        }
                    ]

            Raises:
                SDKException:
                    Response was not success.

                    Response was empty.
        """
        json_req = deepcopy(IndexServerConstants.REQUEST_JSON)
        json_req['opType'] = IndexServerConstants.OPERATION_EDIT
        json_req['cloudNodes'] = [{
            "opType": IndexServerConstants.OPERATION_EDIT,
            "nodeClientEntity": {
                "clientId": int(self._commcell_obj.clients.get(node_name).client_id)
            },
            "nodeMetaInfos": [
                {
                    "name": "INDEXLOCATION",
                    "value": index_location
                }
            ]
        }]
        json_req['cloudInfoEntity']['cloudId'] = self.cloud_id
        for param in node_params:
            json_req['cloudNodes'][0]['nodeMetaInfos'].append(param)
        flag, response = self._cvpysdk_object.make_request(
            "POST", self._services['CLOUD_MODIFY'], json_req)
        if flag:
            if response.json():
                # a 'cloudId' key in the response indicates the edit succeeded
                if 'cloudId' in response.json():
                    self.refresh()
                    return
            raise SDKException('Response', '102')
        raise SDKException('Response', '101')

    def update_role(self, props=None):
        """Updates the roles assigned to this index server.

            Args:
                props   (list)  --  array of dictionaries consisting of
                details of the roles such as role name and operation type
                    [{
                        "roleName": <name> (str)
                        "operationType": 1 or 2 (int)
                            1 for adding a role
                            2 for removing a role
                    }]

            Raises:
                SDKException:
                    if response is empty

                    if response is not success
        """
        json_req = {"cloudId": self.cloud_id, "roles": []}
        if props:
            for prop in props:
                role_id = self._roles_obj.get_role_id(prop['roleName'])
                if not role_id:
                    raise SDKException('IndexServers', '103')
                # NOTE: mutates the caller's dict by injecting 'roleId'
                prop['roleId'] = role_id
                json_req['roles'].append(prop)
        flag, response = self._cvpysdk_object.make_request(
            'POST', self._services['CLOUD_ROLE_UPDATE'], json_req
        )
        if flag:
            if response.json() and 'errorCode' in response.json():
                if response.json()['errorCode'] == 0:
                    self.refresh()
                    return
            raise SDKException('Response', '102')
        raise SDKException('Response', '101')

    @property
    def is_cloud(self):
        """Returns True if this index server runs in cloud mode (server type 5)."""
        return self.server_type == 5

    @property
    def roles_data(self):
        """Returns the cloud roles data."""
        return self._roles_obj.roles_data

    @property
    def properties(self):
        """Returns the raw index server properties dict."""
        return self._properties

    @property
    def host_name(self):
        """Returns the host name of the index server."""
        return self._properties[IndexServerConstants.HOST_NAME]

    @property
    def cloud_name(self):
        """Returns the internal cloud name of the index server."""
        return self._properties[IndexServerConstants.CLOUD_NAME]

    @property
    def client_name(self):
        """Returns the client name of the index server."""
        return self._properties[IndexServerConstants.CLIENT_NAME]

    @property
    def server_url(self):
        """Returns the content indexing url of the index server."""
        return self._properties[IndexServerConstants.CI_SERVER_URL]

    @property
    def type(self):
        """Returns the type of the index server.

        Named ``type`` for backward compatibility with existing callers.
        """
        return self._properties[IndexServerConstants.TYPE]

    @property
    def base_port(self):
        """Returns the base port of the index server."""
        return self._properties[IndexServerConstants.BASE_PORT]

    @property
    def client_id(self):
        """Returns the client id of the index server."""
        return self._properties[IndexServerConstants.CLIENT_ID]

    @property
    def roles(self):
        """Returns the roles installed on the index server."""
        return self._properties[IndexServerConstants.ROLES]

    @property
    def cloud_id(self):
        """Returns the cloud id of the index server."""
        return self._properties[IndexServerConstants.CLOUD_ID]

    @property
    def server_type(self):
        """Returns the server type of the index server."""
        return self._properties[IndexServerConstants.SERVER_TYPE]

    @property
    def engine_name(self):
        """Returns the engine name (index server name) of the index server."""
        return self._properties[IndexServerConstants.ENGINE_NAME]

    @property
    def index_server_client_id(self):
        """Returns the index server client id of the index server."""
        return self._properties[IndexServerConstants.INDEX_SERVER_CLIENT_ID]
class _Roles(object):
    """Helper class that caches and serves cloud role details for a commcell."""

    def __init__(self, commcell_object):
        """Initializes the _Roles helper.

            Args:
                commcell_object     (object)    --  instance of Commcell class

            Returns:
                object - instance of _Roles class
        """
        self.commcell_object = commcell_object
        self._roles_data = None
        self.refresh()

    def refresh(self):
        """Reloads the cloud role details from the commcell."""
        self._get_all_roles()

    def _get_all_roles(self):
        """Fetches the details of every cloud role available on the commcell.

            Raises:
                SDKException:
                    Response was empty.

                    Response was not success.
        """
        flag, response = self.commcell_object._cvpysdk_object.make_request(
            "GET", self.commcell_object._services['GET_ALL_ROLES']
        )
        if not flag:
            raise SDKException('Response', '101')
        payload = response.json()
        if payload and 'rolesInfo' in payload:
            self._roles_data = payload['rolesInfo']
            return
        raise SDKException('Response', '102')

    def get_role_id(self, role_name):
        """Looks up the cloud role id for the given role name.

            Args:
                role_name   (str)   --  name of the cloud role

            Returns:
                int - the role id if the role name is found, else None
        """
        return next(
            (entry['roleId'] for entry in self._roles_data
             if entry['roleName'] == role_name),
            None
        )

    def update_roles_data(self):
        """Synchronizes the cloud role data with the commcell database."""
        self._get_all_roles()

    @property
    def roles_data(self):
        """Returns the list of dictionaries of details of each cloud role."""
        return self._roles_data
| <filename>cvpysdk/index_server.py
# --------------------------------------------------------------------------
# Copyright Commvault Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
"""File for performing index server related operations on the commcell
IndexServers, IndexServer and _Roles are 3 classes defined in this file
IndexServers: Class for representing all the index servers associated with the commcell
IndexServer: Class for an instance of a single index server of the commcell
_Roles: Class for storing all the cloud role details
IndexServers
============
__init__() -- initialize object of IndexServers class associated with
the commcell
__str__() -- returns all the index servers of the commcell
__repr__() -- returns the string to represent the instance
__len__() -- returns the number of index servers associated
_get_index_servers() -- gets all the index server associated with the commcell
_response_not_success() -- raise exception when response is not 200
_get_all_roles() -- creates an instance of _Roles class
has() -- returns whether the index server is present or not
get() -- returns a IndexServer object for given cloud name
create() -- creates a index server within the commcell
delete() -- deletes a index server associated with commcell
update_roles_data() -- fetches the cloud roles data from commcell
get_properties() -- returns a dict of data of index server for the given
cloud name
refresh() -- refresh the index servers associated with commcell
IndexServers Attributes
-----------------------
**all_index_servers** -- returns the dictionary consisting of all the index
servers associated with the commcell and their details
**roles_data** -- returns the list of cloud roles details
IndexServer
===========
__init__() -- initializes the object with the specified commcell
object, index server name and the cloud id
__repr__() -- returns the index server's name, the instance is
associated with
_get_cloud_id() -- gets the cloud id
_get_properties() -- gets all the properties of the index server
refresh() -- refresh all the properties of client
update_roles_data() -- fetches the cloud roles data from commcell
modify() -- to modify the index server node details
update_role() -- to update the roles assigned to cloud
IndexServer Attributes
----------------------
**properties** -- returns the properties of this index server
**roles_data** -- returns all the available cloud roles data
**host_name** -- returns the host name for the index server
**internal_cloud_name** -- returns the internal cloud name
**client_name** -- returns the client name for index server
**server_url** -- returns the content indexing server url
**type** -- returns the type of the index server
**base_port** -- returns the base port of this index server
**client_id** -- returns the client id for this index server
**roles** -- returns the array of roles installed
with the index server within the commcell
**cloud_id** -- returns the cloud id of the index server
**server_type** -- returns the server type of the index server
**engine_name** -- returns the engine name that is index server name
**index_server_client_id** -- returns the index server client id
_Roles
======
__init__() -- initializes the class with commcell object
refresh() -- refreshes the attributes
_get_all_roles() -- fetches the cloud roles data from commcell
get_role_id() -- returns role id for given role name
update_roles_data() -- fetches the cloud roles data from commcell
_Roles Attributes
-----------------
**roles_data** -- returns the list of details of all cloud roles
"""
from copy import deepcopy
from past.builtins import basestring
from .exception import SDKException
from .datacube.constants import IndexServerConstants
class IndexServers(object):
    """Class for representing all the index servers associated with the commcell."""

    def __init__(self, commcell_object):
        """Initialize object of the IndexServers class.

            Args:
                commcell_object     (object)    --  instance of the Commcell class

            Returns:
                object - instance of the IndexServers class
        """
        self._commcell_object = commcell_object
        self._cvpysdk_object = commcell_object._cvpysdk_object
        self._services = commcell_object._services
        self._update_response_ = commcell_object._update_response_
        self._all_index_servers = None
        self._roles_obj = None
        self.refresh()

    def __str__(self):
        """Representation string consisting of all index servers of the commcell.

            Returns:
                str - string listing all the index servers associated
                with the commcell
        """
        representation_string = '{:^5}\t{:^20}\n\n'.format('S. No.', 'IS Name')
        index = 1
        # BUGFIX: _all_index_servers is a dict keyed by cloud id, so iterate
        # its values -- iterating the dict itself yields int keys and the
        # original code raised TypeError on index_server['engineName'].
        for index_server in self._all_index_servers.values():
            representation_string += '{:^5}\t{:^20}\n'.format(
                index, index_server['engineName'])
            index += 1
        return representation_string

    def __repr__(self):
        """Representation string for the instance of the IndexServers class."""
        return "IndexServers class instance for Commcell: '{0}'".format(
            self._commcell_object.commserv_name
        )

    def __len__(self):
        """Returns the number of the index servers associated with the commcell."""
        return len(self._all_index_servers)

    def _response_not_success(self, response):
        """Helper method to raise an exception when response is not 200 (ok).

            Raises:
                SDKException:
                    Response was not success
        """
        raise SDKException(
            'Response',
            '101',
            self._update_response_(
                response.text))

    def _get_index_servers(self):
        """Method to retrieve all the index servers available on the commcell.

        Multi-node index servers appear once per node in the API response,
        so the versions of duplicate cloud ids are merged into one list.

            Raises:
                SDKException:
                    Failed to get the list of analytics engines

                    Response was not success
        """
        flag, response = self._cvpysdk_object.make_request(
            'GET', self._services['GET_ALL_INDEX_SERVERS'])
        if flag:
            if response.json() and 'listOfCIServer' in response.json():
                for item in response.json()['listOfCIServer']:
                    if item['cloudID'] in self._all_index_servers:
                        self._all_index_servers[item['cloudID']]['version'].append(
                            item['version'])
                    else:
                        item['version'] = [item['version']]
                        self._all_index_servers[item['cloudID']] = item
            else:
                self._all_index_servers = {}
        else:
            self._response_not_success(response)

    def _get_all_roles(self):
        """Creates an instance of the private _Roles class and caches it."""
        self._roles_obj = _Roles(self._commcell_object)

    @property
    def all_index_servers(self):
        """Returns the details of all the index servers of the commcell.

            Returns:
                dict    -   dictionary, keyed by cloud id, consisting of
                details of all the index servers associated with commcell

                Sample - {
                    <cloud_id_1> :
                        {
                            "engineName" : <property_value>,
                            "internalCloudName" : <property_value>,
                            ...
                        },
                    <cloud_id_2> :
                        {
                            "engineName" : <property_value>,
                            "cloudID" : <property_value>,
                            ...
                        }
                    }
        """
        return self._all_index_servers

    @property
    def roles_data(self):
        """Returns the details of all the cloud roles.

            Returns:
                list    -   list of dictionaries containing details of
                the cloud roles
        """
        return self._roles_obj.roles_data

    def refresh(self):
        """Refresh the properties of the IndexServers class."""
        # drop the cached data before re-fetching so stale entries cannot survive
        self._all_index_servers = {}
        self._get_index_servers()
        if not self._roles_obj:
            self._get_all_roles()

    def update_roles_data(self):
        """Synchronises all the cloud roles details with the commcell."""
        self._roles_obj.update_roles_data()

    def get_properties(self, cloud_name):
        """Returns all details of an index server with the given cloud name.

            Args:
                cloud_name  (str)   --  cloud name of the index server

            Returns:
                dict    -   dict consisting of details of the index server

            Raises:
                SDKException:
                    if no index server with the given cloud name exists
        """
        for index_server in self._all_index_servers:
            if self._all_index_servers[index_server]['engineName'] == cloud_name:
                return self._all_index_servers[index_server]
        raise SDKException('IndexServers', '102')

    def has(self, cloud_name):
        """Returns True if the index server with given name is present in commcell.

            Args:
                cloud_name  (str)   --  the engine name of the index server

            Returns:
                boolean -   True if an index server with the given engine
                name is associated with the commcell, else False

            Raises:
                SDKException:
                    Data type of the input(s) is not valid
        """
        if isinstance(cloud_name, basestring):
            for index_server in self._all_index_servers:
                if self._all_index_servers[index_server]["engineName"].lower() == cloud_name.lower():
                    return True
            return False
        raise SDKException('IndexServers', '101')

    def get(self, cloud_data):
        """Returns an IndexServer object if an index server is found.

            Args:
                cloud_data  (int/str)   --  cloud name or cloud ID of
                the index server

            Returns:
                object (IndexServer)    --  instance of the index server with
                the given engine name or cloud id

            Raises:
                SDKException:
                    Index Server not found.

                    Data type of the input(s) is not valid.
        """
        if isinstance(cloud_data, int):
            if cloud_data in self._all_index_servers:
                return IndexServer(
                    self._commcell_object,
                    self._all_index_servers[cloud_data]['engineName'],
                    cloud_data)
            # BUGFIX: the exception was previously constructed but never
            # raised, so an unknown int cloud id fell through and raised
            # the wrong error code ('101') at the end of the method.
            raise SDKException('IndexServers', '102')
        elif isinstance(cloud_data, basestring):
            name = cloud_data.lower()
            for cloud_id in self._all_index_servers:
                if self._all_index_servers[cloud_id]['engineName'].lower() == name:
                    return IndexServer(
                        self._commcell_object,
                        self._all_index_servers[cloud_id]['engineName'],
                        self._all_index_servers[cloud_id]['cloudID'])
            raise SDKException('IndexServers', '102')
        raise SDKException('IndexServers', '101')

    def create(
            self,
            index_server_name,
            index_server_node_names,
            index_directory,
            index_server_roles,
            index_pool_name=None,
            is_cloud=False,
            cloud_param=None):
        """Creates an index server within the commcell.

            Args:
                index_server_name       (str)   --  name for the index server

                index_server_node_names (list)  --  client names for the index server nodes

                index_directory         (str)   --  index location for the index server

                index_server_roles      (list)  --  list of role names to be assigned

                index_pool_name         (str)   --  name of the index pool used by a
                cloud index server (only read when is_cloud is True)

                is_cloud                (bool)  --  if True, creates a cloud mode index server

                cloud_param             (list)  --  list of custom parameters to be merged
                into the request json; any entry here overrides the default of the
                same name
                    [
                        {
                            "name": <name>,
                            "value": <value>
                        }
                    ]

            Raises:
                SDKException:
                    Data type of the input(s) is not valid.

                    Response was not success.

                    Response was empty.
        """
        if not (isinstance(index_server_roles, list) and isinstance(index_server_node_names, list)
                and isinstance(index_server_name, basestring)):
            raise SDKException('IndexServers', '101')
        cloud_meta_infos = {
            'INDEXLOCATION': index_directory,
            'REPLICATION': '1',
            'LANGUAGE': '0'
        }
        node_meta_infos = {
            'PORTNO': '20000',
            'JVMMAXMEMORY': '8191'
        }
        role_meta_infos = {}
        # deep copy so the shared constant template is never mutated
        req_json = deepcopy(IndexServerConstants.REQUEST_JSON)
        req_json['cloudInfoEntity'] = {
            'cloudName': index_server_name,
            'cloudDisplayName': index_server_name
        }
        if is_cloud:
            index_pool_obj = self._commcell_object.index_pools[index_pool_name]
            req_json['type'] = 5
            req_json['solrCloudInfo']['cloudPoolInfo'] = {
                'cloudId': int(index_pool_obj['pool_id'])
            }
            role_meta_infos['ISCLOUDMODE'] = '3'
            node_meta_infos['WEBSERVER'] = 'true'
        for node_name in index_server_node_names:
            node_obj = self._commcell_object.clients[node_name]
            node_data = {
                "opType": IndexServerConstants.OPERATION_ADD,
                "nodeClientEntity": {
                    "hostName": node_obj['hostname'],
                    "clientId": int(node_obj['id']),
                    "clientName": node_name
                },
                'nodeMetaInfos': []
            }
            for node_info in node_meta_infos:
                node_data['nodeMetaInfos'].append({
                    'name': node_info,
                    'value': node_meta_infos[node_info]
                })
            req_json['cloudNodes'].append(node_data)
        for role in index_server_roles:
            role_id = self._roles_obj.get_role_id(role)
            if not role_id:
                raise SDKException('IndexServers', '103')
            role_data = {
                "roleId": role_id,
                "roleName": role,
                "operationType": IndexServerConstants.OPERATION_ADD,
                'roleMetaInfos': []
            }
            for role_info in role_meta_infos:
                role_data['roleMetaInfos'].append({
                    'name': role_info,
                    'value': role_meta_infos[role_info]
                })
            req_json['solrCloudInfo']['roles'].append(role_data)
        if cloud_param:
            # caller-supplied params win over the defaults of the same name
            for param in cloud_param:
                if param['name'] in cloud_meta_infos:
                    del cloud_meta_infos[param['name']]
                req_json['cloudMetaInfos'].append(param)
        for cloud_info in cloud_meta_infos:
            req_json['cloudMetaInfos'].append({
                'name': cloud_info,
                'value': cloud_meta_infos[cloud_info]
            })
        flag, response = self._cvpysdk_object.make_request(
            'POST', self._services['CLOUD_CREATE'], req_json)
        if flag:
            if response.json():
                error_code = response.json()['genericResp']['errorCode']
                error_string = response.json()['genericResp']['errorMessage']
                if error_code == 0:
                    self.refresh()
                else:
                    o_str = 'Failed to create Index Server. Error: "{0}"'.format(
                        error_string)
                    raise SDKException('IndexServers', '102', o_str)
            else:
                raise SDKException('Response', '102')
        else:
            self._response_not_success(response)

    def delete(self, cloud_name):
        """Deletes / removes an index server from the commcell.

            Args:
                cloud_name  (str)   --  cloud name of the index server
                to be removed from the commcell

            Raises:
                SDKException:
                    Data type of the input(s) is not valid.

                    Response was not success.

                    Response was empty.
        """
        if not isinstance(cloud_name, basestring):
            raise SDKException('IndexServers', '101')
        cloud_id = self.get(cloud_name).cloud_id
        req_json = deepcopy(IndexServerConstants.REQUEST_JSON)
        req_json["opType"] = IndexServerConstants.OPERATION_DELETE
        req_json['cloudInfoEntity']['cloudId'] = cloud_id
        flag, response = self._cvpysdk_object.make_request(
            'POST', self._services['CLOUD_DELETE'], req_json
        )
        if flag:
            # success is signalled by a genericResp WITHOUT an errorCode key
            if response.json() and 'genericResp' in response.json() \
                    and 'errorCode' not in response.json()['genericResp']:
                self.refresh()
                return
            if response.json() and 'genericResp' in response.json():
                raise SDKException(
                    'Response', '102', response.json()['genericResp'].get(
                        'errorMessage', ''))
            raise SDKException('Response', '102')
        self._response_not_success(response)
class IndexServer(object):
"""Class for performing index server operations for a specific index server"""
def __init__(self, commcell_obj, name, cloud_id=None):
"""Initialise the IndexServer class instance.
Args:
commcell_obj (object) -- instance of the Commcell class
name (str) -- name of the index server
cloud_id (int) -- cloud id of the index server
default: None
Returns:
object - instance of the IndexServer class
"""
self._engine_name = name
self._commcell_obj = commcell_obj
self._cvpysdk_object = self._commcell_obj._cvpysdk_object
self._services = self._commcell_obj._services
if cloud_id:
self._cloud_id = cloud_id
else:
self._cloud_id = self._get_cloud_id()
self._properties = None
self._roles_obj = None
self.refresh()
def __repr__(self):
"""String representation of the instance of this class."""
return 'IndexServer class instance for index server: "{0}"'.format(
self._engine_name)
def _get_cloud_id(self):
"""Get the cloud id for the index server
Returns:
int - cloud id for the index server
"""
return self._commcell_obj.index_servers.get(self._engine_name).cloud_id
def _get_properties(self):
"""Get the properties of the index server"""
self._properties = self._commcell_obj.index_servers.get_properties(
self._engine_name)
def refresh(self):
"""Refresh the index server properties"""
self._get_properties()
if not self._roles_obj:
self._roles_obj = _Roles(self._commcell_obj)
def update_roles_data(self):
"""Synchronize the cloud roles data with the commcell"""
self._roles_obj.update_roles_data()
def modify(self, index_location, node_name, node_params):
"""Modifies the properties of an index server
Args:
index_location (str) -- index server data directory
node_name (str) -- index server node name
node_params (dict) -- parameters to be passed
[
{
"name" : <property_name>,
"value" : <property_value>
}
]
Raises:
SDKException:
Response was not success.
Response was empty.
"""
json_req = deepcopy(IndexServerConstants.REQUEST_JSON)
json_req['opType'] = IndexServerConstants.OPERATION_EDIT
json_req['cloudNodes'] = [{
"opType": IndexServerConstants.OPERATION_EDIT,
"nodeClientEntity": {
"clientId": int(self._commcell_obj.clients.get(node_name).client_id)
},
"nodeMetaInfos": [
{
"name": "INDEXLOCATION",
"value": index_location
}
]
}]
json_req['cloudInfoEntity']['cloudId'] = self.cloud_id
for param in node_params:
json_req['cloudNodes'][0]['nodeMetaInfos'].append(param)
flag, response = self._cvpysdk_object.make_request(
"POST", self._services['CLOUD_MODIFY'], json_req)
if flag:
if response.json():
if 'cloudId' in response.json():
self.refresh()
return
raise SDKException('Response', '102')
raise SDKException('Response', '101')
def update_role(self, props=None):
"""Updates a role of an Index Server
Args:
props (list) -- array of dictionaries
consisting details of the roles such as role name
and operation type.
[{
"roleName": <name> (str)
"operationType": 1 or 2 (int)
1 for adding a role
2 for removing a role
}]
Raises:
SDKException:
if response is empty
if response is not success
"""
json_req = {"cloudId": self.cloud_id, "roles": []}
if props:
for prop in props:
role_id = self._roles_obj.get_role_id(prop['roleName'])
if not role_id:
raise SDKException('IndexServers', '103')
prop['roleId'] = role_id
json_req['roles'].append(prop)
flag, response = self._cvpysdk_object.make_request(
'POST', self._services['CLOUD_ROLE_UPDATE'], json_req
)
if flag:
if response.json() and 'errorCode' in response.json():
if response.json()['errorCode'] == 0:
self.refresh()
return
raise SDKException('Response', '102')
raise SDKException('Response', '101')
@property
def is_cloud(self):
return self.server_type == 5
@property
def roles_data(self):
"""Returns the cloud roles data"""
return self._roles_obj.roles_data
@property
def properties(self):
"""Returns the index server properties"""
return self._properties
@property
def host_name(self):
"""Returns the host name of index server"""
return self._properties[IndexServerConstants.HOST_NAME]
@property
def cloud_name(self):
"""Returns the internal cloud name of index server"""
return self._properties[IndexServerConstants.CLOUD_NAME]
@property
def client_name(self):
"""Returns the client name of index server"""
return self._properties[IndexServerConstants.CLIENT_NAME]
@property
def server_url(self):
"""Returns the content indexing url of index server"""
return self._properties[IndexServerConstants.CI_SERVER_URL]
@property
def type(self):
"""Returns the type of index server"""
return self._properties[IndexServerConstants.TYPE]
@property
def base_port(self):
"""Returns the base port of index server"""
return self._properties[IndexServerConstants.BASE_PORT]
@property
def client_id(self):
"""Returns the client id of index server"""
return self._properties[IndexServerConstants.CLIENT_ID]
@property
def roles(self):
"""Returns the roles of index server"""
return self._properties[IndexServerConstants.ROLES]
@property
def cloud_id(self):
"""Returns the cloud id of index server"""
return self._properties[IndexServerConstants.CLOUD_ID]
@property
def server_type(self):
"""Returns the server type of index server"""
return self._properties[IndexServerConstants.SERVER_TYPE]
@property
def engine_name(self):
    """Engine name (the index server's name) recorded in the properties."""
    properties = self._properties
    return properties[IndexServerConstants.ENGINE_NAME]
@property
def index_server_client_id(self):
    """Index server client id recorded for this index server."""
    properties = self._properties
    return properties[IndexServerConstants.INDEX_SERVER_CLIENT_ID]
class _Roles(object):
"""Class for cloud roles data operations"""
def __init__(self, commcell_object):
"""Initializes _Roles class with commcell object
Args:
commcell_object (object) -- instance of Commcell class
Returns:
object - instance of _Roles class
"""
self.commcell_object = commcell_object
self._roles_data = None
self.refresh()
def refresh(self):
"""Refreshes the class data"""
self._get_all_roles()
def _get_all_roles(self):
"""Method to get all cloud roles details available on the commcell.
Raises:
SDKException:
Response was empty.
Response was not success.
"""
flag, response = self.commcell_object._cvpysdk_object.make_request(
"GET", self.commcell_object._services['GET_ALL_ROLES']
)
if flag:
if response.json():
if 'rolesInfo' in response.json():
self._roles_data = response.json()['rolesInfo']
return
raise SDKException('Response', '102')
raise SDKException('Response', '101')
def get_role_id(self, role_name):
"""Method to get a cloud role id with given name
Args:
role_name (str) -- cloud role name of which role id has to be returned
Returns:
role_id (int) -- if role name is found in roles data then returns the id
else returns None
"""
for role_data in self._roles_data:
if role_data['roleName'] == role_name:
return role_data['roleId']
return None
def update_roles_data(self):
"""Synchronize the cloud role data with the commcell database"""
self._get_all_roles()
@property
def roles_data(self):
"""Returns the list of dictionary of details of each cloud role"""
return self._roles_data
| en | 0.629219 | # -------------------------------------------------------------------------- # Copyright Commvault Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # -------------------------------------------------------------------------- File for performing index server related operations on the commcell IndexServers, IndexServer and _Roles are 3 classes defined in this file IndexServers: Class for representing all the index servers associated with the commcell IndexServer: Class for a instance of a single index server of the commcell _Roles: Class for storing all the cloud role details IndexServers ============ __init__() -- initialize object of IndexServers class associated with the commcell __str() -- returns all the index servers of the commcell __repr__() -- returns the string to represent the instance __len__() -- returns the number of index servers associated _get_index_servers() -- gets all the index server associated with the commcell _response_not_success() -- raise exception when response is not 200 _get_all_roles() -- creates an instance of _Roles class has() -- returns whether the index server is present or not get() -- returns a IndexServer object for given cloud name create() -- creates a index server within the commcell delete() -- deletes a index server associated with commcell update_roles_data() -- fetches the cloud roles data from commcell get_properties() -- returns a dict of data of index server for the given cloud name refresh() -- refresh 
the index servers associated with commcell IndexServers Attributes ----------------------- **all_index_servers** -- returns the dictionary consisting of all the index servers associated with the commcell and there details **roles_data** -- returns the list of cloud roles details IndexServer =========== __init()__ -- initializes the object with the specified commcell object, index server name and the cloud id __repr__() -- returns the index server's name, the instance is associated with _get_cloud_id() -- gets the cloud id _get_properties() -- gets all the properties of the index server refresh() -- refresh all the properties of client update_roles_data() -- fetches the cloud roles data from commcell modify() -- to modify the index server node details update_role() -- to update the roles assigned to cloud IndexServer Attributes ---------------------- **properties** -- returns the properties of this index server **roles_data** -- returns all the available cloud roles data **host_name** -- returns the host name for the index server **internal_cloud_name** -- returns the internal cloud name **client_name** -- returns the client name for index server **server_url** -- returns the content indexing server url **type** -- returns the type of the index server **base_port** -- returns the base port of this index server **client_id** -- returns the client id for this index server **roles** -- returns the array of roles installed with the index server within the commcell **cloud_id** -- returns the cloud id of the index server **server_type** -- returns the server type of the index server **engine_name** -- returns the engine name that is index server name **index_server_client_id** -- returns the index server client id _Roles ====== __init__() -- initializes the class with commcell object refresh() -- refreshes the attributes _get_all_roles() -- fetches the cloud roles data from commcell get_role_id() -- returns role id for given role name update_roles_data() -- fetches the 
cloud roles data from commcell _Roles Attributes ----------------- **roles_data** -- returns the list of details of all cloud roles Class for representing all the index servers associated with the commcell. Initialize object of the IndexServers class. Args: commcell_object (object) -- instance of the Commcell class Returns: object - instance of the IndexServers class Representation string consisting of all index servers of the commcell. Returns: str - string of all the index servers with different roles associated with the commcell Representation string for the instance of the IndexServers class. Returns the number of the index servers associated with the commcell Helper method to raise exception when response is not 200 (ok) Raises: SDKException: Response was not success Method to retrieve all the index server available on commcell. Raises: SDKException: Failed to get the list of analytics engines Response was not success Creates an instance of _Roles class and adds it to the IndexServer class Returns the details of all the index server for associated commcell. Returns: dict - dictionary consisting details of all the index servers associated with commcell Sample - { <cloud_id_1> : { "engineName" : <property_value>, "internalCloudName" : <property_value>, ... }, <cloud_id_2> : { "engineName" : <property_value>, "cloudID" : <property_value>, ... } } Returns the details of all the cloud roles data Returns: list - list of dictionary containing details of the cloud roles Refresh the properties of IndexServers class Synchronises all the cloud roles details with the commcell Returns all details of a index server with the cloud name Args: cloud_name (str) -- cloud name of index server Returns: dict - dict consisting details of the index server Returns True if the index server with given name is present in commcell. 
Args: cloud_name (str) -- the engine name of index server Returns: boolean - True if index server with given name as is_name is associated with the commcell else returns False Raises: SDKExecption: Data type of the input(s) is not valid Returns IndexServer object if a index server is found. Args: cloud_data (int/str) -- cloud name or cloud ID of index server Returns: object (IndexServer) -- Instance on index server with the engine name or cloud id as item Raises: SDKException: Index Server not found. Data type of the input(s) is not valid. Creates an index server within the commcell Args: index_server_node_names (list) -- client names for index server node index_server_name (str) -- name for the index server index_directory (str) -- index location for the index server index_server_roles (list) -- list of role names to be assigned index_pool_name (str) -- name for the index pool to used by cloud index server cloud_param (list) -- list of custom parameters to be parsed into the json for index server meta info [ { "name": <name>, "value": <value> } ] is_cloud (bool) -- if true then creates a cloud mode index server Raises: SDKException: Data type of the input(s) is not valid. Response was not success. Response was empty. Deletes / removes an index server from the commcell Args: cloud_name (str) -- cloud name of index server to be removed from the commcell Raises: SDKException: Data type of the input(s) is not valid. Response was not success. Response was empty. Class for performing index server operations for a specific index server Initialise the IndexServer class instance. Args: commcell_obj (object) -- instance of the Commcell class name (str) -- name of the index server cloud_id (int) -- cloud id of the index server default: None Returns: object - instance of the IndexServer class String representation of the instance of this class. 
Get the cloud id for the index server Returns: int - cloud id for the index server Get the properties of the index server Refresh the index server properties Synchronize the cloud roles data with the commcell Modifies the properties of an index server Args: index_location (str) -- index server data directory node_name (str) -- index server node name node_params (dict) -- parameters to be passed [ { "name" : <property_name>, "value" : <property_value> } ] Raises: SDKException: Response was not success. Response was empty. Updates a role of an Index Server Args: props (list) -- array of dictionaries consisting details of the roles such as role name and operation type. [{ "roleName": <name> (str) "operationType": 1 or 2 (int) 1 for adding a role 2 for removing a role }] Raises: SDKException: if response is empty if response is not success Returns the cloud roles data Returns the index server properties Returns the host name of index server Returns the internal cloud name of index server Returns the client name of index server Returns the content indexing url of index server Returns the type of index server Returns the base port of index server Returns the client id of index server Returns the roles of index server Returns the cloud id of index server Returns the server type of index server Returns the engine name of index server Returns the index server client id of index server Class for cloud roles data operations Initializes _Roles class with commcell object Args: commcell_object (object) -- instance of Commcell class Returns: object - instance of _Roles class Refreshes the class data Method to get all cloud roles details available on the commcell. Raises: SDKException: Response was empty. Response was not success. 
Method to get a cloud role id with given name Args: role_name (str) -- cloud role name of which role id has to be returned Returns: role_id (int) -- if role name is found in roles data then returns the id else returns None Synchronize the cloud role data with the commcell database Returns the list of dictionary of details of each cloud role | 1.835931 | 2 |
digsby/src/gui/uberwidgets/uberbook/tabbar.py | ifwe/digsby | 35 | 6612998 | <reponame>ifwe/digsby<filename>digsby/src/gui/uberwidgets/uberbook/tabbar.py
from __future__ import with_statement
from tab import Tab
from gui import skin
from OverlayImage import OverlayImage
from navigation_arrows import Navi
from gui.uberwidgets.UberButton import UberButton
from gui.uberwidgets.UberEvents import EVT_DRAG_START
import wx
from wx import RectS, Rect, RectPS
from util.primitives.funcs import do
from common import pref, profile, prefprop
from gui.uberwidgets import UberWidget
from cgui import SimplePanel
#from traceback import print_stack,format_stack
# wx command ids used by the tab bar buttons and the tab context menu
CUPID = wx.NewId()             # scroll-up button (side/vertical tab mode)
CDOWNID = wx.NewId()           # scroll-down button (side/vertical tab mode)
CLOSE_TAB = wx.NewId()         # context menu: close the clicked tab
CLOSE_OTHER_TABS = wx.NewId()  # context menu: close every tab except the clicked one
class TabBar(SimplePanel, UberWidget):
"""
Where the tabs live, handles all display and organization functionality
"""
def __init__(self, parent, skinkey):
    """Create the tab bar.

    parent  -- the owning notebook/splitter window
    skinkey -- skin tree key used for all tab bar elements
    """
    SimplePanel.__init__(self, parent)

    self.tabs = []       # a list of all the tabs
    self.rows = []       # a list of all the visible rows, each a list of the tabs in that row
    self.rowindex = 0    # the first visible row
    self.tabindex = 0    # the first tab of the first visible row
    self.tabendex = 0    # the last tab of the last visible row

    events = [(wx.EVT_PAINT, self.OnPaint),
              (wx.EVT_SIZE, self.OnSize),
              (wx.EVT_BUTTON, self.OnButton),
              (wx.EVT_MOUSEWHEEL, self.OnWheel),
              (wx.EVT_MOTION,self.OnMotion)]
    for event, method in events: self.Bind(event, method)

    self.flagedrows = set()
    self.lastsize=self.Size      # remembered to suppress no-op relayouts in OnSize
    self.rowheight=0             # height of a row in pixels

    # loads skin attributes; runs before navi/buttons exist, which
    # UpdateSkin guards against with getattr/hasattr
    self.SetSkinKey(skinkey,True)

    # buttons for vertical alignment (side-tab mode scrolling)
    self.cupb = UberButton(self, CUPID, skin=self.scrollbuttonskin, icon=self.upicon)
    self.cupb.Show(False)
    self.cdownb = UberButton(self, CDOWNID, skin=self.scrollbuttonskin, icon=self.downicon)
    self.cdownb.Show(False)

    # the navigation box (prev/next/up/down arrows and close button)
    self.navi=Navi(self)

    self.dragorigin = None   # the tab currently being dragged from this bar
    self.dragtarget = None   # tab the mouse is over, drop target on release

    # the arrow image shown when dragging tabs
    self.dropmarker=OverlayImage(self, self.dropmarkerimage)
    # self.dropmarker = Storage(Show = lambda v=True: None)
    self.dragside=None       # was the tab dropped on the left or right of the target tab

    # re-layout / re-skin whenever these prefs change
    link = profile.prefs.link #@UndefinedVariable
    link('tabs.rows', self.Generate, False)
    link('tabs.tabbar_x', self.Generate, False)
    link('tabs.hide_at_1', self.Generate, False)
    link('tabs.side_tabs', self.SkinRedirect, False)

    # context-menu item clicks arrive as menu events on the top-level window
    self.Top.Bind(wx.EVT_MENU, self.OnMenuEvent)
# live preference-backed class attributes
side_tabs = prefprop('tabs.side_tabs')   # True -> tabs are laid out vertically along the side
tab_rows = prefprop('tabs.rows', 2)      # number of visible tab rows in horizontal mode (0 -> single row)
def UpdateSkin(self):
    """Reload every skin element for both layouts.

    For each element name three attributes are set: top<name> (from the
    normal skin tree), side<name> (from the 'side' variant, falling back
    to the top value), and bare <name> pointing at whichever layout is
    currently active.  Child widgets are re-skinned only once they exist.
    """
    key = self.tabskin = self.skinkey
    # lookups into the main skin tree and its side-tab variant
    g = lambda k, default = sentinel: skin.get(key + '.' + k, default)
    sg = lambda k, default = sentinel: skin.get('side' + key + '.' + k, default)
    # (attribute name, skin path, optional default)
    elems = (('spacing', 'spacing', 2),
             ('bg', 'backgrounds.bar'),
             ('dropmarkerimage', 'dropmarker.image'),
             # ('dropmarkeroverlay', 'dropmarker.overlay', 0),
             ('dropmarkeroffset', 'dropmarker.offset', 0),
             ('closebuttonskin', 'closebuttonskin', ''),
             ('closeicon', 'icons.close', None),
             ('scrollbuttonskin', 'scrollbuttonskin', ''),
             ('lefticon', 'icons.left', ''),
             ('righticon', 'icons.right', ''),
             ('upicon', 'icons.up', ''),
             ('downicon', 'icons.down', ''))
    for elem in elems:
        setattr(self, 'top' + elem[0], g(*elem[1:]))
        setattr(self, 'side' + elem[0], sg(elem[1],getattr(self,'top' + elem[0])))
        setattr(self, elem[0], getattr(self, ('side' if self.side_tabs else 'top') + elem[0]))

    # dropmarker does not exist during the first UpdateSkin call from __init__
    if hasattr(self,'dropmarker'):
        self.dropmarker.SetImage(self.dropmarkerimage)
        self.dropmarker.SetRotation((self.side_tabs and not self.dropmarkerimage))

    # navi and the scroll buttons also exist only after __init__ finished
    navi = getattr(self, 'navi', None)
    if navi is not None:
        self.cdownb.SetSkinKey(self.scrollbuttonskin)
        self.cupb.SetSkinKey(self.scrollbuttonskin)
        self.cdownb.SetIcon(self.downicon)
        self.cupb.SetIcon(self.upicon)

        self.navi.closebutton.SetSkinKey(self.closebuttonskin)
        self.navi.closebutton.SetIcon(self.closeicon)

        scrollskin = self.scrollbuttonskin
        navi.prevb.SetSkinKey(scrollskin)
        navi.nextb.SetSkinKey(scrollskin)
        navi.upb.SetSkinKey(scrollskin)
        navi.downb.SetSkinKey(scrollskin)

        navi.prevb.SetIcon(self.lefticon)
        navi.nextb.SetIcon(self.righticon)
        navi.upb.SetIcon(self.upicon)
        navi.downb.SetIcon(self.downicon)

    # defer the relayout until after the current event completes
    wx.CallAfter(self.Generate)
def SkinRedirect(self,val=None):
    """Repoint the active skin attributes at the 'side' or 'top' variants.

    Called as a pref-link callback when 'tabs.side_tabs' changes; *val*
    (the new pref value) is ignored — self.side_tabs is read directly.
    """
    prefix = 'side' if self.side_tabs else 'top'
    for attr in ('spacing',
                 'bg',
                 'dropmarkerimage',
                 'closebuttonskin',
                 'closeicon',
                 'scrollbuttonskin',
                 'lefticon',
                 'righticon',
                 'upicon',
                 'downicon'):
        setattr(self, attr, getattr(self, prefix + attr))
    self.UpdateChildSkins()
def UpdateChildSkins(self):
    """Push the current skin down to the scroll buttons, navi widgets and
    every tab, then trigger a full relayout."""
    self.cdownb.SetSkinKey(self.scrollbuttonskin,True)
    self.cupb.SetSkinKey(self.scrollbuttonskin,True)

    navi, sbs = self.navi, self.scrollbuttonskin
    navi.closebutton.SetSkinKey(self.closebuttonskin,True)
    navi.prevb.SetSkinKey(sbs, True)
    navi.nextb.SetSkinKey(sbs, True)
    navi.upb.SetSkinKey(sbs, True)
    navi.downb.SetSkinKey(sbs, True)

    self.UpdateChildrenIcons()

    # tabs re-read their layout mode (top vs side) from the bar
    for tab in self.tabs:
        tab.UpdateMode()

    self.Generate()
def __repr__(self):
    'Debug representation listing the contained tabs.'
    return '<TabBar %r>' % self.tabs
def OnDragStart(self, tab):
    'Catches the tab drag event and starts the tab dragging system.'
    self.NotifyDrag(tab)
def OnMotion(self,event):
    """Track the pointer while a tab drag is in progress.

    A drag is active when this bar owns a dragged tab (dragorigin) or the
    manager reports a drag source from another bar.
    """
    drag_active = self.dragorigin or self.Manager.source
    if event.LeftIsDown() and drag_active:
        self.DragCalc(event.Position)
def __getitem__(self, index):
    'Index the bar like a sequence: bar[i] is the i-th Tab.'
    return self.tabs[index]
def OnPaint(self, event):
    """Paint the bar background.

    Horizontal mode tiles one background strip per visible row (capped by
    the 'tabs.rows' pref), each strip a tab-height tall; side-tab mode
    paints a single full-size background.
    """
    dc = wx.PaintDC(self)
    rect = RectS(self.Size)
    # guard on self.tabs: the original indexed self.tabs[0] unconditionally,
    # raising IndexError if a paint arrived before the first tab was added
    if not self.side_tabs and self.tabs:
        rcount = min(len(self.rows), pref('tabs.rows', 2))
        height = self.tabs[0].Size.height
        y = 0
        for unused_i in xrange(rcount):
            self.bg.Draw(dc, Rect(rect.x, y, rect.width, height))
            y += height
    else:
        # side-tab mode, or no tabs yet: paint one full background
        self.bg.Draw(dc, rect)
def Add(self, page, focus, resort = True):
    """
    Adds a tab to the bar. Should only be used by parent NoteBook.

    page   - page in PageContainer the tab is to be associated with
    focus  - whether that tab should steal focus from the current tab
    resort - when True (and not focusing), relayout the bar immediately

    Returns the newly created Tab.
    """
    tab = Tab(self, page, skinkey = self.tabskin)
    tab.Bind(wx.EVT_CONTEXT_MENU, self.ShowMenu)
    tab.Show(False)
    self.tabs.append(tab)

    if focus:
        # activating the tab triggers its own layout pass
        wx.CallAfter(tab.SetActive, True)
    elif resort:
        if self.side_tabs:
            self.ReVgenerate()
        else:
            self.Regenerate(True)

    return tab
def ShowMenu(self, e):
    """Pop up the tab context menu for the right-clicked tab.

    The menu is built lazily on first use and cached in self._tabmenu;
    self._menutab remembers which tab the menu applies to (see OnMenuEvent).
    """
    self._menutab = e.EventObject
    try:
        menu = self._tabmenu
    except AttributeError:
        # local import avoids a module-level import cycle — TODO confirm
        from gui.uberwidgets.umenu import UMenu
        menu = self._tabmenu = UMenu(self)
        menu.AddItem('Close &Other Tabs', id = CLOSE_OTHER_TABS)
        menu.AddSep()
        menu.AddItem('&Close Tab', id = CLOSE_TAB)

    menu.PopupMenu()
def OnMenuEvent(self, e):
    '''Invoked when a tab context menu item is clicked.'''
    if e.Id == CLOSE_TAB:
        self._menutab.CloseTab()
    elif e.Id == CLOSE_OTHER_TABS:
        menutab = self._menutab

        # switch to that tab first
        menutab.active = True

        # iterate a copy: CloseTab mutates self.tabs
        with self.Frozen():
            for tab in self.tabs[:]:
                if tab is not menutab:
                    tab.CloseTab()
    else:
        e.Skip()
def Generate(self, val=None):
    """Run a full relayout in whichever mode is active.

    Also applies the 'tabs.tabbar_x' pref to the navi close button.
    *val* is ignored; it is accepted so this can serve as a pref-link
    callback.
    """
    show_close = pref('tabs.tabbar_x', False)
    self.navi.closebutton.Show(show_close)
    if not self.side_tabs:
        self.Regenerate()
    else:
        self.ReVgenerate(True)
def ReVgenerate(self,total=False, safe=False, dotoggle=True):
    """Vertical (side-tab) counterpart of Regenerate: one tab per row.

    total    -- when True, rebuild the visible window of tabs from scratch
    safe     -- unused here; kept for signature parity with Regenerate
    dotoggle -- when True, call self.Toggle() after layout
    """
    #TODO: Should we be careful about the tab leaving the bar?
    tabs = self.tabs
    if not tabs: return

    do(tab.Show(False) for tab in self.tabs)
    for tab in self.tabs: tab.row = None
    del self.rows[:]

    # Safety precautions prevent list access errors
    if self.tabindex < 0 or self.tabindex >= len(tabs):
        self.tabindex = 0

    # Preset variables
    n = self.tabindex # the first tab shown
    self.rowheight = tabs[0].GetMinHeight()
    area = self.Notebook.Size.height - 32 # Height in pixels of the tabbar
    # number of fully visible rows in the given area at the given height
    i = area//self.rowheight
    count = len(tabs)

    # one tab per row
    for r in xrange(count): tabs[r].row=r

    rows = self.rows
    size = self.Size

    # Sets navimode and position
    navi = self.navi
    navi.ShowNav(4)
    navi.Hide()
    navi.Position = wx.Point(size.width - navi.Size.width,0)

    # Totally reconstructs the list if it's told to or there are no tabs in the rows or
    # if there isn't one more tab than there is room for and there is enough room to fit
    # them all and number of tabs in the row equals the number of tabs
    # NOTE(review): self.rows was just cleared above, so 'not rows' is always
    # true here and the else branch below looks unreachable — confirm.
    if total or not rows or (i + 1 != len(rows[0])) and not (i > len(rows[0])) and len(rows[0]) == len(tabs):
        rows.append([])
        col = rows[0]
        # if all tabs fit
        if i >= count:
            n=0
            self.tabindex=0
            do(col.append(tab) for tab in tabs)
            av=col[0].MinSize.height
        # calculate and show range
        else:
            for t in xrange(n,n+i+1):
                if t < len(tabs):col.append(tabs[t])
            # populate with earlier stuff
            while len(col) < i and n > 0:
                n-=1
                col.insert(0,tabs[n])
            if col: av = col[0].MinSize.height
    else:
        # just leave the new values the same as the old
        col = rows[0]
        av = col[0].MinSize.height

    # Show all tabs in the bar; 16 px offset leaves room for the scroll-up button
    count = 16
    for t in col:
        t.Size = (self.Size.width,av)
        t.Position = (0,count)
        count += av
        t.Show()

    self.tabindex=n
    endex = self.tabendex=n+len(col)

    if dotoggle:
        self.Toggle()

    # enable scroll buttons only when there is something to scroll to
    cupb, cdownb = self.cupb, self.cdownb
    cupb.Enable(self.tabindex != 0)
    cdownb.Enable(endex < len(tabs) or tabs[endex - 1].Position.y +
                  tabs[endex-1].Size.height > size.height - 16)

    self.UpdateNotify()
def Regenerate(self, safe = False, dotoggle=True):
    '''
    Regenerates layout information.

    safe is a flag to indicate if we should try to keep the currently active
    tab in view at all times. (This doesn't occur when scrolling, for
    instance.)
    '''
    # early exit for when the tabbar isn't visible.
    if not self.IsShown() and len(self.tabs) == 1:
        return

    # freeze/thaw around the heavy relayout to avoid flicker
    with self.Frozen():
        self._Regenerate(safe = safe, dotoggle = dotoggle)

    self.Refresh(False)
def _Regenerate(self, safe = False, dotoggle = True):
    """Horizontal layout workhorse: partition self.tabs into rows, then
    size and position every visible tab.

    Called via Regenerate() inside a Freeze/Thaw pair; not intended to be
    called directly.  See Regenerate for the meaning of *safe*.
    """
    self.cupb.Show(False)
    self.cdownb.Show(False)

    parentpage = self.Parent.pagecontainer
    # style is the number of rows (or 0 for single)
    style = self.tab_rows
    # Should we be careful about the tab leaving the bar?
    careful = not safe and parentpage.active

    # Hide all tabs in preparation for refilling
    for tab in self.tabs:
        tab.Show(False)
        tab.row = None
    del self.rows[:]

    # navi set up, see if arrows are needed and placement
    tally = sum(tab.MinSize.width for tab in self.tabs) # total size of tabs
    navi = self.navi
    tabs = self.tabs
    rows = self.rows

    if not tabs: return

    # Tab alignment calculations
    # Safety precautions prevent list access errors
    if self.tabindex < 0 or self.tabindex >= len(tabs):
        self.tabindex = 0

    # Preset variables
    n = self.tabindex # the first tab shown
    i = n
    row = 0
    self.rowheight = tabs[0].MinHeight
    my_w, nav_w = self.Size.width, navi.Size.width

    # Decide what kind of navigation panel, if any, to use...
    if tally >= my_w - nav_w and not style:
        navi.ShowNav(1) # arrows left and right
    elif tally >= (my_w - nav_w):
        navi.ShowNav(3) # arrows up and down next to the X
    else:
        navi.ShowNav(0)

    # Where to put navigation panel.
    navi.Freeze()
    navi.Show(True)
    navi.Fit()
    navi.Position = wx.Point(self.Size.width-navi.Size.width,0)
    navi.Size = wx.Size(-1,self.Size.height)
    navi.Thaw()

    # More preparing vars
    area = self.Notebook.Size.width - navi.Size.width

    # While more tabs are not in a row
    while len(tabs) > i:
        tally = tabs[i].MinSize.width
        rows.append([])

        # Loop through each visible tab, fitting tabs on the right.
        while i < len(tabs) and tally < area:
            i += 1
            if i < len(tabs):
                tally += tabs[i].MinSize.width

        # Be careful that the active tab doesn't scroll off the bar
        if careful and not style:
            activeindex = tabs.index(parentpage.active.tab)
            change=False
            # add tabs until the active tab is visible
            while activeindex>=i and n!=i:
                i += 1
                tally += tabs[i].MinSize.width
                change = True
            # Remove tab if more tabs than room
            if tally >= area and change:
                tally -= tabs[n].MinSize.width
                n += 1
                self.tabindex=n

        # If extra space, fit tabs to the right of the row
        if not style: # if single row,
            while n > 0 and area - tally > tabs[n-1].MinSize.width:
                n -= 1
                self.tabindex = n
                tally += tabs[n].MinSize.width

        # Injects tabs calculated to fit in that row into that row
        # NOTE(review): range(n, i) is used only as a non-empty test;
        # 'n < i' would express the same without building a list.
        if range(n, i):
            rows[row] = [tabs[t] for t in xrange(n, i)]
            for tab in rows[row]:
                tab.row=row
        else:
            rows[row].append(tabs[i])
            i += 1

        if not style: break # If we're in single row, break now.

        row += 1
        n = i

    # Row calculation
    if self.rowindex >= len(rows):
        self.rowindex = len(rows) - 1

    # cycle through visible rows
    row = self.rowindex
    visible = self.tab_rows or 1

    if careful and style:
        active = parentpage.active.tab
        for ir,r in enumerate(rows):
            if active in r:
                if ir<row:
                    # active row is above the window: scroll up to it
                    row = self.rowindex = ir
                elif ir >= row + visible:
                    # active row is below the window: scroll down to it
                    row = ir - (visible - 1)

    # If we're closing tabs above where is visible, keep the visible
    # index "where it is"
    if len(rows) - (row + 1) < visible and len(rows) >= visible:
        row = len(rows) - visible
    self.rowindex = row

    # Place tabs!
    while row < len(rows) and row < self.rowindex + visible and len( rows[row] ) != 0:
        # if this is a row that needs to be scrunched...
        if rows.index(rows[row]) == len(rows)-1 and \
           (style or len(rows[row]) == len(tabs)):
            for t in xrange(0,len(rows[row])):
                thistab = rows[row][t]
                thistab.SetSize(thistab.MinSize)
                if not t:
                    # The first tab is set to its minimum width at x: 0
                    thistab.SetPosition((0, self.rowheight*(row-self.rowindex)))
                else:
                    # Every other tab is placed right next to the tab
                    # before it.
                    thistab.SetPosition((rows[row][t-1].Position.x \
                                         + rows[row][t-1].Size.width,
                                         self.rowheight*(row-self.rowindex)))
                thistab.Show(True)
        # If there are more rows than the current row...
        elif len(rows) > row:
            # Get a list of tab indices, widest to smallest.
            ordered = [rows[row].index(t)
                       for t in sorted(rows[row],
                                       key=lambda o: o.MinSize.width,
                                       reverse=True) ]
            length = len(ordered)
            reserved=0
            o=0
            # Average width of tab if all tabs are the same size, and
            # fill up all the area.
            av = (area - reserved) / (length - o)
            mark = 0
            while o < length:
                # Loop from "current" tab to the end
                for t in xrange(o, length):
                    tab = rows[row][ordered[t]]
                    # If this tab is larger than average...
                    if tab.GetMinSize()[0] > av:
                        # Make it its minimum, and keep track of it
                        tab.SetSize(tab.MinSize)
                        reserved += tab.MinSize.width
                        o += 1
                        mark = o
                        # If we're not on the last tab, recalc average
                        if (length - o):
                            av=(area-reserved)/(length-o)
                    else:
                        o += 1
                        break

            # For tabs less than the average, set them to average
            for t in xrange(mark, length):
                tab = rows[row][ordered[t]]
                tab.SetSize((av, tab.MinSize.height))

            # For every tab in the row
            for t, tab in enumerate(rows[row]):
                if not t: # If it's the first tab:
                    if length==1:
                        # If the row is so small it can only fit one tab,
                        # make due.
                        tab.Size = wx.Size(area, tab.MinSize.height)
                    tab.Position = wx.Point(0, self.rowheight * (row - self.rowindex))
                else:
                    tab.Position = wx.Point(rows[row][t-1].Position.x + rows[row][t-1].Size.width,
                                            self.rowheight * (row - self.rowindex))
                tab.Show(True)
        row += 1

    if dotoggle:
        self.Toggle()

    # If total rows is less than total rows being shown, shrink the
    # tab area so that it's only just big enough.
    if len(rows) < style or not style:
        rows_shown = len(rows)
    else:
        rows_shown = style
    if self.Parent.SashPosition != rows_shown * self.rowheight: #self.MinSize.height
        self.MinSize = wx.Size(-1, rows_shown * self.rowheight)
        self.Parent.SetSashPosition(self.MinSize.height)
    # self.Size=self.MinSize

    # Determine if the Navi needs to enable or show arrows
    navi.Enabler()
    #self.Parent.Layout() # Relayout self
    self.tabendex = i-1 # final tab being shown
    self.UpdateNotify()

    navi.Size = wx.Size(-1,rows_shown * self.rowheight)
def Remove(self, target):
    'Removes the tab specified from the bar.'
    index=self.tabs.index(target)
    self.tabs.remove(target)

    # if no more tabs, close the window
    if len(self.tabs)==0:
        self.Notebook.window.Close()
    else:
        # if the removed tab was inside the visible window, pull one
        # tab in from the left to keep the window full
        if index>self.tabindex and index<self.tabendex and self.tabindex>0:
            self.tabindex-=1

        if self.side_tabs:
            self.ReVgenerate(total=True)
        else:
            self.Regenerate(safe = True)
def OnSize(self, event):
    'ReLayout the tabs in the bar on event of a resize'
    event.Skip()

    if self.side_tabs and self.tabs:
        cupb = self.cupb
        cdownb = self.cdownb
        size = self.Size
        tabs = self.tabs
        endex = self.tabendex

        # position and size the up/down scroll buttons at the bar's edges
        cupb.Position = (0,0)
        cupb.Size = (size.width, 16)
        cupb.Show()
        cupb.Enable(self.tabindex != 0)

        cdownb.Position = (0, size.height - 16)
        cdownb.Size = (size.width, 16)
        cdownb.Show()
        cdownb.Enable(endex < len(tabs) or tabs[endex - 1].Position.y +
                      tabs[endex-1].Size.height > size.height - 16)

    # only relayout when the relevant dimension actually changed:
    # width for horizontal mode, either dimension for side-tab mode
    sz = self.Size
    if ((sz.width != self.lastsize.width and not self.side_tabs) or (sz != self.lastsize and self.side_tabs)) and self.IsShownOnScreen():
        self.lastsize = sz
        if self.side_tabs:
            self.ReVgenerate(dotoggle = False)
        else:
            self.Regenerate(False,dotoggle = False)
        # best-effort: the active panel may not have an input_area
        try:
            wx.CallAfter(wx.CallAfter,self.Parent.pagecontainer.active.panel.input_area.expandEvent)
        except AttributeError:
            pass

    self.Refresh(False)
def GetTabCount(self):
    """Return the number of (truthy) tabs currently in the bar."""
    return sum(1 for tab in self if tab)
def NextTab(self):
    'Activate the tab after the currently active one, wrapping at the end.'
    self.SetNextActive(self.ActiveTab, wrap = True)
def PrevTab(self):
    'Activate the tab before the currently active one, wrapping at the start.'
    self.SetLastActive(self.ActiveTab, wrap = True)
def SetNextActive(self, origin,wrap=False):
    """
    Activates the tab after *origin*:
    - if there is no next tab, activates the previous one
    - or wraps around to the first tab when wrap is True
    """
    if origin in self.tabs:
        index=self.tabs.index(origin)
        if not index < len(self.tabs)-1 and wrap:
            # origin is the last tab: wrap to the front
            self.tabs[0].SetActive(True)
        elif index < len(self.tabs)-1:
            self.tabs[index+1].SetActive(True)
        elif index>0:
            # last tab, no wrap: fall back to the previous tab
            self.tabs[index-1].SetActive(True)
    self.Refresh(False)
def SetLastActive(self, origin, wrap = False):
    """
    Activates the tab before *origin*:
    - if there is no previous tab, wraps to the last tab when wrap is True
    """
    if origin in self.tabs:
        index=self.tabs.index(origin)
        if not index > 0 and wrap:
            # origin is the first tab: wrap to the end
            self.tabs[len(self.tabs)-1].SetActive(True)
        elif index >0:
            self.tabs[index-1].SetActive(True)
        elif index<0:
            # NOTE(review): unreachable — list.index never returns a negative
            # value, so this branch can never fire; the symmetric method
            # (SetNextActive) falls back to the neighbouring tab here.
            self.tabs[index+1].SetActive(True)
    self.Refresh(False)
def SyncActive(self,atab):
    """
    Moves the index and endex so that the active tab is in the bar.

    atab appears to be a tab index (vertical mode) / row index
    (horizontal mode) — TODO confirm against callers.
    """
    # NOTE(review): an index of 0 is falsy, so this early-out also skips
    # syncing for the first tab — confirm that is intended.
    if not atab: return

    if self.side_tabs:
        if atab < self.tabindex:
            # active tab scrolled off the top: jump straight to it
            self.tabindex=atab
            self.ReVgenerate(True)
        else:
            # scroll down one tab at a time until the active tab fits
            thetab=self.tabs[atab]
            while atab >= self.tabendex or thetab.Position.y+thetab.Size.height > self.Size.height-16:
                self.tabindex+=1
                self.ReVgenerate(True)
    else:
        style = self.tab_rows
        if atab < self.rowindex:
            self.rowindex=atab
            self.Regenerate()
        elif atab > self.rowindex+style-1:
            self.rowindex=atab-style+1
            self.Regenerate()
def OnWheel(self,event):
    """
    Event that handles mouse wheeling:
    wheel-down activates the next tab, wheel-up the previous (both wrap).
    """
    if RectS(self.Size).Contains(event.Position):
        direction = event.GetWheelRotation()
        if direction<0:
            self.SetNextActive(self.ActiveTab, True)
        elif direction>0:
            self.SetLastActive(self.ActiveTab, True)
@property
def Notebook(self):
    'The notebook that owns this tab bar (its direct parent window).'
    return self.Parent
@property
def Manager(self):
    'The tab manager coordinating drags between notebooks.'
    return self.Notebook.manager
@property
def ActiveTab(self):
    """The Tab of the notebook's active page, or None when no page is active."""
    page = self.Notebook.pagecontainer.active
    return page.tab if page is not None else None
def OnButton(self,event):
    """
    Handles the up/down scroll buttons shown in side-tab (vertical) mode.
    """
    if event.GetId()==CUPID:
        # scroll up one tab, if not already at the top
        if self.tabindex > 0:
            self.tabindex -= 1
            self.ReVgenerate(total = True)
    elif event.GetId()==CDOWNID:
        # scroll down if there are more tabs below, or the last visible
        # tab is partially cut off by the bottom button strip
        if self.tabendex<len(self.tabs) or self.tabs[self.tabendex-1].Position.y+self.tabs[self.tabendex-1].Size.height>self.Size.height-16:
            self.tabindex+=1
            self.ReVgenerate(total=True)
    self.UpdateNotify()
def NotifyDrag(self, origin):
    """
    When a tab is dragged this is called to start the tabbar handling dragging.

    origin - tab being dragged
    """
    # Lets the TabMan know a tab has been dragged
    self.dragorigin = origin
    origin.SetCursor(wx.StockCursor(wx.CURSOR_NO_ENTRY))
    self.Manager.Notify(self.Notebook)
def DragCalc(self, point):
    """
    Recompute drag-and-drop state for the pointer at *point* (client coords)
    and position the drop-marker arrow accordingly.

    Fix: the whitespace-drop condition previously parsed as
    ``A or (B and self.Rect.Contains(point))`` because ``and`` binds tighter
    than ``or``, so a drag with a live origin and no target entered the
    branch even when the pointer was outside the bar (and could index
    self.rows with a bogus row index).  The added parentheses make the
    bounds check apply to both alternatives.
    """
    sidetabs = self.side_tabs
    # if there is no local origin tab but there is a remote source identified in TabMan
    if not self.dragorigin and self.Manager.source:
        # setting a local drag origin
        master = self.Manager.source
        dragorigin = master.tabbar.dragorigin
        # announcing to TabMan it is expecting a tab
        self.Manager.Request(self.Notebook)
    else:
        # if there is a local origin use that
        dragorigin = self.dragorigin
    # if dragtarget is out of date find what you're dragging to
    if not self.dragtarget or not self.dragtarget.Rect.Contains(point):
        wap = wx.FindWindowAtPointer()
        self.dragtarget = wap if isinstance(wap, Tab) else None
        self.dragside = None
    # if there is a tab as target
    if self.dragtarget and self.dragtarget != dragorigin:
        dtrect = self.dragtarget.Rect
        # data to decide what side the tab would be dropped on
        if not sidetabs:
            x = point[0] - dtrect.x
            x2 = dtrect.width / 2
        else:
            x = point[1] - dtrect.y
            x2 = dtrect.height / 2
        # make the left/top or right/bottom decision
        if x <= x2:  # left/top
            if self.dragside != False:
                self.dragside = False
                if not sidetabs:
                    self.DrawDropMarker(dtrect.x, dtrect.y)
                else:
                    self.DrawDropMarker(dtrect.x, dtrect.y)
        elif not self.dragside:  # right/bottom
            self.dragside = True
            if not sidetabs:
                self.DrawDropMarker(dtrect.x + dtrect.width, dtrect.y)
            else:
                self.DrawDropMarker(dtrect.x, dtrect.y + dtrect.height)
        self.SetFocus()
    # if being dropped in the whitespace of the TabBar
    elif ((dragorigin and self.dragtarget != dragorigin) or
          (dragorigin is None and self.dragtarget is None)) and self.Rect.Contains(point):
        # find what row the tab is being dropped in
        if not sidetabs:
            row = self.rows[(point[1] // self.rowheight) + self.rowindex]
            tab = row[len(row) - 1]
            self.dragside = True
        else:
            # or in vertical mode, at the beginning or end of the column
            if point.y > self.rowheight:
                tab = self.rows[0][len(self.rows[0]) - 1]
                self.dragside = True
            else:
                tab = self.rows[0][0]
                self.dragside = False
        dtrect = tab.Rect
        # Place marker
        if not sidetabs:
            self.DrawDropMarker(dtrect.x + dtrect.width, dtrect.y)
        elif self.dragside == True:
            self.DrawDropMarker(dtrect.x, dtrect.y + dtrect.height)
        else:
            self.DrawDropMarker(dtrect.x + dtrect.width / 2, dtrect.y)
        self.SetFocus()
        # cleanup
        self.dragtarget = tab
    else:
        # if not in tabbar anymore don't show arrow
        self.dropmarker.Show(False)
def DragFinish(self, new=False):
    """
    Ends dragging and does any rearranging required.

    Three outcomes:
      * released outside every bar -> tear the tab off into a new (or the
        TabManager-designated) destination notebook window;
      * released on/near another tab in this bar -> reorder this bar;
      * `new` is True (tab just arrived from another window) -> re-layout only.
    Always clears drag state on self, the TabManager and its destination.

    new - True when called for a tab that was moved into this bar by an
          inter-window drag and only needs placing.
    """
    if not wx.IsDestroyed(self.dragorigin):
        self.dragorigin.SetCursor(wx.StockCursor(wx.CURSOR_ARROW))
    if self.dragorigin and self.dragorigin.previewtabs:
        # Destroy the preview tab
        self.dragorigin.previewtabs.Stop()
        self.dragorigin.previewtabs = None
    rect = RectPS(self.Notebook.ClientToScreen(self.Position), self.Size)
    parentrect = self.Notebook.window.Rect
    mousepos = wx.GetMousePosition()
    manager = self.Manager
    # if released out of the window...
    if not new and ((manager.destination and not parentrect.Contains(mousepos)) or not rect.Contains(mousepos)):
        if self.ActiveTab == self.dragorigin:
            self.SetNextActive(self.dragorigin)
        self.dragorigin.Show(False)
        # If no or invalid destination in manager create a new window and set it as destination
        dest = manager.destination
        if not dest or not dest.tabbar.Rect.Contains(dest.ScreenToClient(wx.GetMousePosition())):
            # FIXME: SWIG doesn't like subtracting a wx.Size from a wx.Point, so do it the hard way
            # until the SIP migration is finished.
            originsize = self.dragorigin.GetSize()
            newpoint = wx.Point(mousepos[0] - originsize[0], mousepos[1] - originsize[1])
            destination = self.Notebook.winman.NewWindow(newpoint, self.Notebook.window.GetSize()).notebook
        else:
            # else set the destination to the manager's destination
            destination = dest
        # clear tabman's destination
        manager.Request()
        # Grab a reference to the tab's page
        page = self.dragorigin.page
        # Make the tab "forget" about the page
        self.Notebook.did_remove(page.panel)
        del self.dragorigin.page
        del page.tab
        # Remove the tab from the tabs list, and destroy the wxWindow
        self.tabs.remove(self.dragorigin)
        self.dragorigin.Close()
        # remove page from this notebook and insert it into the target notebook
        #page.Parent.RemoveChild(page)
        destination.Insert(page, False)
        # cleanup
        manager.Notify()
        self.dragorigin = None
        # re-sort tabs
        if self.side_tabs:
            self.ReVgenerate(True)
        else:
            self.Regenerate(safe = True)
    # if released inside of the window:
    # used both for moving within a window and as the last step of an
    # inter-window move, in case the tab has already been moved to this
    # window at the end of the list and only needs repositioning.
    elif self.dragtarget and self.dragorigin and self.dragorigin != self.dragtarget and self.Rect.Contains(self.Notebook.ScreenToClient(mousepos)):
        # remove the tab from the list
        self.tabs.remove(self.dragorigin)
        # decide which side of the target the tab should be dropped
        pos = self.tabs.index(self.dragtarget) + (1 if self.dragside else 0)
        # Reinsert the tab in its new position
        self.tabs.insert(pos, self.dragorigin)
        after = self.tabs[pos+1] if pos+1 < len(self.tabs) else None
        if after is not None:
            after = after.page.panel
    # call after so that windows are all in their correct places
        wx.CallAfter(self.Notebook.did_rearrange, self.dragorigin.page.panel, after)
        # Resort
        if self.side_tabs:
            self.ReVgenerate(True)
        else:
            self.Regenerate()
    elif new:
        if self.side_tabs:
            self.ReVgenerate(True)
        else:
            self.Regenerate()
    # if there is a dragorigin run OnMouseLeave on it to reset its look
    if self.dragorigin:
        self.dragorigin.OnMouseLeave()
    # local cleanup
    self.dropmarker.Show(False)
    self.dragorigin = None
    self.dragtarget = None
    # destination and manager cleanup
    dest = manager.destination
    if dest:
        dest.tabbar.dragorigin = None
        dest.tabbar.dragtarget = None
        manager.Request()
    manager.Notify()
    if len(self.tabs) == 0:
        self.Notebook.window.Close()
def DrawDropMarker(self, x, y):
    """
    Places the marker for where to drop the tab.

    x and y are center position, saves calculation that way.
    """
    # if self.side_tabs:
    #     self.dropmarker.SetSize((self.Size.width - self.dropmarkeroverlay, -1))
    # else:
    #     self.dropmarker.SetSize((-1, self.rowheight - self.dropmarkeroverlay))
    self.dropmarker.Teleport(self.ClientToScreen((x, y + self.dropmarkeroffset)))
    # the manager owns showing so the marker can float over any window
    if not self.dropmarker.IsShown():
        self.Manager.ShowDropMarker(self.dropmarker)
def Toggle(self, switch = None):
    'Toggle whether the tabbar is hidden or shown.'
    # hide only when the hide-at-one pref is on, at most one tab exists,
    # and the caller did not force the bar visible
    hide_bar = pref('tabs.hide_at_1', True) and len(self.tabs) <= 1 and not switch
    if hide_bar:
        self.Notebook.Split(False)
        # make sure the content of the IM win resizes if the tabbar hides
        self.ProcessEvent(wx.CommandEvent(wx.wxEVT_SIZE))
    else:
        self.Notebook.Split(True)
def UpdateNotify(self):
    """
    Recompute which tabs (side mode) or rows (horizontal mode) carry a
    "notified" flag, and light up the scroll/navigation buttons that point
    toward flagged tabs currently scrolled out of view.
    """
    frows = self.flagedrows
    frows.clear()
    if self.side_tabs:
        # side mode: one set entry per flagged tab index
        tabs = self.tabs
        for i, tab in enumerate(tabs):
            if tab.notified:
                frows.add(i)
            elif i in self.flagedrows:
                # NOTE(review): unreachable after the clear() above — i is only
                # in the set when the tab was notified; kept for fidelity
                frows.remove(i)
        self.cupb.SetNotify(len(frows) and min(frows) < self.tabindex)
        # flag the down button when a notified tab is below view or clipped
        self.cdownb.SetNotify(len(frows) and (max(frows) >= self.tabendex or
                              tabs[max(frows)].Position.y + tabs[max(frows)].Size.height > self.Size.height - 16))
    else:
        # horizontal mode: one set entry per row containing a flagged tab
        for i, row in enumerate(self.rows):
            flaged = False
            for tab in row:
                if tab and tab.notified:
                    flaged = True
                    frows.add(i)
        self.navi.upb.SetNotify(len(frows) and min(frows) < self.rowindex)
        self.navi.downb.SetNotify(len(frows) and max(frows) > self.rowindex + self.tab_rows - 1)
| from __future__ import with_statement
from tab import Tab
from gui import skin
from OverlayImage import OverlayImage
from navigation_arrows import Navi
from gui.uberwidgets.UberButton import UberButton
from gui.uberwidgets.UberEvents import EVT_DRAG_START
import wx
from wx import RectS, Rect, RectPS
from util.primitives.funcs import do
from common import pref, profile, prefprop
from gui.uberwidgets import UberWidget
from cgui import SimplePanel
#from traceback import print_stack,format_stack
# wx IDs for the bar's controls and context-menu items
CUPID = wx.NewId()             # side-tab mode: scroll-up button
CDOWNID = wx.NewId()           # side-tab mode: scroll-down button
CLOSE_TAB = wx.NewId()         # context menu: close the clicked tab
CLOSE_OTHER_TABS = wx.NewId()  # context menu: close every other tab
class TabBar(SimplePanel, UberWidget):
    """
    Where the tabs live; handles all display and organization functionality:
    painting, row layout (horizontal rows or a vertical side column),
    scrolling, notification flags, and tab drag-and-drop between windows.
    """
def __init__(self, parent, skinkey):
    """
    parent  - the owning notebook window
    skinkey - root key of the skin section to pull visuals from
    """
    SimplePanel.__init__(self, parent)
    self.tabs = []      # a list of all the tabs
    self.rows = []      # a list of the visible rows, each a list of that row's tabs
    self.rowindex = 0   # the first visible row
    self.tabindex = 0   # the first tab of the first visible row
    self.tabendex = 0   # the last tab of the last visible row
    events = [(wx.EVT_PAINT, self.OnPaint),
              (wx.EVT_SIZE, self.OnSize),
              (wx.EVT_BUTTON, self.OnButton),
              (wx.EVT_MOUSEWHEEL, self.OnWheel),
              (wx.EVT_MOTION, self.OnMotion)]
    for event, method in events: self.Bind(event, method)
    self.flagedrows = set()     # indices of rows/tabs with pending notifications
    self.lastsize = self.Size   # last laid-out size, to skip redundant OnSize work
    self.rowheight = 0          # height of a row in pixels
    # skin must be resolved before the child buttons below read its values
    self.SetSkinKey(skinkey, True)
    # buttons for vertical (side-tab) alignment
    self.cupb = UberButton(self, CUPID, skin=self.scrollbuttonskin, icon=self.upicon)
    self.cupb.Show(False)
    self.cdownb = UberButton(self, CDOWNID, skin=self.scrollbuttonskin, icon=self.downicon)
    self.cdownb.Show(False)
    # the navigation box
    self.navi = Navi(self)
    self.dragorigin = None  # when dragging: the tab being dragged
    self.dragtarget = None  # when dragging: the tab the mouse is over / released on
    # the arrow image shown when dragging tabs
    self.dropmarker = OverlayImage(self, self.dropmarkerimage)
    # self.dropmarker = Storage(Show = lambda v=True: None)
    self.dragside = None    # was the tab dropped on the left or right of the target tab
    # linking prefs: re-layout or reskin when these change
    link = profile.prefs.link #@UndefinedVariable
    link('tabs.rows', self.Generate, False)
    link('tabs.tabbar_x', self.Generate, False)
    link('tabs.hide_at_1', self.Generate, False)
    link('tabs.side_tabs', self.SkinRedirect, False)
    self.Top.Bind(wx.EVT_MENU, self.OnMenuEvent)
# pref-backed class properties: current orientation and configured row count
side_tabs = prefprop('tabs.side_tabs')
tab_rows = prefprop('tabs.rows', 2)
def UpdateSkin(self):
    """
    Pull all skin elements for the bar from the active skin and push the
    relevant sub-skins down into the child buttons and the navigation strip.

    Each element is stored three times: a 'top...' (horizontal) variant, a
    'side...' (vertical) variant, and the bare name bound to whichever of
    the two matches the current tabs.side_tabs preference.
    """
    key = self.tabskin = self.skinkey
    # NOTE(review): `sentinel` is not defined in this file's visible imports —
    # presumably a module-level missing-value marker; verify it is in scope.
    g = lambda k, default = sentinel: skin.get(key + '.' + k, default)
    sg = lambda k, default = sentinel: skin.get('side' + key + '.' + k, default)
    # (attribute name, skin key, optional default)
    elems = (('spacing', 'spacing', 2),
             ('bg', 'backgrounds.bar'),
             ('dropmarkerimage', 'dropmarker.image'),
             # ('dropmarkeroverlay', 'dropmarker.overlay', 0),
             ('dropmarkeroffset', 'dropmarker.offset', 0),
             ('closebuttonskin', 'closebuttonskin', ''),
             ('closeicon', 'icons.close', None),
             ('scrollbuttonskin', 'scrollbuttonskin', ''),
             ('lefticon', 'icons.left', ''),
             ('righticon', 'icons.right', ''),
             ('upicon', 'icons.up', ''),
             ('downicon', 'icons.down', ''))
    for elem in elems:
        setattr(self, 'top' + elem[0], g(*elem[1:]))
        # the side variant falls back to the top variant when the skin omits it
        setattr(self, 'side' + elem[0], sg(elem[1], getattr(self, 'top' + elem[0])))
        setattr(self, elem[0], getattr(self, ('side' if self.side_tabs else 'top') + elem[0]))
    # dropmarker does not exist yet on the first call (from __init__)
    if hasattr(self, 'dropmarker'):
        self.dropmarker.SetImage(self.dropmarkerimage)
        self.dropmarker.SetRotation((self.side_tabs and not self.dropmarkerimage))
    navi = getattr(self, 'navi', None)
    if navi is not None:
        self.cdownb.SetSkinKey(self.scrollbuttonskin)
        self.cupb.SetSkinKey(self.scrollbuttonskin)
        self.cdownb.SetIcon(self.downicon)
        self.cupb.SetIcon(self.upicon)
        self.navi.closebutton.SetSkinKey(self.closebuttonskin)
        self.navi.closebutton.SetIcon(self.closeicon)
        scrollskin = self.scrollbuttonskin
        navi.prevb.SetSkinKey(scrollskin)
        navi.nextb.SetSkinKey(scrollskin)
        navi.upb.SetSkinKey(scrollskin)
        navi.downb.SetSkinKey(scrollskin)
        navi.prevb.SetIcon(self.lefticon)
        navi.nextb.SetIcon(self.righticon)
        navi.upb.SetIcon(self.upicon)
        navi.downb.SetIcon(self.downicon)
    # layout depends on the new skin metrics; defer until skin is applied
    wx.CallAfter(self.Generate)
def SkinRedirect(self, val=None):
    """
    Rebind the bare skin attributes to the 'top' or 'side' variants when the
    tabs.side_tabs preference flips, then reskin all child widgets.

    val - the new pref value passed by the pref-link callback (unused).
    """
    elems = ('spacing',
             'bg',
             'dropmarkerimage',
             #'dropmarkeroverlay',
             'closebuttonskin',
             'closeicon',
             'scrollbuttonskin',
             'lefticon',
             'righticon',
             'upicon',
             'downicon'
             )
    for elem in elems:
        setattr(self, elem, getattr(self, ('side' if self.side_tabs else 'top') + elem))
    self.UpdateChildSkins()
def UpdateChildSkins(self):
    """
    Push the current skin keys down into every child widget, reskin each
    tab, then re-layout the bar.
    """
    self.cdownb.SetSkinKey(self.scrollbuttonskin, True)
    self.cupb.SetSkinKey(self.scrollbuttonskin, True)
    navi, sbs = self.navi, self.scrollbuttonskin
    navi.closebutton.SetSkinKey(self.closebuttonskin, True)
    navi.prevb.SetSkinKey(sbs, True)
    navi.nextb.SetSkinKey(sbs, True)
    navi.upb.SetSkinKey(sbs, True)
    navi.downb.SetSkinKey(sbs, True)
    self.UpdateChildrenIcons()
    # each tab re-reads its own skin mode
    for tab in self.tabs:
        tab.UpdateMode()
    self.Generate()
def __repr__(self):
    # e.g. "<TabBar [<Tab ...>, ...]>"
    return '<TabBar ' + repr(self.tabs) + '>'
def OnDragStart(self, tab):
    'Catches the tab drag event and starts the tab dragging system.'
    # delegate to NotifyDrag, which records the origin and informs the TabManager
    self.NotifyDrag(tab)
def OnMotion(self, event):
    'Positioning updates during drag and drop'
    # only while the left button is held and a drag is in progress
    # (either started locally, or announced by the TabManager)
    if event.LeftIsDown() and (self.dragorigin or self.Manager.source):
        self.DragCalc(event.Position)
def __getitem__(self, index):
    # delegate indexing (and thus iteration) to the tab list
    return self.tabs[index]
def OnPaint(self, event):
    """
    Paint the bar background: one background strip per visible row in
    horizontal mode, or a single fill in side-tab mode.
    """
    dc = wx.PaintDC(self)
    rect = RectS(self.Size)
    if not self.side_tabs:
        rcount = min(len(self.rows), pref('tabs.rows', 2))
        # NOTE(review): assumes at least one tab exists when painting horizontally
        height = self.tabs[0].Size.height
        y = 0
        for unused_i in xrange(rcount):
            self.bg.Draw(dc, Rect(rect.x, y, rect.width, height))
            y += height
    else:
        self.bg.Draw(dc, rect)
def Add(self, page, focus, resort = True):
    """
    Adds a tab to the bar. Should only be used by parent NoteBook.

    page   - page in PageContainer the tab is to be associated with
    focus  - whether that tab should steal focus from current tab
    resort - re-layout immediately when the new tab is not focused

    Returns the newly created Tab.
    """
    tab = Tab(self, page, skinkey = self.tabskin)
    tab.Bind(wx.EVT_CONTEXT_MENU, self.ShowMenu)
    tab.Show(False)
    self.tabs.append(tab)
    if focus:
        # SetActive performs its own re-layout, after this handler returns
        wx.CallAfter(tab.SetActive, True)
    elif resort:
        if self.side_tabs:
            self.ReVgenerate()
        else:
            self.Regenerate(True)
    return tab
def ShowMenu(self, e):
    'Show the right-click context menu for the clicked tab.'
    self._menutab = e.EventObject
    try:
        menu = self._tabmenu
    except AttributeError:
        # build the menu lazily on first use and cache it on the bar
        from gui.uberwidgets.umenu import UMenu
        menu = self._tabmenu = UMenu(self)
        menu.AddItem('Close &Other Tabs', id = CLOSE_OTHER_TABS)
        menu.AddSep()
        menu.AddItem('&Close Tab', id = CLOSE_TAB)
    menu.PopupMenu()
def OnMenuEvent(self, e):
    '''Invoked when a tab context menu item is clicked.'''
    if e.Id == CLOSE_TAB:
        self._menutab.CloseTab()
    elif e.Id == CLOSE_OTHER_TABS:
        menutab = self._menutab
        # switch to that tab first
        menutab.active = True
        # freeze to avoid repainting once per closed tab
        with self.Frozen():
            # iterate a copy: CloseTab mutates self.tabs
            for tab in self.tabs[:]:
                if tab is not menutab:
                    tab.CloseTab()
    else:
        e.Skip()
def Generate(self, val=None):
    """
    Re-run the full layout for the current orientation; also used as the
    pref-link callback (val is the changed pref's new value, unused).
    """
    self.navi.closebutton.Show(pref('tabs.tabbar_x', False))
    # vertical column or horizontal rows, depending on the side_tabs pref
    self.ReVgenerate(True) if self.side_tabs else self.Regenerate()
def ReVgenerate(self, total=False, safe=False, dotoggle=True):
    """
    It's like Regenerate, only vertical: lay the tabs out in a single
    side column, scrolled so the tab at self.tabindex is shown first.

    total    - rebuild the visible column from scratch
    safe     - accepted for signature parity with Regenerate; unused here
    dotoggle - whether to run Toggle() (hide/show bar) after laying out
    """
    tabs = self.tabs
    if not tabs: return
    # hide everything and forget the previous row assignment
    do(tab.Show(False) for tab in self.tabs)
    for tab in self.tabs: tab.row = None
    del self.rows[:]
    # Safety precautions prevent list access errors
    if self.tabindex < 0 or self.tabindex >= len(tabs):
        self.tabindex = 0
    # Preset variables
    n = self.tabindex  # the first tab shown
    self.rowheight = tabs[0].GetMinHeight()
    area = self.Notebook.Size.height - 32  # Height in pixels of the tabbar
    # number of fully visible tabs in the given area at the given height
    i = area // self.rowheight
    count = len(tabs)
    # one tab per row
    for r in xrange(count): tabs[r].row = r
    rows = self.rows
    size = self.Size
    # Sets navi mode and position
    navi = self.navi
    navi.ShowNav(4)
    navi.Hide()
    navi.Position = wx.Point(size.width - navi.Size.width, 0)
    # Totally reconstructs the list if told to, or if the cached column no
    # longer matches the available space / tab count.
    # NOTE(review): `not rows` is always True here because of the
    # `del self.rows[:]` above, so the else branch below looks unreachable.
    if total or not rows or (i + 1 != len(rows[0])) and not (i > len(rows[0])) and len(rows[0]) == len(tabs):
        rows.append([])
        col = rows[0]
        # if all tabs fit
        if i >= count:
            n = 0
            self.tabindex = 0
            do(col.append(tab) for tab in tabs)
            av = col[0].MinSize.height
        else:
            # calculate and show range
            for t in xrange(n, n+i+1):
                if t < len(tabs): col.append(tabs[t])
            # populate with earlier tabs while there is room left
            while len(col) < i and n > 0:
                n -= 1
                col.insert(0, tabs[n])
            if col: av = col[0].MinSize.height
    else:
        # just leave the new values the same as the old
        col = rows[0]
        av = col[0].MinSize.height
    # Show all tabs in the bar, stacked below the 16px scroll button
    count = 16
    for t in col:
        t.Size = (self.Size.width, av)
        t.Position = (0, count)
        count += av
        t.Show()
    self.tabindex = n
    endex = self.tabendex = n + len(col)
    if dotoggle:
        self.Toggle()
    # enable the scroll buttons only when there is somewhere to scroll
    cupb, cdownb = self.cupb, self.cdownb
    cupb.Enable(self.tabindex != 0)
    cdownb.Enable(endex < len(tabs) or tabs[endex - 1].Position.y +
                  tabs[endex-1].Size.height > size.height - 16)
    self.UpdateNotify()
def Regenerate(self, safe = False, dotoggle=True):
    '''
    Regenerates layout information.

    safe is a flag to indicate if we should try to keep the currently active
    tab in view at all times. (This doesn't occur when scrolling, for
    instance.)
    '''
    # early exit for when the tabbar isn't visible.
    if not self.IsShown() and len(self.tabs) == 1:
        return
    # freeze to avoid flicker while _Regenerate moves every tab around
    with self.Frozen():
        self._Regenerate(safe = safe, dotoggle = dotoggle)
        self.Refresh(False)
def _Regenerate(self, safe = False, dotoggle = True):
    """
    Core horizontal layout: partition self.tabs into rows, pick which rows
    are visible, then size and position every visible tab.

    safe     - when False, keep the active tab scrolled into view
    dotoggle - whether to run Toggle() after laying out
    """
    self.cupb.Show(False)
    self.cdownb.Show(False)
    parentpage = self.Parent.pagecontainer
    # style is the number of rows (or 0 for single)
    style = self.tab_rows
    # Should we be careful about the active tab leaving the bar?
    careful = not safe and parentpage.active
    # Hide all tabs in preparation for refilling
    for tab in self.tabs:
        tab.Show(False)
        tab.row = None
    del self.rows[:]
    # navi set up, see if arrows are needed and placement
    tally = sum(tab.MinSize.width for tab in self.tabs)  # total size of tabs
    navi = self.navi
    tabs = self.tabs
    rows = self.rows
    if not tabs: return
    # Safety precautions prevent list access errors
    if self.tabindex < 0 or self.tabindex >= len(tabs):
        self.tabindex = 0
    # Preset variables
    n = self.tabindex  # the first tab shown
    i = n
    row = 0
    self.rowheight = tabs[0].MinHeight
    my_w, nav_w = self.Size.width, navi.Size.width
    # Decide what kind of navigation panel, if any, to use...
    if tally >= my_w - nav_w and not style:
        navi.ShowNav(1)  # arrows left and right
    elif tally >= (my_w - nav_w):
        navi.ShowNav(3)  # arrows up and down next to the X
    else:
        navi.ShowNav(0)
    # Where to put navigation panel.
    navi.Freeze()
    navi.Show(True)
    navi.Fit()
    navi.Position = wx.Point(self.Size.width - navi.Size.width, 0)
    navi.Size = wx.Size(-1, self.Size.height)
    navi.Thaw()
    # More preparing vars: width available to tabs
    area = self.Notebook.Size.width - navi.Size.width
    # While more tabs are not in a row
    while len(tabs) > i:
        tally = tabs[i].MinSize.width
        rows.append([])
        # Loop through each visible tab, fitting tabs on the right.
        while i < len(tabs) and tally < area:
            i += 1
            if i < len(tabs):
                tally += tabs[i].MinSize.width
        # Be careful that the active tab doesn't scroll off the bar
        if careful and not style:
            activeindex = tabs.index(parentpage.active.tab)
            change = False
            # add tabs until the active tab is visible
            while activeindex >= i and n != i:
                i += 1
                tally += tabs[i].MinSize.width
                change = True
                # Remove a tab from the left if more tabs than room
                if tally >= area and change:
                    tally -= tabs[n].MinSize.width
                    n += 1
                    self.tabindex = n
        # If extra space, fit tabs to the left of the row
        if not style:  # if single row,
            while n > 0 and area - tally > tabs[n-1].MinSize.width:
                n -= 1
                self.tabindex = n
                tally += tabs[n].MinSize.width
        # Injects tabs calculated to fit in that row into that row
        if range(n, i):
            rows[row] = [tabs[t] for t in xrange(n, i)]
            for tab in rows[row]:
                tab.row = row
        else:
            # a single tab wider than the bar still gets its own row
            rows[row].append(tabs[i])
            i += 1
        if not style: break  # If we're in single row, break now.
        row += 1
        n = i
    # Row calculation
    if self.rowindex >= len(rows):
        self.rowindex = len(rows) - 1
    # cycle through visible rows
    row = self.rowindex
    visible = self.tab_rows or 1
    if careful and style:
        # keep the row containing the active tab inside the visible window
        active = parentpage.active.tab
        for ir, r in enumerate(rows):
            if active in r:
                if ir < row:
                    # moving index down
                    row = self.rowindex = ir
                elif ir >= row + visible:
                    # moving index up
                    row = ir - (visible - 1)
    # If we're closing tabs above where is visible, keep the visible
    # index "where it is"
    if len(rows) - (row + 1) < visible and len(rows) >= visible:
        row = len(rows) - visible
    self.rowindex = row
    # Place tabs!
    while row < len(rows) and row < self.rowindex + visible and len(rows[row]) != 0:
        # if this is a row that needs to be scrunched...
        if rows.index(rows[row]) == len(rows)-1 and (style or len(rows[row]) == len(tabs)):
            for t in xrange(0, len(rows[row])):
                thistab = rows[row][t]
                thistab.SetSize(thistab.MinSize)
                if not t:
                    # The first tab is set to its minimum width at x: 0
                    thistab.SetPosition((0, self.rowheight*(row-self.rowindex)))
                else:
                    # Every other tab is placed right next to the tab before it.
                    thistab.SetPosition((rows[row][t-1].Position.x
                                         + rows[row][t-1].Size.width,
                                         self.rowheight*(row-self.rowindex)))
                thistab.Show(True)
        # If there are more rows than the current row...
        elif len(rows) > row:
            # Get a list of tab indices, widest to smallest.
            ordered = [rows[row].index(t)
                       for t in sorted(rows[row],
                                       key=lambda o: o.MinSize.width,
                                       reverse=True)]
            length = len(ordered)
            reserved = 0
            o = 0
            # Average width of tab if all tabs are the same size, and
            # fill up all the area.
            av = (area - reserved) / (length - o)
            mark = 0
            while o < length:
                # Loop from "current" tab to the end
                for t in xrange(o, length):
                    tab = rows[row][ordered[t]]
                    # If this tab is larger than average...
                    if tab.GetMinSize()[0] > av:
                        # Make it its minimum, and keep track of it
                        tab.SetSize(tab.MinSize)
                        reserved += tab.MinSize.width
                        o += 1
                        mark = o
                        # If we're not on the last tab, recalc average
                        if (length - o):
                            av = (area-reserved)/(length-o)
                    else:
                        o += 1
                        break
            # For tabs less than the average, set them to average
            for t in xrange(mark, length):
                tab = rows[row][ordered[t]]
                tab.SetSize((av, tab.MinSize.height))
            # For every tab in the row
            for t, tab in enumerate(rows[row]):
                if not t:  # If it's the first tab:
                    if length == 1:
                        # If the row is so small it can only fit one tab, make due.
                        tab.Size = wx.Size(area, tab.MinSize.height)
                    tab.Position = wx.Point(0, self.rowheight * (row - self.rowindex))
                else:
                    tab.Position = wx.Point(rows[row][t-1].Position.x + rows[row][t-1].Size.width,
                                            self.rowheight * (row - self.rowindex))
                tab.Show(True)
        row += 1
    if dotoggle:
        self.Toggle()
    # If total rows is less than total rows being shown, shrink the
    # tab area so that it's only just big enough.
    if len(rows) < style or not style:
        rows_shown = len(rows)
    else:
        rows_shown = style
    if self.Parent.SashPosition != rows_shown * self.rowheight:
        self.MinSize = wx.Size(-1, rows_shown * self.rowheight)
        self.Parent.SetSashPosition(self.MinSize.height)
    # Determine if the Navi needs to enable or show arrows
    navi.Enabler()
    self.tabendex = i-1  # final tab being shown
    self.UpdateNotify()
    navi.Size = wx.Size(-1, rows_shown * self.rowheight)
def Remove(self, target):
    """
    Removes the tab specified from the bar.

    Closes the window when it was the last tab; otherwise adjusts the
    visible range and re-lays-out.

    Improvement: the old code scanned the list twice (index() followed by
    remove()); a single index() plus `del` deletes the same first
    occurrence with one scan.
    """
    index = self.tabs.index(target)
    del self.tabs[index]
    # if no more tabs close window
    if len(self.tabs) == 0:
        self.Notebook.window.Close()
    else:
        # if removed between index and endex, bring one tab in from the left
        if index > self.tabindex and index < self.tabendex and self.tabindex > 0:
            self.tabindex -= 1
        if self.side_tabs:
            self.ReVgenerate(total=True)
        else:
            self.Regenerate(safe = True)
def OnSize(self, event):
    'ReLayout the tabs in the bar on event of a resize'
    event.Skip()
    if self.side_tabs and self.tabs:
        cupb = self.cupb
        cdownb = self.cdownb
        size = self.Size
        tabs = self.tabs
        endex = self.tabendex
        # position and size the side-mode scroll buttons (16px strips)
        cupb.Position = (0, 0)
        cupb.Size = (size.width, 16)
        cupb.Show()
        cupb.Enable(self.tabindex != 0)
        cdownb.Position = (0, size.height - 16)
        cdownb.Size = (size.width, 16)
        cdownb.Show()
        cdownb.Enable(endex < len(tabs) or tabs[endex - 1].Position.y +
                      tabs[endex-1].Size.height > size.height - 16)
    sz = self.Size
    # only re-layout when the relevant dimension actually changed:
    # width for horizontal rows, either dimension for the side column
    if ((sz.width != self.lastsize.width and not self.side_tabs) or (sz != self.lastsize and self.side_tabs)) and self.IsShownOnScreen():
        self.lastsize = sz
        if self.side_tabs:
            self.ReVgenerate(dotoggle = False)
        else:
            self.Regenerate(False, dotoggle = False)
        try:
            # double CallAfter: run after the re-layout's own deferred work
            wx.CallAfter(wx.CallAfter, self.Parent.pagecontainer.active.panel.input_area.expandEvent)
        except AttributeError:
            # no active page / input area to nudge; best-effort only
            pass
    self.Refresh(False)
def GetTabCount(self):
    """
    Returns the number of (non-None) tabs in the bar.

    Uses sum() over a generator instead of materializing a throwaway list.
    """
    return sum(1 for t in self if t)
def NextTab(self):
    'Activate the tab after the current one, wrapping to the first.'
    self.SetNextActive(self.ActiveTab, wrap = True)
def PrevTab(self):
    'Activate the tab before the current one, wrapping to the last.'
    self.SetLastActive(self.ActiveTab, wrap = True)
def SetNextActive(self, origin, wrap=False):
    """
    Sets active the tab after the current active one.

    - if there is no next tab and wrap is True, activates the first tab
    - if there is no next tab and wrap is False, activates the previous tab
    - does nothing if origin is not in this bar
    """
    if origin in self.tabs:
        index = self.tabs.index(origin)
        if not index < len(self.tabs)-1 and wrap:
            self.tabs[0].SetActive(True)
        elif index < len(self.tabs)-1:
            self.tabs[index+1].SetActive(True)
        elif index > 0:
            # no next tab and no wrap: fall back to the previous tab
            self.tabs[index-1].SetActive(True)
    self.Refresh(False)
def SetLastActive(self, origin, wrap = False):
    """
    Sets active the tab before the current active one.

    - if there is no previous tab and wrap is True, activates the last tab
    - if there is no previous tab and wrap is False, activates the next tab
    - does nothing if origin is not in this bar

    Fix: the fallback branch was `elif index < 0`, which can never be true
    (list.index returns >= 0), so the documented "use the next tab" case
    never fired; it now mirrors SetNextActive's fallback.
    """
    if origin in self.tabs:
        index = self.tabs.index(origin)
        if not index > 0 and wrap:
            self.tabs[len(self.tabs)-1].SetActive(True)
        elif index > 0:
            self.tabs[index-1].SetActive(True)
        elif index < len(self.tabs)-1:
            # first tab, no wrap: fall back to the next tab
            self.tabs[index+1].SetActive(True)
    self.Refresh(False)
def SyncActive(self, atab):
    """
    Moves the index and endex so that the active tab at index *atab* is
    visible in the bar.

    Fix: the guard was `if not atab: return`, which also bailed out for
    index 0 and therefore never scrolled the first tab/row back into view;
    it now only skips None.
    """
    if atab is None: return
    if self.side_tabs:
        if atab < self.tabindex:
            # active tab is above the visible window: jump straight to it
            self.tabindex = atab
            self.ReVgenerate(True)
        else:
            # active tab below the window or clipped: scroll down until shown
            thetab = self.tabs[atab]
            while atab >= self.tabendex or thetab.Position.y + thetab.Size.height > self.Size.height - 16:
                self.tabindex += 1
                self.ReVgenerate(True)
    else:
        # NOTE(review): in row mode atab is compared against rowindex,
        # i.e. treated as a row index — confirm against callers
        style = self.tab_rows
        if atab < self.rowindex:
            self.rowindex = atab
            self.Regenerate()
        elif atab > self.rowindex + style - 1:
            self.rowindex = atab - style + 1
            self.Regenerate()
def OnWheel(self, event):
    """
    Mouse-wheel handler: wheel down activates the next tab, wheel up the
    previous one, wrapping at either end.
    """
    # ignore wheel events whose position is outside the bar
    if not RectS(self.Size).Contains(event.Position):
        return
    rotation = event.GetWheelRotation()
    if rotation < 0:
        self.SetNextActive(self.ActiveTab, True)
    elif rotation > 0:
        self.SetLastActive(self.ActiveTab, True)
@property
def Notebook(self):
    # the owning notebook is the bar's direct parent window
    return self.Parent
@property
def Manager(self):
    # the tab manager coordinating drags between notebook windows
    return self.Notebook.manager
@property
def ActiveTab(self):
    # Tab of the currently shown page, or None when no page is active
    active = self.Notebook.pagecontainer.active
    if active is not None:
        return active.tab
def OnButton(self, event):
    """
    The button events for vertical alignment: CUPID scrolls the side-tab
    column up one tab, CDOWNID scrolls it down one tab.
    """
    if event.GetId() == CUPID:
        if self.tabindex > 0:
            self.tabindex -= 1
            self.ReVgenerate(total = True)
    elif event.GetId() == CDOWNID:
        # scroll down only when tabs remain below, or the last shown tab is clipped
        if self.tabendex < len(self.tabs) or self.tabs[self.tabendex-1].Position.y + self.tabs[self.tabendex-1].Size.height > self.Size.height - 16:
            self.tabindex += 1
            self.ReVgenerate(total=True)
    self.UpdateNotify()
def NotifyDrag(self, origin):
    """
    When a tab is dragged this is called to start the tabbar handling dragging.

    origin - the Tab being dragged
    """
    # Lets the TabMan know a tab has been dragged
    self.dragorigin = origin
    # no-entry cursor until the pointer reaches a valid drop location
    origin.SetCursor(wx.StockCursor(wx.CURSOR_NO_ENTRY))
    self.Manager.Notify(self.Notebook)
def DragCalc(self, point):
    """
    Recompute drag-and-drop state for the pointer at *point* (client coords)
    and position the drop-marker arrow accordingly.

    Fix: the whitespace-drop condition previously parsed as
    ``A or (B and self.Rect.Contains(point))`` because ``and`` binds tighter
    than ``or``, so a drag with a live origin and no target entered the
    branch even when the pointer was outside the bar (and could index
    self.rows with a bogus row index).  The added parentheses make the
    bounds check apply to both alternatives.
    """
    sidetabs = self.side_tabs
    # if there is no local origin tab but there is a remote source identified in TabMan
    if not self.dragorigin and self.Manager.source:
        # setting a local drag origin
        master = self.Manager.source
        dragorigin = master.tabbar.dragorigin
        # announcing to TabMan it is expecting a tab
        self.Manager.Request(self.Notebook)
    else:
        # if there is a local origin use that
        dragorigin = self.dragorigin
    # if dragtarget is out of date find what you're dragging to
    if not self.dragtarget or not self.dragtarget.Rect.Contains(point):
        wap = wx.FindWindowAtPointer()
        self.dragtarget = wap if isinstance(wap, Tab) else None
        self.dragside = None
    # if there is a tab as target
    if self.dragtarget and self.dragtarget != dragorigin:
        dtrect = self.dragtarget.Rect
        # data to decide what side the tab would be dropped on
        if not sidetabs:
            x = point[0] - dtrect.x
            x2 = dtrect.width / 2
        else:
            x = point[1] - dtrect.y
            x2 = dtrect.height / 2
        # make the left/top or right/bottom decision
        if x <= x2:  # left/top
            if self.dragside != False:
                self.dragside = False
                if not sidetabs:
                    self.DrawDropMarker(dtrect.x, dtrect.y)
                else:
                    self.DrawDropMarker(dtrect.x, dtrect.y)
        elif not self.dragside:  # right/bottom
            self.dragside = True
            if not sidetabs:
                self.DrawDropMarker(dtrect.x + dtrect.width, dtrect.y)
            else:
                self.DrawDropMarker(dtrect.x, dtrect.y + dtrect.height)
        self.SetFocus()
    # if being dropped in the whitespace of the TabBar
    elif ((dragorigin and self.dragtarget != dragorigin) or
          (dragorigin is None and self.dragtarget is None)) and self.Rect.Contains(point):
        # find what row the tab is being dropped in
        if not sidetabs:
            row = self.rows[(point[1] // self.rowheight) + self.rowindex]
            tab = row[len(row) - 1]
            self.dragside = True
        else:
            # or in vertical mode, at the beginning or end of the column
            if point.y > self.rowheight:
                tab = self.rows[0][len(self.rows[0]) - 1]
                self.dragside = True
            else:
                tab = self.rows[0][0]
                self.dragside = False
        dtrect = tab.Rect
        # Place marker
        if not sidetabs:
            self.DrawDropMarker(dtrect.x + dtrect.width, dtrect.y)
        elif self.dragside == True:
            self.DrawDropMarker(dtrect.x, dtrect.y + dtrect.height)
        else:
            self.DrawDropMarker(dtrect.x + dtrect.width / 2, dtrect.y)
        self.SetFocus()
        # cleanup
        self.dragtarget = tab
    else:
        # if not in tabbar anymore don't show arrow
        self.dropmarker.Show(False)
def DragFinish(self, new=False):
    """
    Ends dragging and does any rearranging required.

    Three outcomes:
      * released outside every bar -> tear the tab off into a new (or the
        TabManager-designated) destination notebook window;
      * released on/near another tab in this bar -> reorder this bar;
      * `new` is True (tab just arrived from another window) -> re-layout only.
    Always clears drag state on self, the TabManager and its destination.

    new - True when called for a tab that was moved into this bar by an
          inter-window drag and only needs placing.
    """
    if not wx.IsDestroyed(self.dragorigin):
        self.dragorigin.SetCursor(wx.StockCursor(wx.CURSOR_ARROW))
    if self.dragorigin and self.dragorigin.previewtabs:
        # Destroy the preview tab
        self.dragorigin.previewtabs.Stop()
        self.dragorigin.previewtabs = None
    rect = RectPS(self.Notebook.ClientToScreen(self.Position), self.Size)
    parentrect = self.Notebook.window.Rect
    mousepos = wx.GetMousePosition()
    manager = self.Manager
    # if released out of the window...
    if not new and ((manager.destination and not parentrect.Contains(mousepos)) or not rect.Contains(mousepos)):
        if self.ActiveTab == self.dragorigin:
            self.SetNextActive(self.dragorigin)
        self.dragorigin.Show(False)
        # If no or invalid destination in manager create a new window and set it as destination
        dest = manager.destination
        if not dest or not dest.tabbar.Rect.Contains(dest.ScreenToClient(wx.GetMousePosition())):
            # FIXME: SWIG doesn't like subtracting a wx.Size from a wx.Point, so do it the hard way
            # until the SIP migration is finished.
            originsize = self.dragorigin.GetSize()
            newpoint = wx.Point(mousepos[0] - originsize[0], mousepos[1] - originsize[1])
            destination = self.Notebook.winman.NewWindow(newpoint, self.Notebook.window.GetSize()).notebook
        else:
            # else set the destination to the manager's destination
            destination = dest
        # clear tabman's destination
        manager.Request()
        # Grab a reference to the tab's page
        page = self.dragorigin.page
        # Make the tab "forget" about the page
        self.Notebook.did_remove(page.panel)
        del self.dragorigin.page
        del page.tab
        # Remove the tab from the tabs list, and destroy the wxWindow
        self.tabs.remove(self.dragorigin)
        self.dragorigin.Close()
        # remove page from this notebook and insert it into the target notebook
        #page.Parent.RemoveChild(page)
        destination.Insert(page, False)
        # cleanup
        manager.Notify()
        self.dragorigin = None
        # re-sort tabs
        if self.side_tabs:
            self.ReVgenerate(True)
        else:
            self.Regenerate(safe = True)
    # if released inside of the window:
    # used both for moving within a window and as the last step of an
    # inter-window move, in case the tab has already been moved to this
    # window at the end of the list and only needs repositioning.
    elif self.dragtarget and self.dragorigin and self.dragorigin != self.dragtarget and self.Rect.Contains(self.Notebook.ScreenToClient(mousepos)):
        # remove the tab from the list
        self.tabs.remove(self.dragorigin)
        # decide which side of the target the tab should be dropped
        pos = self.tabs.index(self.dragtarget) + (1 if self.dragside else 0)
        # Reinsert the tab in its new position
        self.tabs.insert(pos, self.dragorigin)
        after = self.tabs[pos+1] if pos+1 < len(self.tabs) else None
        if after is not None:
            after = after.page.panel
        # call after so that windows are all in their correct places
        wx.CallAfter(self.Notebook.did_rearrange, self.dragorigin.page.panel, after)
        # Resort
        if self.side_tabs:
            self.ReVgenerate(True)
        else:
            self.Regenerate()
    elif new:
        if self.side_tabs:
            self.ReVgenerate(True)
        else:
            self.Regenerate()
    # if there is a dragorigin run OnMouseLeave on it to reset its look
    if self.dragorigin:
        self.dragorigin.OnMouseLeave()
    # local cleanup
    self.dropmarker.Show(False)
    self.dragorigin = None
    self.dragtarget = None
    # destination and manager cleanup
    dest = manager.destination
    if dest:
        dest.tabbar.dragorigin = None
        dest.tabbar.dragtarget = None
        manager.Request()
    manager.Notify()
    if len(self.tabs) == 0:
        self.Notebook.window.Close()
def DrawDropMarker(self,x,y):
"""
Places the marker for where to drop the tab
x and y are center position, saves calculation that way
"""
# if self.side_tabs:
# self.dropmarker.SetSize((self.Size.width - self.dropmarkeroverlay, -1))
# else:
# self.dropmarker.SetSize((-1, self.rowheight - self.dropmarkeroverlay))
self.dropmarker.Teleport(self.ClientToScreen((x, y+self.dropmarkeroffset)))
if not self.dropmarker.IsShown():
self.Manager.ShowDropMarker(self.dropmarker)
def Toggle(self, switch = None):
'Toggle whether the tabbar is hidden or shown.'
if pref('tabs.hide_at_1', True) and len(self.tabs) <= 1 and not switch:
self.Notebook.Split(False)
#make sure the content of the IM win resizes if tabbaar hides
self.ProcessEvent(wx.CommandEvent(wx.wxEVT_SIZE))
else:
self.Notebook.Split(True)
def UpdateNotify(self):
frows = self.flagedrows
frows.clear()
# Resort
if self.side_tabs:
tabs=self.tabs
for i, tab in enumerate(tabs):
if tab.notified:
frows.add(i)
elif i in self.flagedrows:
frows.remove(i)
self.cupb.SetNotify(len(frows) and min(frows) < self.tabindex)
self.cdownb.SetNotify(len(frows) and (max(frows) >= self.tabendex or
tabs[max(frows)].Position.y + tabs[max(frows)].Size.height > self.Size.height - 16))
else:
for i, row in enumerate(self.rows):
flaged = False
for tab in row:
if tab and tab.notified:
flaged = True
frows.add(i)
self.navi.upb.SetNotify(len(frows) and min(frows)<self.rowindex)
self.navi.downb.SetNotify(len(frows) and max(frows)>self.rowindex + self.tab_rows - 1) | en | 0.81064 | #from traceback import print_stack,format_stack Where the tabs live, handles all display and organization functionality # a list of all the tabs # a list of all the visible row, each a list of all the tabs in that row # the first visible row # the first tab of the first visible row # the last tab of the last visible row #height of a row in pixels #buttons for verticle alignment #the navigation box #when draging the tab that you are dragging #when dragging the mouse is over and at that point released on # the arrow image shown when dragging tabs # self.dropmarker = Storage(Show = lambda v=True: None) #was the tab droped on the left or right of the target tab #linking prefs #@UndefinedVariable # ('dropmarkeroverlay', 'dropmarker.overlay', 0), #'dropmarkeroverlay', Adds a tab to the bar. Should only be used by parent NoteBook.
page - page in PageContainer the tab is to be associated with
focus - whether that tab should steal focus from current tab Invoked when a tab context menu item is clicked. # switch to that tab first It's like Doo... err.. Regenerate, only vertical # print "Starting: Regenerate",self.Top.Title,'\n'#,'='*80,'\n','\n'.join(format_stack()) # print '='*80 #TODO: Should we be careful about the tab leaving the bar? # Safty precautions prevent list access errors # Preset variables # the first tab shown # Height in pixels of the tabbar # number of fully visible rows in the given area at the given height #one tab per row #Sets navimode and position # Totally reconstructs the list if it's told to or there are not tabs in the rows or # if there isn't one more tab than there is room for and there is enough room to fit # them all and number of tabs in the row equals the number of tabs #if all tabs fit #calculate and show range # populate with earlier stuff #just leave the new values the same as the old # Show all tabs in the bar Regenerates layout information.
safe is a flag to indicate if we should try to keep the currently active
tab in view at all times. (This doesn't occur when scrolling, for
instance.) # print "Starting: Regenerate",self.Top.Title,'\n','='*80,'\n','\n'.join(format_stack()) # print '='*80 # early exit for when the tabbar isn't visible. # style is the number of rows (or 0 for single) # Should we be careful about the tab leaving the bar? # Hide all tabs preparation for refilling # navi set up, see if arrows are needed and placement #total size of tabs # Tab alignment calculations # Saftey precautions prevent list access errors # Preset variables # the first tab shown # Decide what kind of navigation panel, if any, to use... # arrows left and right # arrows up and down next to the X #Where to put navigation panel. #More preparing vars #While more tabs are not in a row # Loop through each visible tab, fitting tabs on the right. #Be carefull that the active tab doesn't scroll off the bar #add tabs until the active tab is visible #Remove tab if more tabs than room # If extra space, fit tabs to the right of the row # if single row, # Injects tabs calculated to fit in that row into that row # If we're in single row, break now. # Row calculation #cycle through visible rows #print "Being Careful" #print active #print ir,r #print "moving index down" #print "moving index up" # If we're closing tabs above where is visible, keep the visible # index "where it is" # Place tabs! # if this is a row that needs to be scrunched... # The first tab is set to it's minimum width at x: 0 # Every other tab is placed right next to the tab # before it. # If there are more rows than the current row... # Get a list of tab indices, widest to smallest. # o_O ? # Average width of tab if all tabs are the same size, and # fill up all the area. # Loop from "current" tab to the end # If this tab is larger than average... 
# Make it it's minimum, and keep track of it # If we're not on the last tab, recalc average # For tabs less than the average, set them to average # For every tab in the row # If it's the first tab: # If the row is so small it can only fit one tab, # make due. # If total rows is less than total rows being shown, shrink the # tab area so that it's only just big enough. #self.MinSize.height # self.Size=self.MinSize # Determine if the Navi needs to enable or show arrows #self.Parent.Layout() # Relayout self # final tab being shown #if no more tabs close window #if index is between index and endex and bring one tab from the left # position and size buttons Returns the number of tabs in the bar Sets the tab after the curent active
-if it does not exist does the previbus
-or the first if wrap is true Sets the tab before the curent active
-if it does not exist does the next
-or the last if wrap is true Moves the index and endex so that the active tab is in the bar Event that handles mouse wheeling,
maps the events to SetNextActive and SetLastActive The button events for vertical alignment for up and down When a tab is dragged this is called to start the tabbar handling dragging
origin - tab being dragged # Lets the TabMan know a tab as been dragged This does the dragging calculations for the tabs # if here is no local origin tab but there is a remote source identified in TabMan #setting a local drag origin # announcing to TabMan it is expecting a tab # if there is a local origin use that # if dragtarget is out of date find what you're dragging to # if there is a tab as target # data to decide what side the tab would be dropped on # make the left/top or right/bottom decision #left/top # + (dtrect.height // 2) # + dtrect.width // 2 #right/bottom #+(dtrect.height//2) #+dtrect.width//2 # if being dropped in the whitespace of the TabBar # find what row the tab is being dropped in # or in vertical if at the beginning or end #Place marker #+(dtrect.height//2) #+dtrect.width//2 #cleanup #if not in tabbar anymore don't show arrow Ends dragging and does any rearranging if required # Destroy the preview tab #if released out of the window... #If no or invalid destination in manager create a new window and sets it destination # FIXME: SWIG doesn't like subtracting a wx.Size from a wx.Point, so do it the hard way # until the SIP migration is finished. #else set the destination to the manager's destination #clear tabman's destination # Grab a reference to the tab's page # Make the tab "forget" about the page # Remove the tab from the tabs list, and destroy the wxWindow # remove page from this notebook and insert it into the target notebook #page.Parent.RemoveChild(page) # cleanup # re-sort tabs # if released inside of the window # used both for moving within a window and as the last step of a # interwindow move in case of interwindow tab has already been moved to # this window at the end of the list and all that is left is to move it # to the correct position. 
#remove the tab from the list #decide which side of the target the tab should be dropped # Reinsert the tab in it's new position # call after so that windows are all in their correct places # # Resort #if there is a dragorigin run onMouseLeave on it to reset it's look #local cleanup #destination and manager cleanup Places the marker for where to drop the tab
x and y are center position, saves calculation that way # if self.side_tabs: # self.dropmarker.SetSize((self.Size.width - self.dropmarkeroverlay, -1)) # else: # self.dropmarker.SetSize((-1, self.rowheight - self.dropmarkeroverlay)) #make sure the content of the IM win resizes if tabbaar hides # Resort | 2.310913 | 2 |
examples/groups/__init__.py | Arianxx/freesia | 2 | 6612999 | <filename>examples/groups/__init__.py
from freesia import Freesia
def create_app():
    """Application factory: build a Freesia app with the api group attached."""
    application = Freesia()
    from .api import api
    application.register_group(api)
    return application
# Run the example directly: build the app and start its server.
if __name__ == "__main__":
    app = create_app()
    app.run()
| <filename>examples/groups/__init__.py
from freesia import Freesia
def create_app():
    """Create and return the Freesia application with the api group registered."""
    instance = Freesia()
    from .api import api
    instance.register_group(api)
    return instance
# Run the example directly: build the app and start its server.
if __name__ == "__main__":
    app = create_app()
    app.run()
| none | 1 | 1.802863 | 2 | |
source/deepsecurity/models/aws_connector.py | felipecosta09/cloudone-workload-controltower-lifecycle | 1 | 6613000 | <reponame>felipecosta09/cloudone-workload-controltower-lifecycle<filename>source/deepsecurity/models/aws_connector.py
# coding: utf-8
"""
Trend Micro Deep Security API
Copyright 2018 - 2020 Trend Micro Incorporated.<br/>Get protected, stay secured, and keep informed with Trend Micro Deep Security's new RESTful API. Access system data and manage security configurations to automate your security workflows and integrate Deep Security into your CI/CD pipeline. # noqa: E501
OpenAPI spec version: 12.5.841
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from deepsecurity.models.aws_region import AWSRegion # noqa: F401,E501
class AWSConnector(object):
    """AWS Connector model (originally emitted by swagger-codegen).

    NOTE: This class is auto generated by the swagger code generator
    program; this version is a behavior-identical restyling.

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the API definition.
    """

    # Attribute name -> swagger type, in declaration order.
    swagger_types = {
        'display_name': 'str',
        'account_id': 'str',
        'account_alias': 'str',
        'access_key': 'str',
        'secret_key': 'str',
        'seed_region': 'str',
        'use_instance_role': 'bool',
        'cross_account_role_arn': 'str',
        'last_sync_time': 'int',
        'synced_regions': 'list[AWSRegion]',
        'workspaces_enabled': 'bool',
        'id': 'int'
    }

    # Attribute name -> JSON key used on the wire.
    attribute_map = {
        'display_name': 'displayName',
        'account_id': 'accountId',
        'account_alias': 'accountAlias',
        'access_key': 'accessKey',
        'secret_key': 'secretKey',
        'seed_region': 'seedRegion',
        'use_instance_role': 'useInstanceRole',
        'cross_account_role_arn': 'crossAccountRoleArn',
        'last_sync_time': 'lastSyncTime',
        'synced_regions': 'syncedRegions',
        'workspaces_enabled': 'workspacesEnabled',
        'id': 'ID'
    }

    def _model_property(name):  # noqa: N805 -- class-body helper, not a method
        """Build a plain get/set property backed by the '_<name>' slot."""
        slot = '_' + name

        def _get(self):
            return getattr(self, slot)

        def _set(self, value):
            setattr(self, slot, value)

        return property(_get, _set,
                        doc="The %s of this AWSConnector." % name)

    display_name = _model_property('display_name')
    account_id = _model_property('account_id')
    account_alias = _model_property('account_alias')
    access_key = _model_property('access_key')
    secret_key = _model_property('secret_key')
    seed_region = _model_property('seed_region')
    use_instance_role = _model_property('use_instance_role')
    cross_account_role_arn = _model_property('cross_account_role_arn')
    last_sync_time = _model_property('last_sync_time')
    synced_regions = _model_property('synced_regions')
    workspaces_enabled = _model_property('workspaces_enabled')
    id = _model_property('id')

    del _model_property  # keep the factory out of the class namespace

    def __init__(self, display_name=None, account_id=None, account_alias=None,
                 access_key=None, secret_key=None, seed_region=None,
                 use_instance_role=None, cross_account_role_arn=None,
                 last_sync_time=None, synced_regions=None,
                 workspaces_enabled=None, id=None):  # noqa: E501
        """AWSConnector - a model defined in Swagger"""  # noqa: E501
        supplied = {
            'display_name': display_name,
            'account_id': account_id,
            'account_alias': account_alias,
            'access_key': access_key,
            'secret_key': secret_key,
            'seed_region': seed_region,
            'use_instance_role': use_instance_role,
            'cross_account_role_arn': cross_account_role_arn,
            'last_sync_time': last_sync_time,
            'synced_regions': synced_regions,
            'workspaces_enabled': workspaces_enabled,
            'id': id,
        }
        # Every model attribute starts out as None...
        for name in self.swagger_types:
            setattr(self, '_' + name, None)
        self.discriminator = None
        # ...then any argument the caller actually passed (i.e. not None)
        # is routed through its property setter, as the generated code did.
        for name, value in supplied.items():
            if value is not None:
                setattr(self, name, value)

    def to_dict(self):
        """Returns the model properties as a dict"""
        def convert(value):
            # Recursively serialize nested swagger models and containers.
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, 'to_dict') else item
                        for item in value]
            if hasattr(value, 'to_dict'):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: (val.to_dict() if hasattr(val, 'to_dict') else val)
                        for key, val in value.items()}
            return value

        result = {attr: convert(getattr(self, attr))
                  for attr in self.swagger_types}
        # Preserved from the generated code: merge dict items when the
        # model subclasses dict.
        if issubclass(AWSConnector, dict):
            result.update(self)
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return (isinstance(other, AWSConnector) and
                self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| # coding: utf-8
"""
Trend Micro Deep Security API
Copyright 2018 - 2020 Trend Micro Incorporated.<br/>Get protected, stay secured, and keep informed with Trend Micro Deep Security's new RESTful API. Access system data and manage security configurations to automate your security workflows and integrate Deep Security into your CI/CD pipeline. # noqa: E501
OpenAPI spec version: 12.5.841
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from deepsecurity.models.aws_region import AWSRegion # noqa: F401,E501
class AWSConnector(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'display_name': 'str',
'account_id': 'str',
'account_alias': 'str',
'access_key': 'str',
'secret_key': 'str',
'seed_region': 'str',
'use_instance_role': 'bool',
'cross_account_role_arn': 'str',
'last_sync_time': 'int',
'synced_regions': 'list[AWSRegion]',
'workspaces_enabled': 'bool',
'id': 'int'
}
attribute_map = {
'display_name': 'displayName',
'account_id': 'accountId',
'account_alias': 'accountAlias',
'access_key': 'accessKey',
'secret_key': 'secretKey',
'seed_region': 'seedRegion',
'use_instance_role': 'useInstanceRole',
'cross_account_role_arn': 'crossAccountRoleArn',
'last_sync_time': 'lastSyncTime',
'synced_regions': 'syncedRegions',
'workspaces_enabled': 'workspacesEnabled',
'id': 'ID'
}
def __init__(self, display_name=None, account_id=None, account_alias=None, access_key=None, secret_key=None, seed_region=None, use_instance_role=None, cross_account_role_arn=None, last_sync_time=None, synced_regions=None, workspaces_enabled=None, id=None): # noqa: E501
"""AWSConnector - a model defined in Swagger""" # noqa: E501
self._display_name = None
self._account_id = None
self._account_alias = None
self._access_key = None
self._secret_key = None
self._seed_region = None
self._use_instance_role = None
self._cross_account_role_arn = None
self._last_sync_time = None
self._synced_regions = None
self._workspaces_enabled = None
self._id = None
self.discriminator = None
if display_name is not None:
self.display_name = display_name
if account_id is not None:
self.account_id = account_id
if account_alias is not None:
self.account_alias = account_alias
if access_key is not None:
self.access_key = access_key
if secret_key is not None:
self.secret_key = secret_key
if seed_region is not None:
self.seed_region = seed_region
if use_instance_role is not None:
self.use_instance_role = use_instance_role
if cross_account_role_arn is not None:
self.cross_account_role_arn = cross_account_role_arn
if last_sync_time is not None:
self.last_sync_time = last_sync_time
if synced_regions is not None:
self.synced_regions = synced_regions
if workspaces_enabled is not None:
self.workspaces_enabled = workspaces_enabled
if id is not None:
self.id = id
@property
def display_name(self):
"""Gets the display_name of this AWSConnector. # noqa: E501
The AWS Connector's display name in DSM. # noqa: E501
:return: The display_name of this AWSConnector. # noqa: E501
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""Sets the display_name of this AWSConnector.
The AWS Connector's display name in DSM. # noqa: E501
:param display_name: The display_name of this AWSConnector. # noqa: E501
:type: str
"""
self._display_name = display_name
@property
def account_id(self):
"""Gets the account_id of this AWSConnector. # noqa: E501
The AWS Account ID. Searchable as String. # noqa: E501
:return: The account_id of this AWSConnector. # noqa: E501
:rtype: str
"""
return self._account_id
@account_id.setter
def account_id(self, account_id):
"""Sets the account_id of this AWSConnector.
The AWS Account ID. Searchable as String. # noqa: E501
:param account_id: The account_id of this AWSConnector. # noqa: E501
:type: str
"""
self._account_id = account_id
@property
def account_alias(self):
"""Gets the account_alias of this AWSConnector. # noqa: E501
The AWS Account Alias. Searchable as String. # noqa: E501
:return: The account_alias of this AWSConnector. # noqa: E501
:rtype: str
"""
return self._account_alias
@account_alias.setter
def account_alias(self, account_alias):
"""Sets the account_alias of this AWSConnector.
The AWS Account Alias. Searchable as String. # noqa: E501
:param account_alias: The account_alias of this AWSConnector. # noqa: E501
:type: str
"""
self._account_alias = account_alias
@property
def access_key(self):
"""Gets the access_key of this AWSConnector. # noqa: E501
The AWS Access Key of the account. If used, Cross Account Role ARN is not required. Searchable as String. # noqa: E501
:return: The access_key of this AWSConnector. # noqa: E501
:rtype: str
"""
return self._access_key
@access_key.setter
def access_key(self, access_key):
"""Sets the access_key of this AWSConnector.
The AWS Access Key of the account. If used, Cross Account Role ARN is not required. Searchable as String. # noqa: E501
:param access_key: The access_key of this AWSConnector. # noqa: E501
:type: str
"""
self._access_key = access_key
@property
def secret_key(self):
"""Gets the secret_key of this AWSConnector. # noqa: E501
The AWS Secret Key required to add the connector using an Access Key. Not present in returned objects. # noqa: E501
:return: The secret_key of this AWSConnector. # noqa: E501
:rtype: str
"""
return self._secret_key
@secret_key.setter
def secret_key(self, secret_key):
"""Sets the secret_key of this AWSConnector.
The AWS Secret Key required to add the connector using an Access Key. Not present in returned objects. # noqa: E501
:param secret_key: The secret_key of this AWSConnector. # noqa: E501
:type: str
"""
self._secret_key = secret_key
@property
def seed_region(self):
"""Gets the seed_region of this AWSConnector. # noqa: E501
The region to initialize the EC2 client in. This is an advanced option used if you want to access special regions. Searchable as String. # noqa: E501
:return: The seed_region of this AWSConnector. # noqa: E501
:rtype: str
"""
return self._seed_region
@seed_region.setter
def seed_region(self, seed_region):
"""Sets the seed_region of this AWSConnector.
The region to initialize the EC2 client in. This is an advanced option used if you want to access special regions. Searchable as String. # noqa: E501
:param seed_region: The seed_region of this AWSConnector. # noqa: E501
:type: str
"""
self._seed_region = seed_region
@property
def use_instance_role(self):
"""Gets the use_instance_role of this AWSConnector. # noqa: E501
Specifies whether or not to use the DSM instance role to add the AWS Connector instead of an Access Key or a Cross Account Role ARN. # noqa: E501
:return: The use_instance_role of this AWSConnector. # noqa: E501
:rtype: bool
"""
return self._use_instance_role
@use_instance_role.setter
def use_instance_role(self, use_instance_role):
"""Sets the use_instance_role of this AWSConnector.
Specifies whether or not to use the DSM instance role to add the AWS Connector instead of an Access Key or a Cross Account Role ARN. # noqa: E501
:param use_instance_role: The use_instance_role of this AWSConnector. # noqa: E501
:type: bool
"""
self._use_instance_role = use_instance_role
@property
def cross_account_role_arn(self):
"""Gets the cross_account_role_arn of this AWSConnector. # noqa: E501
The Cross Account Role ARN of the AWS account. If used, Access Key is not required. Searchable as String. # noqa: E501
:return: The cross_account_role_arn of this AWSConnector. # noqa: E501
:rtype: str
"""
return self._cross_account_role_arn
@cross_account_role_arn.setter
def cross_account_role_arn(self, cross_account_role_arn):
"""Sets the cross_account_role_arn of this AWSConnector.
The Cross Account Role ARN of the AWS account. If used, Access Key is not required. Searchable as String. # noqa: E501
:param cross_account_role_arn: The cross_account_role_arn of this AWSConnector. # noqa: E501
:type: str
"""
self._cross_account_role_arn = cross_account_role_arn
@property
def last_sync_time(self):
"""Gets the last_sync_time of this AWSConnector. # noqa: E501
Timestamp of the last time the AWS Connector was successfully synchronized, in milliseconds since epoch. Searchable as Date. # noqa: E501
:return: The last_sync_time of this AWSConnector. # noqa: E501
:rtype: int
"""
return self._last_sync_time
@last_sync_time.setter
def last_sync_time(self, last_sync_time):
"""Sets the last_sync_time of this AWSConnector.
Timestamp of the last time the AWS Connector was successfully synchronized, in milliseconds since epoch. Searchable as Date. # noqa: E501
:param last_sync_time: The last_sync_time of this AWSConnector. # noqa: E501
:type: int
"""
self._last_sync_time = last_sync_time
@property
def synced_regions(self):
"""Gets the synced_regions of this AWSConnector. # noqa: E501
The list of AWS regions that have been synchronized for the connector. # noqa: E501
:return: The synced_regions of this AWSConnector. # noqa: E501
:rtype: list[AWSRegion]
"""
return self._synced_regions
@synced_regions.setter
def synced_regions(self, synced_regions):
"""Sets the synced_regions of this AWSConnector.
The list of AWS regions that have been synchronized for the connector. # noqa: E501
:param synced_regions: The synced_regions of this AWSConnector. # noqa: E501
:type: list[AWSRegion]
"""
self._synced_regions = synced_regions
@property
def workspaces_enabled(self):
"""Gets the workspaces_enabled of this AWSConnector. # noqa: E501
A flag controlling whether or not Amazon Workspaces are enabled for the connector. Searchable as Boolean. Default is false. # noqa: E501
:return: The workspaces_enabled of this AWSConnector. # noqa: E501
:rtype: bool
"""
return self._workspaces_enabled
@workspaces_enabled.setter
def workspaces_enabled(self, workspaces_enabled):
"""Sets the workspaces_enabled of this AWSConnector.
A flag controlling whether or not Amazon Workspaces are enabled for the connector. Searchable as Boolean. Default is false. # noqa: E501
:param workspaces_enabled: The workspaces_enabled of this AWSConnector. # noqa: E501
:type: bool
"""
self._workspaces_enabled = workspaces_enabled
@property
def id(self):
"""Gets the id of this AWSConnector. # noqa: E501
The Deep Security internal ID of the AWS Cloud Connector. Searchable as ID. # noqa: E501
:return: The id of this AWSConnector. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this AWSConnector.
The Deep Security internal ID of the AWS Cloud Connector. Searchable as ID. # noqa: E501
:param id: The id of this AWSConnector. # noqa: E501
:type: int
"""
self._id = id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(AWSConnector, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AWSConnector):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other | en | 0.733168 | # coding: utf-8 Trend Micro Deep Security API
Copyright 2018 - 2020 Trend Micro Incorporated.<br/>Get protected, stay secured, and keep informed with Trend Micro Deep Security's new RESTful API. Access system data and manage security configurations to automate your security workflows and integrate Deep Security into your CI/CD pipeline. # noqa: E501
OpenAPI spec version: 12.5.841
Generated by: https://github.com/swagger-api/swagger-codegen.git # noqa: F401 # noqa: F401,E501 NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually. Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition. # noqa: E501 AWSConnector - a model defined in Swagger # noqa: E501 Gets the display_name of this AWSConnector. # noqa: E501
The AWS Connector's display name in DSM. # noqa: E501
:return: The display_name of this AWSConnector. # noqa: E501
:rtype: str Sets the display_name of this AWSConnector.
The AWS Connector's display name in DSM. # noqa: E501
:param display_name: The display_name of this AWSConnector. # noqa: E501
:type: str Gets the account_id of this AWSConnector. # noqa: E501
The AWS Account ID. Searchable as String. # noqa: E501
:return: The account_id of this AWSConnector. # noqa: E501
:rtype: str Sets the account_id of this AWSConnector.
The AWS Account ID. Searchable as String. # noqa: E501
:param account_id: The account_id of this AWSConnector. # noqa: E501
:type: str Gets the account_alias of this AWSConnector. # noqa: E501
The AWS Account Alias. Searchable as String. # noqa: E501
:return: The account_alias of this AWSConnector. # noqa: E501
:rtype: str Sets the account_alias of this AWSConnector.
The AWS Account Alias. Searchable as String. # noqa: E501
:param account_alias: The account_alias of this AWSConnector. # noqa: E501
:type: str Gets the access_key of this AWSConnector. # noqa: E501
The AWS Access Key of the account. If used, Cross Account Role ARN is not required. Searchable as String. # noqa: E501
:return: The access_key of this AWSConnector. # noqa: E501
:rtype: str Sets the access_key of this AWSConnector.
The AWS Access Key of the account. If used, Cross Account Role ARN is not required. Searchable as String. # noqa: E501
:param access_key: The access_key of this AWSConnector. # noqa: E501
:type: str Gets the secret_key of this AWSConnector. # noqa: E501
The AWS Secret Key required to add the connector using an Access Key. Not present in returned objects. # noqa: E501
:return: The secret_key of this AWSConnector. # noqa: E501
:rtype: str Sets the secret_key of this AWSConnector.
The AWS Secret Key required to add the connector using an Access Key. Not present in returned objects. # noqa: E501
:param secret_key: The secret_key of this AWSConnector. # noqa: E501
:type: str Gets the seed_region of this AWSConnector. # noqa: E501
The region to initialize the EC2 client in. This is an advanced option used if you want to access special regions. Searchable as String. # noqa: E501
:return: The seed_region of this AWSConnector. # noqa: E501
:rtype: str Sets the seed_region of this AWSConnector.
The region to initialize the EC2 client in. This is an advanced option used if you want to access special regions. Searchable as String. # noqa: E501
:param seed_region: The seed_region of this AWSConnector. # noqa: E501
:type: str Gets the use_instance_role of this AWSConnector. # noqa: E501
Specifies whether or not to use the DSM instance role to add the AWS Connector instead of an Access Key or a Cross Account Role ARN. # noqa: E501
:return: The use_instance_role of this AWSConnector. # noqa: E501
:rtype: bool Sets the use_instance_role of this AWSConnector.
Specifies whether or not to use the DSM instance role to add the AWS Connector instead of an Access Key or a Cross Account Role ARN. # noqa: E501
:param use_instance_role: The use_instance_role of this AWSConnector. # noqa: E501
:type: bool Gets the cross_account_role_arn of this AWSConnector. # noqa: E501
The Cross Account Role ARN of the AWS account. If used, Access Key is not required. Searchable as String. # noqa: E501
:return: The cross_account_role_arn of this AWSConnector. # noqa: E501
:rtype: str Sets the cross_account_role_arn of this AWSConnector.
The Cross Account Role ARN of the AWS account. If used, Access Key is not required. Searchable as String. # noqa: E501
:param cross_account_role_arn: The cross_account_role_arn of this AWSConnector. # noqa: E501
:type: str Gets the last_sync_time of this AWSConnector. # noqa: E501
Timestamp of the last time the AWS Connector was successfully synchronized, in milliseconds since epoch. Searchable as Date. # noqa: E501
:return: The last_sync_time of this AWSConnector. # noqa: E501
:rtype: int Sets the last_sync_time of this AWSConnector.
Timestamp of the last time the AWS Connector was successfully synchronized, in milliseconds since epoch. Searchable as Date. # noqa: E501
:param last_sync_time: The last_sync_time of this AWSConnector. # noqa: E501
:type: int Gets the synced_regions of this AWSConnector. # noqa: E501
The list of AWS regions that have been synchronized for the connector. # noqa: E501
:return: The synced_regions of this AWSConnector. # noqa: E501
:rtype: list[AWSRegion] Sets the synced_regions of this AWSConnector.
The list of AWS regions that have been synchronized for the connector. # noqa: E501
:param synced_regions: The synced_regions of this AWSConnector. # noqa: E501
:type: list[AWSRegion] Gets the workspaces_enabled of this AWSConnector. # noqa: E501
A flag controlling whether or not Amazon Workspaces are enabled for the connector. Searchable as Boolean. Default is false. # noqa: E501
:return: The workspaces_enabled of this AWSConnector. # noqa: E501
:rtype: bool Sets the workspaces_enabled of this AWSConnector.
A flag controlling whether or not Amazon Workspaces are enabled for the connector. Searchable as Boolean. Default is false. # noqa: E501
:param workspaces_enabled: The workspaces_enabled of this AWSConnector. # noqa: E501
:type: bool Gets the id of this AWSConnector. # noqa: E501
The Deep Security internal ID of the AWS Cloud Connector. Searchable as ID. # noqa: E501
:return: The id of this AWSConnector. # noqa: E501
:rtype: int Sets the id of this AWSConnector.
The Deep Security internal ID of the AWS Cloud Connector. Searchable as ID. # noqa: E501
:param id: The id of this AWSConnector. # noqa: E501
:type: int Returns the model properties as a dict Returns the string representation of the model For `print` and `pprint` Returns true if both objects are equal Returns true if both objects are not equal | 1.383452 | 1 |
d15_individual_digits.py | DK2K00/100DaysOfCode | 0 | 6613001 | #Function to add individual digits of a given number using recursion
def individual_sum(n):
if(n == 0):
return 0
if(n == 1):
return 1
sum = 0
#Splitting individual digits
sum += int(n % 10)
return(sum + individual_sum(n/10))
individual_sum(54321)
| #Function to add individual digits of a given number using recursion
def individual_sum(n):
if(n == 0):
return 0
if(n == 1):
return 1
sum = 0
#Splitting individual digits
sum += int(n % 10)
return(sum + individual_sum(n/10))
individual_sum(54321)
| en | 0.404612 | #Function to add individual digits of a given number using recursion #Splitting individual digits | 3.801225 | 4 |
hiyobot/cogs/events/error.py | ombe1229/Hiyobot | 12 | 6613002 | <gh_stars>10-100
from discord.ext import commands
from hiyobot.bot import Hiyobot
class Error(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_command_error(self, ctx: commands.Context, error):
if isinstance(error, commands.CommandNotFound):
await ctx.send(
"명령어를 찾을 수 없습니다. `&도움말` 명령어를 사용해 전체 명령어 목록을 볼 수 있습니다.", delete_after=5
)
elif isinstance(error, commands.NSFWChannelRequired):
await ctx.send(
"연령 제한(NSFW)이 설정된 채널에서만 사용하실 수 있습니다. 이 명령어를 사용하려면 채널 관리자가 `채널 설정 -> 연령 제한 채널`을 활성화해야 합니다.",
delete_after=5,
)
elif isinstance(error, commands.MissingRequiredArgument):
await ctx.send(
"명령어 사용법이 잘못되었습니다. 값이 부족합니다. `&도움말` 명령어를 통해 정확한 사용법을 보실 수 있습니다.",
delete_after=5,
)
elif isinstance(error, commands.BadArgument):
await ctx.send(
"명령어 사용법이 잘못되었습니다. 지정한 값이 잘못되었습니다. `&도움말` 명령어를 통해 정확한 사용법을 보실 수 있습니다.",
delete_after=5,
)
elif isinstance(error, commands.NotOwner):
await ctx.send("해당 명령어는 봇 관리자만 사용 가능합니다.", delete_after=5)
elif isinstance(error, commands.TooManyArguments):
await ctx.send(
"명령어의 인자값이 너무 많습니다. '&도움말' 명령어를 통해 정확한 사용법을 확인해주세요.", delete_after=5
)
else:
await ctx.send(
"알수없는 오류가 발생했습니다. 자동으로 개발자에게 오류로그를 전송합니다.\n``&문의``를 이용해 버그신고를 해주시면 더 빠른 도움이됩니다.",
delete_after=10,
)
raise error
def setup(bot: Hiyobot):
bot.add_cog(Error(bot))
| from discord.ext import commands
from hiyobot.bot import Hiyobot
class Error(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_command_error(self, ctx: commands.Context, error):
if isinstance(error, commands.CommandNotFound):
await ctx.send(
"명령어를 찾을 수 없습니다. `&도움말` 명령어를 사용해 전체 명령어 목록을 볼 수 있습니다.", delete_after=5
)
elif isinstance(error, commands.NSFWChannelRequired):
await ctx.send(
"연령 제한(NSFW)이 설정된 채널에서만 사용하실 수 있습니다. 이 명령어를 사용하려면 채널 관리자가 `채널 설정 -> 연령 제한 채널`을 활성화해야 합니다.",
delete_after=5,
)
elif isinstance(error, commands.MissingRequiredArgument):
await ctx.send(
"명령어 사용법이 잘못되었습니다. 값이 부족합니다. `&도움말` 명령어를 통해 정확한 사용법을 보실 수 있습니다.",
delete_after=5,
)
elif isinstance(error, commands.BadArgument):
await ctx.send(
"명령어 사용법이 잘못되었습니다. 지정한 값이 잘못되었습니다. `&도움말` 명령어를 통해 정확한 사용법을 보실 수 있습니다.",
delete_after=5,
)
elif isinstance(error, commands.NotOwner):
await ctx.send("해당 명령어는 봇 관리자만 사용 가능합니다.", delete_after=5)
elif isinstance(error, commands.TooManyArguments):
await ctx.send(
"명령어의 인자값이 너무 많습니다. '&도움말' 명령어를 통해 정확한 사용법을 확인해주세요.", delete_after=5
)
else:
await ctx.send(
"알수없는 오류가 발생했습니다. 자동으로 개발자에게 오류로그를 전송합니다.\n``&문의``를 이용해 버그신고를 해주시면 더 빠른 도움이됩니다.",
delete_after=10,
)
raise error
def setup(bot: Hiyobot):
bot.add_cog(Error(bot)) | none | 1 | 2.445265 | 2 | |
src/pigs/bot_test.py | rcobanov/Examination2 | 0 | 6613003 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Unit testing."""
import unittest
import bot
import dice
class TestBotClass(unittest.TestCase):
"""Test the class."""
def test_object_initiating(self):
"""Instantiate an object and check its properties."""
test_bot = bot.Bot(0, 0, 1)
self.assertIsInstance(test_bot, bot.Bot)
def test_add_current_to_total_score(self):
"""Summarize total with the current score on this round."""
test_bot = bot.Bot(12, 10, 2)
test_bot.add_curr_to_total()
self.assertEqual(test_bot.total_score, 22)
def test_reset_current_score_to_zero(self):
"""Reset current score to zero."""
test_bot = bot.Bot(12, 0, 2)
test_bot.reset_current_score()
self.assertEqual(test_bot.curr_round_score, 0)
def test_number_of_rounds(self):
"""Test get_number_of_rounds method."""
test_bot = bot.Bot(0, 0, 2)
number_of_rounds = test_bot.get_number_of_rounds(2)
exp = 9
self.assertEqual(number_of_rounds, exp)
def test_reset_bot_values(self):
"""Test reset scores on bot."""
test_bot = bot.Bot(2, 2, 2)
test_bot.reset_scores()
self.assertTrue(test_bot.curr_round_score == 0)
self.assertTrue(test_bot.total_score == 0)
def test_easy_level_on_bot_round(self):
"""Testing the the easiest level on the bot."""
test_bot = bot.Bot(0, 0, 1)
test_dice = dice.Dice()
test_bot.bot_round(test_dice)
exp = 0
self.assertEqual(test_bot.curr_round_score, exp)
self.assertTrue(test_bot.total_score <= 70)
self.assertEqual(test_bot.level, 1)
def test_hard_level_on_bot_round(self):
"""Testing the the hardest level on the bot."""
test_bot = bot.Bot(0, 0, 3)
test_dice = dice.Dice()
test_bot.bot_round(test_dice)
self.assertTrue(test_bot.curr_round_score == 0)
self.assertTrue(test_bot.total_score < 30)
self.assertEqual(test_bot.level, 3)
if __name__ == '__main__':
unittest.main()
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Unit testing."""
import unittest
import bot
import dice
class TestBotClass(unittest.TestCase):
"""Test the class."""
def test_object_initiating(self):
"""Instantiate an object and check its properties."""
test_bot = bot.Bot(0, 0, 1)
self.assertIsInstance(test_bot, bot.Bot)
def test_add_current_to_total_score(self):
"""Summarize total with the current score on this round."""
test_bot = bot.Bot(12, 10, 2)
test_bot.add_curr_to_total()
self.assertEqual(test_bot.total_score, 22)
def test_reset_current_score_to_zero(self):
"""Reset current score to zero."""
test_bot = bot.Bot(12, 0, 2)
test_bot.reset_current_score()
self.assertEqual(test_bot.curr_round_score, 0)
def test_number_of_rounds(self):
"""Test get_number_of_rounds method."""
test_bot = bot.Bot(0, 0, 2)
number_of_rounds = test_bot.get_number_of_rounds(2)
exp = 9
self.assertEqual(number_of_rounds, exp)
def test_reset_bot_values(self):
"""Test reset scores on bot."""
test_bot = bot.Bot(2, 2, 2)
test_bot.reset_scores()
self.assertTrue(test_bot.curr_round_score == 0)
self.assertTrue(test_bot.total_score == 0)
def test_easy_level_on_bot_round(self):
"""Testing the the easiest level on the bot."""
test_bot = bot.Bot(0, 0, 1)
test_dice = dice.Dice()
test_bot.bot_round(test_dice)
exp = 0
self.assertEqual(test_bot.curr_round_score, exp)
self.assertTrue(test_bot.total_score <= 70)
self.assertEqual(test_bot.level, 1)
def test_hard_level_on_bot_round(self):
"""Testing the the hardest level on the bot."""
test_bot = bot.Bot(0, 0, 3)
test_dice = dice.Dice()
test_bot.bot_round(test_dice)
self.assertTrue(test_bot.curr_round_score == 0)
self.assertTrue(test_bot.total_score < 30)
self.assertEqual(test_bot.level, 3)
if __name__ == '__main__':
unittest.main()
| en | 0.746811 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- Unit testing. Test the class. Instantiate an object and check its properties. Summarize total with the current score on this round. Reset current score to zero. Test get_number_of_rounds method. Test reset scores on bot. Testing the the easiest level on the bot. Testing the the hardest level on the bot. | 3.932082 | 4 |
lib/aquilon/worker/commands/__init__.py | ned21/aquilon | 7 | 6613004 | <filename>lib/aquilon/worker/commands/__init__.py
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2013,2014 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Initialize each broker command and make it easy to load by installing
an instance of the command as broker_command in the module.
This ends up importing everything... any script in this directory
will end up being triggered any time any module in this directory is
loaded - side effects and all.
Once loaded, this iterates through each module, finds the subclass of
BrokerCommand and installs it as broker_command. The module name is
then added to __all__.
"""
import os
import logging
from traceback import format_exc
from inspect import isclass
from twisted.python import log
__all__ = []
_thisdir = os.path.dirname(os.path.realpath(__file__))
for f in os.listdir(_thisdir):
full = os.path.join(_thisdir, f)
if os.path.isfile(full) and f.endswith('.py') and f != '__init__.py':
moduleshort = f[:-3]
modulename = __name__ + '.' + moduleshort
try:
mymodule = __import__(modulename, fromlist=["BrokerCommand"])
except Exception as e: # pragma: no cover
log.msg("Error importing %s: %s" % (modulename, format_exc()))
continue
if not hasattr(mymodule, "BrokerCommand"): # pragma: no cover
continue
# This is just convenient... don't have to import the 'real'
# BrokerCommand, since any file we care about will have already
# had to import it.
BrokerCommand = mymodule.BrokerCommand
for item in [getattr(mymodule, i) for i in dir(mymodule)]:
if not isclass(item):
continue
if item.__module__ != mymodule.__name__:
# Prevents us from accidently picking up base classes and
# other imports.
continue
if issubclass(item, BrokerCommand):
mymodule.broker_command = item()
mymodule.broker_command.module_logger = \
logging.getLogger(modulename)
__all__.append(moduleshort)
break
| <filename>lib/aquilon/worker/commands/__init__.py
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2013,2014 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Initialize each broker command and make it easy to load by installing
an instance of the command as broker_command in the module.
This ends up importing everything... any script in this directory
will end up being triggered any time any module in this directory is
loaded - side effects and all.
Once loaded, this iterates through each module, finds the subclass of
BrokerCommand and installs it as broker_command. The module name is
then added to __all__.
"""
import os
import logging
from traceback import format_exc
from inspect import isclass
from twisted.python import log
__all__ = []
_thisdir = os.path.dirname(os.path.realpath(__file__))
for f in os.listdir(_thisdir):
full = os.path.join(_thisdir, f)
if os.path.isfile(full) and f.endswith('.py') and f != '__init__.py':
moduleshort = f[:-3]
modulename = __name__ + '.' + moduleshort
try:
mymodule = __import__(modulename, fromlist=["BrokerCommand"])
except Exception as e: # pragma: no cover
log.msg("Error importing %s: %s" % (modulename, format_exc()))
continue
if not hasattr(mymodule, "BrokerCommand"): # pragma: no cover
continue
# This is just convenient... don't have to import the 'real'
# BrokerCommand, since any file we care about will have already
# had to import it.
BrokerCommand = mymodule.BrokerCommand
for item in [getattr(mymodule, i) for i in dir(mymodule)]:
if not isclass(item):
continue
if item.__module__ != mymodule.__name__:
# Prevents us from accidently picking up base classes and
# other imports.
continue
if issubclass(item, BrokerCommand):
mymodule.broker_command = item()
mymodule.broker_command.module_logger = \
logging.getLogger(modulename)
__all__.append(moduleshort)
break
| en | 0.87273 | # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*- # ex: set expandtab softtabstop=4 shiftwidth=4: # # Copyright (C) 2008,2009,2010,2011,2013,2014 Contributor # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Initialize each broker command and make it easy to load by installing an instance of the command as broker_command in the module. This ends up importing everything... any script in this directory will end up being triggered any time any module in this directory is loaded - side effects and all. Once loaded, this iterates through each module, finds the subclass of BrokerCommand and installs it as broker_command. The module name is then added to __all__. # pragma: no cover # pragma: no cover # This is just convenient... don't have to import the 'real' # BrokerCommand, since any file we care about will have already # had to import it. # Prevents us from accidently picking up base classes and # other imports. | 1.969589 | 2 |
generate_prod_min.py | sthagen/approvals-ApprovalTests.Python | 0 | 6613005 | <gh_stars>0
from pathlib import Path
reg_prod_path = Path(__file__).parent / "requirements.prod.txt"
min_prod_path = Path(__file__).parent / "requirements.prod.min.txt"
reg_prod = reg_prod_path.read_text()
min_prod = reg_prod.replace(">=", "==")
min_prod_path.write_text(min_prod)
| from pathlib import Path
reg_prod_path = Path(__file__).parent / "requirements.prod.txt"
min_prod_path = Path(__file__).parent / "requirements.prod.min.txt"
reg_prod = reg_prod_path.read_text()
min_prod = reg_prod.replace(">=", "==")
min_prod_path.write_text(min_prod) | none | 1 | 2.087677 | 2 | |
Python3/0350-Intersection-of-Two-Arrays-II/soln-1.py | wyaadarsh/LeetCode-Solutions | 5 | 6613006 | class Solution:
def intersect(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: List[int]
"""
counter1, counter2 = collections.Counter(nums1), collections.Counter(nums2)
return list(itertools.chain.from_iterable([num] * min(counter1[num], counter2[num]) for num in counter1 & counter2)) | class Solution:
def intersect(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: List[int]
"""
counter1, counter2 = collections.Counter(nums1), collections.Counter(nums2)
return list(itertools.chain.from_iterable([num] * min(counter1[num], counter2[num]) for num in counter1 & counter2)) | en | 0.11888 | :type nums1: List[int] :type nums2: List[int] :rtype: List[int] | 3.443143 | 3 |
trefoil/render/renderers/unique.py | icpac-igad/trefoil | 9 | 6613007 | from PIL import Image
import numpy
from trefoil.render.renderers import RasterRenderer
from trefoil.render.renderers.legend import LegendElement
from trefoil.utilities.format import PrecisionFormatter
LEGEND_TICK_POSITION = 0.5
class UniqueValuesRenderer(RasterRenderer):
def __init__(self, colormap, fill_value=None, background_color=None, labels=None):
"""
Maps unique values to colors. Any color not mapped is set to transparent or background_color.
:param colormap: list of value, Color instances: [(value, Color)...]
"""
assert len(colormap) > 0
super(UniqueValuesRenderer, self).__init__(colormap, fill_value, background_color)
if labels:
assert len(colormap) == len(labels)
self.labels = labels
else:
self.labels = []
def get_legend(self, image_width=20, image_height=20):
legend_elements = []
if self.labels:
labels = self.labels
else:
formatter = PrecisionFormatter(self.values)
labels = [formatter.format(x) for x in self.values]
for index, value in enumerate(self.values):
legend_elements.append(LegendElement(
Image.new("RGBA", (image_width, image_height), tuple(self.palette[index])),
[LEGEND_TICK_POSITION],
[labels[index]]
))
return legend_elements
def render_image(self, data, row_major_order=True):
values = self._mask_fill_value(data.ravel())
max_value = max(values.max(), self.values.max())
if values.dtype.kind == 'u' and max_value < 65536:
palette_indices = numpy.zeros(max_value + 1, dtype=numpy.uint8)
palette_indices.fill(self.values.shape[0])
for index, value in enumerate(self.values):
palette_indices.itemset(value, index)
image_data = palette_indices[values].astype(numpy.uint8)
else:
image_data = numpy.zeros(values.shape, dtype=numpy.uint8)
image_data.fill(self.values.shape[0])
for index, value in enumerate(self.values):
image_data[values == value] = index
# have to invert dimensions because PIL thinks about this backwards
size = data.shape[::-1] if row_major_order else data.shape[:2]
return self._create_image(image_data, size)
def _generate_palette(self):
self.palette = numpy.asarray([entry[1].to_tuple() for entry in self.colormap]).astype(numpy.uint8)
def serialize(self):
ret = super(UniqueValuesRenderer, self).serialize()
if self.labels:
if 'options' in ret:
ret['options']['labels'] = self.labels
else:
ret['options'] = {'labels': self.labels}
return ret | from PIL import Image
import numpy
from trefoil.render.renderers import RasterRenderer
from trefoil.render.renderers.legend import LegendElement
from trefoil.utilities.format import PrecisionFormatter
LEGEND_TICK_POSITION = 0.5
class UniqueValuesRenderer(RasterRenderer):
def __init__(self, colormap, fill_value=None, background_color=None, labels=None):
"""
Maps unique values to colors. Any color not mapped is set to transparent or background_color.
:param colormap: list of value, Color instances: [(value, Color)...]
"""
assert len(colormap) > 0
super(UniqueValuesRenderer, self).__init__(colormap, fill_value, background_color)
if labels:
assert len(colormap) == len(labels)
self.labels = labels
else:
self.labels = []
def get_legend(self, image_width=20, image_height=20):
legend_elements = []
if self.labels:
labels = self.labels
else:
formatter = PrecisionFormatter(self.values)
labels = [formatter.format(x) for x in self.values]
for index, value in enumerate(self.values):
legend_elements.append(LegendElement(
Image.new("RGBA", (image_width, image_height), tuple(self.palette[index])),
[LEGEND_TICK_POSITION],
[labels[index]]
))
return legend_elements
def render_image(self, data, row_major_order=True):
values = self._mask_fill_value(data.ravel())
max_value = max(values.max(), self.values.max())
if values.dtype.kind == 'u' and max_value < 65536:
palette_indices = numpy.zeros(max_value + 1, dtype=numpy.uint8)
palette_indices.fill(self.values.shape[0])
for index, value in enumerate(self.values):
palette_indices.itemset(value, index)
image_data = palette_indices[values].astype(numpy.uint8)
else:
image_data = numpy.zeros(values.shape, dtype=numpy.uint8)
image_data.fill(self.values.shape[0])
for index, value in enumerate(self.values):
image_data[values == value] = index
# have to invert dimensions because PIL thinks about this backwards
size = data.shape[::-1] if row_major_order else data.shape[:2]
return self._create_image(image_data, size)
def _generate_palette(self):
self.palette = numpy.asarray([entry[1].to_tuple() for entry in self.colormap]).astype(numpy.uint8)
def serialize(self):
ret = super(UniqueValuesRenderer, self).serialize()
if self.labels:
if 'options' in ret:
ret['options']['labels'] = self.labels
else:
ret['options'] = {'labels': self.labels}
return ret | en | 0.713876 | Maps unique values to colors. Any color not mapped is set to transparent or background_color.
:param colormap: list of value, Color instances: [(value, Color)...] # have to invert dimensions because PIL thinks about this backwards | 2.502487 | 3 |
dpmmpython/dpmmwrapper.py | razzamir/dpmmpython | 0 | 6613008 | <reponame>razzamir/dpmmpython
import julia
julia.install()
from dpmmpython.priors import niw, multinomial
from julia import DPMMSubClusters
import numpy as np
import platform
import subprocess
import json
import os
class DPMMPython:
    """
    Wrapper for the DPMMSubClusters Julia package.
    """

    @staticmethod
    def create_prior(dim, mean_prior, mean_str, cov_prior, cov_str):
        """
        Creates a gaussian prior; if cov_prior is a scalar, creates an isotropic prior
        scaled to that, if it is a matrix, uses it as the covariance.
        :param dim: data dimension
        :param mean_prior: if a scalar, will create a vector scaled to that, if it is
            a vector then use it as the prior mean
        :param mean_str: prior mean pseudo count
        :param cov_prior: if a scalar, will create an isotropic covariance scaled to
            cov_prior, if a matrix will use it as the covariance
        :param cov_str: prior covariance pseudo counts
        :return: niw prior object
        """
        if isinstance(mean_prior, (int, float)):
            prior_mean = np.ones(dim) * mean_prior
        else:
            prior_mean = mean_prior
        if isinstance(cov_prior, (int, float)):
            prior_covariance = np.eye(dim) * cov_prior
        else:
            prior_covariance = cov_prior
        return niw(mean_str, prior_mean, dim + cov_str, prior_covariance)

    @staticmethod
    def fit(data, alpha, prior=None,
            iterations=100, verbose=False,
            burnout=15, gt=None, outlier_weight=0, outlier_params=None, gpu=True,
            force_kernel=2):
        """
        Wrapper for DPMMSubClusters fit; refer to
        "https://bgu-cs-vil.github.io/DPMMSubClusters.jl/stable/usage/" for the full
        specification.

        Note that directly working with the returned clusters can be problematic in
        software displaying the workspace (such as the PyCharm debugger).

        :param data: samples matrix (one sample per column)
        :param alpha: DP concentration parameter
        :param prior: prior object; required when gpu=True
        :param gpu: when True, run the external GPU backend in a subprocess
        :return: (labels, clusters, sublabels); on a backend error the same 3-tuple
            shape is returned with empty/None entries
        """
        if gpu:
            if prior is None:
                # The GPU backend serializes the prior to JSON, so it cannot run
                # without one (previously this crashed later on prior.to_JSON()).
                raise ValueError('a prior must be supplied when gpu=True')
            # The backend expects samples in rows, hence the axis swap.
            np.save("modelData.npy", np.swapaxes(data, 0, 1))
            modelParams = {'alpha': alpha,
                           'iterations': iterations,
                           'use_verbose': verbose,
                           'burnout_period': burnout,
                           'force_kernel': force_kernel,
                           'outlier_mod': outlier_weight,
                           'outlier_hyper_params': outlier_params,
                           'hyper_params': prior.to_JSON()
                           }
            if gt is not None:
                modelParams['gt'] = gt.tolist()
            with open('modelParams.json', 'w') as f:
                json.dump(modelParams, f)
            # Locate the platform-specific GPU executable; both platforms then get
            # the exact same command line.
            if platform.system().startswith('Windows'):
                executable = os.environ.get('DPMM_GPU_FULL_PATH_TO_PACKAGE_IN_WINDOWS')
            elif platform.system().startswith('Linux'):
                executable = os.environ.get('DPMM_GPU_FULL_PATH_TO_PACKAGE_IN_LINUX')
            else:
                # BUG FIX: the original only printed here and then crashed with a
                # NameError on `process`; fail explicitly instead.
                raise RuntimeError(f'Not support {platform.system()} OS')
            process = subprocess.Popen(
                [executable,
                 "--prior_type=" + prior.get_type(), "--model_path=modelData.npy",
                 "--params_path=modelParams.json", "--result_path=result.json"])
            process.communicate()  # wait for the backend to finish
            process.kill()
            process.terminate()
            with open('result.json') as f:
                results_json = json.load(f)
            if "error" in results_json:
                print(f'Error:{results_json["error"]}')
                # BUG FIX: keep the 3-tuple shape of the success path so callers
                # can always unpack labels, clusters, sublabels.
                return [], None, []
            os.remove("result.json")
            return results_json["labels"], None, [results_json["weights"], results_json["iter_count"]]
        else:
            if prior is None:
                results = DPMMSubClusters.fit(data, alpha, iters=iterations,
                                              verbose=verbose, burnout=burnout,
                                              gt=gt, outlier_weight=outlier_weight,
                                              outlier_params=outlier_params)
            else:
                results = DPMMSubClusters.fit(data, prior.to_julia_prior(), alpha,
                                              iters=iterations,
                                              verbose=verbose, burnout=burnout,
                                              gt=gt, outlier_weight=outlier_weight,
                                              outlier_params=outlier_params)
            return results[0], results[1], results[2:]

    @staticmethod
    def get_model_ll(points, labels, clusters):
        """
        Wrapper for DPMMSubClusters cluster statistics.
        :param points: data
        :param labels: labels
        :param clusters: vector of cluster distributions
        :return: vector with each cluster's average log-likelihood
        """
        return DPMMSubClusters.cluster_statistics(points, labels, clusters)[0]

    @staticmethod
    def add_procs(procs_count):
        """Spawn `procs_count` Julia worker processes and load the required packages on them."""
        j = julia.Julia()
        j.eval('using Distributed')
        j.eval(f'addprocs({procs_count})')
        j.eval('@everywhere using DPMMSubClusters')
        j.eval('@everywhere using LinearAlgebra')
        j.eval('@everywhere BLAS.set_num_threads(2)')

    @staticmethod
    def generate_gaussian_data(sample_count, dim, components, var):
        """
        Generate a synthetic gaussian mixture dataset.
        :param sample_count: number of samples
        :param dim: sample dimension
        :param components: number of components
        :param var: variance between component means
        :return: (data, gt)
        """
        result = DPMMSubClusters.generate_gaussian_data(sample_count, dim, components, var)
        # The Julia call returns at least (samples, labels); keep only those two.
        return result[0], result[1]

    @staticmethod
    def predict(model, data):
        """
        Given a DPMM model (located in fit(...)[2][-1] for backwards compatibility),
        predict the clusters for `data`.  Prediction uses each cluster's predictive
        posterior, in contrast to training, which samples from the posterior.
        :param model: a DPMM (Julia object) model returned from fit
        :param data: the data to predict, DxN (same layout as the fit argument)
        :return: labels
        """
        return DPMMSubClusters.predict(model, data)
if __name__ == "__main__":
    # Smoke test: synthesize gaussian clusters and fit the model with the GPU backend.
    j = julia.Julia()
    data,gt = DPMMPython.generate_gaussian_data(10000, 2, 10, 100.0)
    prior = niw(kappa = 1, mu = np.ones(2)*0, nu = 3, psi = np.eye(2))
    # labels_j,_,sub_labels= DPMMPython.fit(data, 100, prior = prior, verbose = True, gt = gt, gpu = False)
    labels_j,_,sub_labels = DPMMPython.fit(data, 100, prior = prior, verbose = True, gt = gt, gpu = True)
julia.install()
from dpmmpython.priors import niw, multinomial
from julia import DPMMSubClusters
import numpy as np
import platform
import subprocess
import json
import os
class DPMMPython:
    """
    Wrapper for the DPMMSubClusters Julia package.
    """

    @staticmethod
    def create_prior(dim, mean_prior, mean_str, cov_prior, cov_str):
        """
        Creates a gaussian prior; if cov_prior is a scalar, creates an isotropic prior
        scaled to that, if it is a matrix, uses it as the covariance.
        :param dim: data dimension
        :param mean_prior: if a scalar, will create a vector scaled to that, if it is
            a vector then use it as the prior mean
        :param mean_str: prior mean pseudo count
        :param cov_prior: if a scalar, will create an isotropic covariance scaled to
            cov_prior, if a matrix will use it as the covariance
        :param cov_str: prior covariance pseudo counts
        :return: niw prior object
        """
        if isinstance(mean_prior, (int, float)):
            prior_mean = np.ones(dim) * mean_prior
        else:
            prior_mean = mean_prior
        if isinstance(cov_prior, (int, float)):
            prior_covariance = np.eye(dim) * cov_prior
        else:
            prior_covariance = cov_prior
        return niw(mean_str, prior_mean, dim + cov_str, prior_covariance)

    @staticmethod
    def fit(data, alpha, prior=None,
            iterations=100, verbose=False,
            burnout=15, gt=None, outlier_weight=0, outlier_params=None, gpu=True,
            force_kernel=2):
        """
        Wrapper for DPMMSubClusters fit; refer to
        "https://bgu-cs-vil.github.io/DPMMSubClusters.jl/stable/usage/" for the full
        specification.

        Note that directly working with the returned clusters can be problematic in
        software displaying the workspace (such as the PyCharm debugger).

        :param data: samples matrix (one sample per column)
        :param alpha: DP concentration parameter
        :param prior: prior object; required when gpu=True
        :param gpu: when True, run the external GPU backend in a subprocess
        :return: (labels, clusters, sublabels); on a backend error the same 3-tuple
            shape is returned with empty/None entries
        """
        if gpu:
            if prior is None:
                # The GPU backend serializes the prior to JSON, so it cannot run
                # without one (previously this crashed later on prior.to_JSON()).
                raise ValueError('a prior must be supplied when gpu=True')
            # The backend expects samples in rows, hence the axis swap.
            np.save("modelData.npy", np.swapaxes(data, 0, 1))
            modelParams = {'alpha': alpha,
                           'iterations': iterations,
                           'use_verbose': verbose,
                           'burnout_period': burnout,
                           'force_kernel': force_kernel,
                           'outlier_mod': outlier_weight,
                           'outlier_hyper_params': outlier_params,
                           'hyper_params': prior.to_JSON()
                           }
            if gt is not None:
                modelParams['gt'] = gt.tolist()
            with open('modelParams.json', 'w') as f:
                json.dump(modelParams, f)
            # Locate the platform-specific GPU executable; both platforms then get
            # the exact same command line.
            if platform.system().startswith('Windows'):
                executable = os.environ.get('DPMM_GPU_FULL_PATH_TO_PACKAGE_IN_WINDOWS')
            elif platform.system().startswith('Linux'):
                executable = os.environ.get('DPMM_GPU_FULL_PATH_TO_PACKAGE_IN_LINUX')
            else:
                # BUG FIX: the original only printed here and then crashed with a
                # NameError on `process`; fail explicitly instead.
                raise RuntimeError(f'Not support {platform.system()} OS')
            process = subprocess.Popen(
                [executable,
                 "--prior_type=" + prior.get_type(), "--model_path=modelData.npy",
                 "--params_path=modelParams.json", "--result_path=result.json"])
            process.communicate()  # wait for the backend to finish
            process.kill()
            process.terminate()
            with open('result.json') as f:
                results_json = json.load(f)
            if "error" in results_json:
                print(f'Error:{results_json["error"]}')
                # BUG FIX: keep the 3-tuple shape of the success path so callers
                # can always unpack labels, clusters, sublabels.
                return [], None, []
            os.remove("result.json")
            return results_json["labels"], None, [results_json["weights"], results_json["iter_count"]]
        else:
            if prior is None:
                results = DPMMSubClusters.fit(data, alpha, iters=iterations,
                                              verbose=verbose, burnout=burnout,
                                              gt=gt, outlier_weight=outlier_weight,
                                              outlier_params=outlier_params)
            else:
                results = DPMMSubClusters.fit(data, prior.to_julia_prior(), alpha,
                                              iters=iterations,
                                              verbose=verbose, burnout=burnout,
                                              gt=gt, outlier_weight=outlier_weight,
                                              outlier_params=outlier_params)
            return results[0], results[1], results[2:]

    @staticmethod
    def get_model_ll(points, labels, clusters):
        """
        Wrapper for DPMMSubClusters cluster statistics.
        :param points: data
        :param labels: labels
        :param clusters: vector of cluster distributions
        :return: vector with each cluster's average log-likelihood
        """
        return DPMMSubClusters.cluster_statistics(points, labels, clusters)[0]

    @staticmethod
    def add_procs(procs_count):
        """Spawn `procs_count` Julia worker processes and load the required packages on them."""
        j = julia.Julia()
        j.eval('using Distributed')
        j.eval(f'addprocs({procs_count})')
        j.eval('@everywhere using DPMMSubClusters')
        j.eval('@everywhere using LinearAlgebra')
        j.eval('@everywhere BLAS.set_num_threads(2)')

    @staticmethod
    def generate_gaussian_data(sample_count, dim, components, var):
        """
        Generate a synthetic gaussian mixture dataset.
        :param sample_count: number of samples
        :param dim: sample dimension
        :param components: number of components
        :param var: variance between component means
        :return: (data, gt)
        """
        result = DPMMSubClusters.generate_gaussian_data(sample_count, dim, components, var)
        # The Julia call returns at least (samples, labels); keep only those two.
        return result[0], result[1]

    @staticmethod
    def predict(model, data):
        """
        Given a DPMM model (located in fit(...)[2][-1] for backwards compatibility),
        predict the clusters for `data`.  Prediction uses each cluster's predictive
        posterior, in contrast to training, which samples from the posterior.
        :param model: a DPMM (Julia object) model returned from fit
        :param data: the data to predict, DxN (same layout as the fit argument)
        :return: labels
        """
        return DPMMSubClusters.predict(model, data)
if __name__ == "__main__":
    # Smoke test: synthesize gaussian clusters and fit the model with the GPU backend.
    j = julia.Julia()
    data,gt = DPMMPython.generate_gaussian_data(10000, 2, 10, 100.0)
    prior = niw(kappa = 1, mu = np.ones(2)*0, nu = 3, psi = np.eye(2))
    # labels_j,_,sub_labels= DPMMPython.fit(data, 100, prior = prior, verbose = True, gt = gt, gpu = False)
    labels_j,_,sub_labels = DPMMPython.fit(data, 100, prior = prior, verbose = True, gt = gt, gpu = True)
examples/broadcast.py | AnotherKamila/distributed-algorithms-emulator | 0 | 6613009 | """Implements broadcast and convergecast on a tree topology."""
# In the whole file port 0 is assumed to be the parent.
from random import shuffle
from da import Node, Network
import topo
################################################################################
class Broadcast(Node):
    """Floods a message from the initiator down the tree (port 0 is the parent)."""

    def run(self):
        self.data['msg'] = None
        # The initiator injects the message by "sending" it to its own parent port
        # (the root is its own parent in this topology).
        if 'shout' in self.data:
            self.send(0, ('dispatch', self.data['shout']))
        while True:
            port, (tag, payload) = self.recv()
            if tag == 'dispatch' and port == 0:
                self.data['msg'] = payload
                # Forward the message to every child port before terminating.
                for child in range(1, self.deg):
                    self.send(child, ('dispatch', self.data['msg']))
                return
def run_broadcast(t):
    """Broadcast a test message over topology `t` and verify every node received it."""
    print('--- running broadcast ---')
    msg = 'test'
    net = Network(Broadcast, t)
    # Node 0 initiates the shout.
    net.nodes[0].data['shout'] = msg
    net.run()
    # Every node must hold the broadcast value afterwards.
    for node in net.nodes:
        if node.data['msg'] != msg:
            node.log('did not receive message!')
################################################################################
class Convergecast(Node):
    """Aggregates the maximum value up the tree (port 0 is the parent)."""

    def run(self):
        self.data['max'] = self.data['msg']
        received = 0
        # Collect one report from every child (ports 1..deg-1); leaves skip this
        # loop entirely and report immediately.
        while received < self.deg - 1:
            _, (tag, value) = self.recv()
            if tag == 'my max':
                self.data['max'] = max(self.data['max'], value)
                received += 1
        self.send(0, ('my max', self.data['max']))
def run_convergecast(t):
    """Run convergecast over topology `t` with shuffled values 0..99 and check the root's max."""
    print('--- running convergecast ---')
    net = Network(Convergecast, t)
    msgs = list(range(100)); shuffle(msgs)
    for n, m in zip(net.nodes, msgs): n.data['msg'] = m
    net.run()
    # check that it worked: the root (node 0) must hold the global maximum
    m = max([ n.data['msg'] for n in net.nodes ])
    k = net.nodes[0].data['max']
    # BUG FIX: the original passed format arguments to print() without calling
    # str.format, printing the literal braces plus the values; use an f-string.
    if k != m: print(f'Found {k}, but max is {m}!')
################################################################################
parents = [0, 6, 0, 2, 5, 6, 0] # 0 is its own parent -- root
# Build a bidirectional tree topology: node i is linked with parents[i].
t = topo.bidirectional({ i: [p] for i, p in enumerate(parents) })
if __name__ == '__main__':
    run_broadcast(t)
    run_convergecast(t)
| """Implements broadcast and convergecast on a tree topology."""
# In the whole file port 0 is assumed to be the parent.
from random import shuffle
from da import Node, Network
import topo
################################################################################
class Broadcast(Node):
    """Floods a message from the initiator down the tree (port 0 is the parent)."""

    def run(self):
        self.data['msg'] = None
        # The initiator injects the message by "sending" it to its own parent port
        # (the root is its own parent in this topology).
        if 'shout' in self.data:
            self.send(0, ('dispatch', self.data['shout']))
        while True:
            port, (tag, payload) = self.recv()
            if tag == 'dispatch' and port == 0:
                self.data['msg'] = payload
                # Forward the message to every child port before terminating.
                for child in range(1, self.deg):
                    self.send(child, ('dispatch', self.data['msg']))
                return
def run_broadcast(t):
    """Broadcast a test message over topology `t` and verify every node received it."""
    print('--- running broadcast ---')
    msg = 'test'
    net = Network(Broadcast, t)
    # Node 0 initiates the shout.
    net.nodes[0].data['shout'] = msg
    net.run()
    # Every node must hold the broadcast value afterwards.
    for node in net.nodes:
        if node.data['msg'] != msg:
            node.log('did not receive message!')
################################################################################
class Convergecast(Node):
    """Aggregates the maximum value up the tree (port 0 is the parent)."""

    def run(self):
        self.data['max'] = self.data['msg']
        received = 0
        # Collect one report from every child (ports 1..deg-1); leaves skip this
        # loop entirely and report immediately.
        while received < self.deg - 1:
            _, (tag, value) = self.recv()
            if tag == 'my max':
                self.data['max'] = max(self.data['max'], value)
                received += 1
        self.send(0, ('my max', self.data['max']))
def run_convergecast(t):
    """Run convergecast over topology `t` with shuffled values 0..99 and check the root's max."""
    print('--- running convergecast ---')
    net = Network(Convergecast, t)
    msgs = list(range(100)); shuffle(msgs)
    for n, m in zip(net.nodes, msgs): n.data['msg'] = m
    net.run()
    # check that it worked: the root (node 0) must hold the global maximum
    m = max([ n.data['msg'] for n in net.nodes ])
    k = net.nodes[0].data['max']
    # BUG FIX: the original passed format arguments to print() without calling
    # str.format, printing the literal braces plus the values; use an f-string.
    if k != m: print(f'Found {k}, but max is {m}!')
################################################################################
parents = [0, 6, 0, 2, 5, 6, 0] # 0 is its own parent -- root
# Build a bidirectional tree topology: node i is linked with parents[i].
t = topo.bidirectional({ i: [p] for i, p in enumerate(parents) })
if __name__ == '__main__':
    run_broadcast(t)
    run_convergecast(t)
| de | 0.549586 | Implements broadcast and convergecast on a tree topology. # In the whole file port 0 is assumed to be the parent. ################################################################################ # this one will initiate the shout # check that it worked ################################################################################ # check that it worked ################################################################################ # 0 is its own parent -- root | 2.914994 | 3 |
schedule/models.py | stenvix/lpschedule | 7 | 6613010 | # -*- coding: utf-8 -*-
"""App models."""
from schedule.core import db
from sqlalchemy.ext.declarative import declared_attr
class Base(object):
    """Declarative mixin: derives each model's table name from its lowercased class name."""
    @declared_attr
    def __tablename__(self):
        # e.g. class Lesson -> table "lesson"
        return self.__name__.lower()
class Lesson(Base, db.Model):
    """One timetable entry: a lesson occurrence for a group at a given time slot."""
    lesson_id = db.Column(db.Integer, primary_key=True)
    lesson_name = db.Column(db.Unicode)
    lesson_number = db.Column(db.Integer)
    lesson_type = db.Column(db.Unicode)
    # -1 appears to mean "not restricted" (every week / whole group) -- TODO confirm with callers.
    lesson_week = db.Column(db.Integer, default=-1)
    subgroup = db.Column(db.Integer, default=-1)
    room = db.Column(db.Unicode)
    semester_part = db.Column(db.Integer)
    active = db.Column(db.Boolean, default=True)
    day_number = db.Column(db.Integer)
    day_name = db.Column(db.Unicode)
    group_id = db.Column(db.Integer, db.ForeignKey('group.group_id'))
    time_id = db.Column(db.Integer, db.ForeignKey('time.time_id'))
    time = db.relationship('Time')
    group = db.relationship('Group', backref='lessons')

    @staticmethod
    def get_by_attrs(**kwargs):
        """Return the first lesson matching subgroup, lesson_number, lesson_week,
        day_number and the group given by its full name ('group' key), or None."""
        return Lesson.query.filter_by(
            subgroup=kwargs.get('subgroup'),
            lesson_number=kwargs.get('lesson_number'),
            lesson_week=kwargs.get('lesson_week'),
            day_number=kwargs.get('day_number'),
            group=Group.get_by_full_name(kwargs.get('group'))).first()

    @staticmethod
    def add(lesson):
        """Persist a new lesson."""
        db.session.add(lesson)
        db.session.commit()

    @staticmethod
    def deactivate(lesson):
        """Mark a lesson inactive and detach all of its teachers."""
        lesson.active = False
        lesson.teachers = []
        db.session.commit()

    @staticmethod
    def update(lesson, **kwargs):
        """Update `lesson` from keyword arguments, committing only when something
        actually changed.

        Recognized keys: lesson_name, lesson_type, room, day_name (copied
        verbatim), semester_part (coerced to int), teacher (replaces the whole
        teacher list) and active.
        """
        changes = False
        # The four plain string attributes share identical update logic, so they
        # are handled in one loop instead of four copy-pasted blocks.
        for attr in ('lesson_name', 'lesson_type', 'room', 'day_name'):
            value = kwargs.get(attr)
            if value is not None and getattr(lesson, attr) != value:
                setattr(lesson, attr, value)
                changes = True
        semester_part = kwargs.get('semester_part')
        if semester_part is not None and lesson.semester_part != int(semester_part):
            lesson.semester_part = int(semester_part)
            changes = True
        teacher = kwargs.get('teacher')
        if teacher is not None and teacher not in lesson.teachers:
            # A lesson keeps exactly one teacher: replace the whole collection.
            lesson.teachers = [teacher]
            changes = True
        active = kwargs.get('active')
        if active is not None and lesson.active != active:
            lesson.active = active
            changes = True
        if changes:
            db.session.commit()
class Institute(Base, db.Model):
    """An institute (faculty); groups and teachers belong to one."""
    institute_id = db.Column(db.Integer, primary_key=True)
    # NOTE(review): `convert_unicode` was deprecated and removed in SQLAlchemy 1.4 --
    # verify the pinned SQLAlchemy version still accepts it.
    institute_abbr = db.Column(db.String(10, convert_unicode=True), unique=True)
    institute_full_name = db.Column(db.String(convert_unicode=True))

    @staticmethod
    def get_by_attr(abbr):
        # Look up an institute by its unique abbreviation; returns None when absent.
        return Institute.query.filter_by(institute_abbr=abbr).first()

    @staticmethod
    def add(institute):
        # Persist a new institute.
        db.session.add(institute)
        db.session.commit()
class Group(Base, db.Model):
    """A student group within an institute."""
    group_id = db.Column(db.Integer, primary_key=True)
    group_full_name = db.Column(db.String)
    group_url = db.Column(db.String)
    institute_id = db.Column(db.Integer, db.ForeignKey('institute.institute_id'))
    active = db.Column(db.Boolean, default=True)
    institute = db.relationship('Institute')

    @staticmethod
    def get_by_full_name(group_full_name):
        """Return the group with the given full name, or None."""
        return Group.query.filter_by(group_full_name=group_full_name).first()

    @staticmethod
    def add(group):
        """Persist a new group.

        The parameter used to be named `Group`, shadowing the class itself;
        renamed to `group` (callers use it positionally).
        """
        db.session.add(group)
        db.session.commit()
class Time(Base, db.Model):
    """A lesson time slot: ordinal number plus start/end times."""
    time_id = db.Column(db.Integer, primary_key=True)
    time_number = db.Column(db.Integer, unique=True)
    time_start = db.Column(db.Time)
    time_end = db.Column(db.Time)

    @staticmethod
    def get_by_number(time_number):
        # Look up a slot by its unique ordinal number; returns None when absent.
        return Time.query.filter_by(time_number=time_number).first()

    @staticmethod
    def add(time):
        # Persist only transient instances: time_id is None until the first commit.
        # NOTE(review): confirm the commit is meant to run only for new instances.
        if time.time_id is None:
            db.session.add(time)
            db.session.commit()
class Teacher(Base, db.Model):
    """A teacher, linked to lessons through the lessonteacher association table."""
    teacher_id = db.Column(db.Integer, primary_key=True)
    teacher_name = db.Column(db.Unicode)
    active = db.Column(db.Boolean, default=True)
    lessons = db.relationship('Lesson', secondary='lessonteacher', backref='teachers')
    institute_id = db.Column(db.Integer, db.ForeignKey('institute.institute_id'))
    institute = db.relationship('Institute')

    @staticmethod
    def get_by_name(teacher_name, institute):
        """Return the teacher with this name inside the given institute, or None."""
        return Teacher.query.filter_by(teacher_name=teacher_name, institute_id=institute.institute_id).first()

    @staticmethod
    def add(teacher):
        """Persist a new teacher and return it.

        The parameter used to be named `Teacher`, shadowing the class itself;
        renamed to `teacher` (callers use it positionally).
        """
        db.session.add(teacher)
        db.session.commit()
        return teacher
class LessonTeacher(Base, db.Model):
    """Association table linking lessons to teachers (many-to-many)."""
    lessonteacher_id = db.Column(db.Integer, primary_key=True)
    teacher_id = db.Column(db.Integer, db.ForeignKey('teacher.teacher_id'))
    lesson_id = db.Column(db.Integer, db.ForeignKey('lesson.lesson_id'))
| # -*- coding: utf-8 -*-
"""App models."""
from schedule.core import db
from sqlalchemy.ext.declarative import declared_attr
class Base(object):
    """Declarative mixin: derives each model's table name from its lowercased class name."""
    @declared_attr
    def __tablename__(self):
        # e.g. class Lesson -> table "lesson"
        return self.__name__.lower()
class Lesson(Base, db.Model):
    """One timetable entry: a lesson occurrence for a group at a given time slot."""
    lesson_id = db.Column(db.Integer, primary_key=True)
    lesson_name = db.Column(db.Unicode)
    lesson_number = db.Column(db.Integer)
    lesson_type = db.Column(db.Unicode)
    # -1 appears to mean "not restricted" (every week / whole group) -- TODO confirm with callers.
    lesson_week = db.Column(db.Integer, default=-1)
    subgroup = db.Column(db.Integer, default=-1)
    room = db.Column(db.Unicode)
    semester_part = db.Column(db.Integer)
    active = db.Column(db.Boolean, default=True)
    day_number = db.Column(db.Integer)
    day_name = db.Column(db.Unicode)
    group_id = db.Column(db.Integer, db.ForeignKey('group.group_id'))
    time_id = db.Column(db.Integer, db.ForeignKey('time.time_id'))
    time = db.relationship('Time')
    group = db.relationship('Group', backref='lessons')

    @staticmethod
    def get_by_attrs(**kwargs):
        """Return the first lesson matching subgroup, lesson_number, lesson_week,
        day_number and the group given by its full name ('group' key), or None."""
        return Lesson.query.filter_by(
            subgroup=kwargs.get('subgroup'),
            lesson_number=kwargs.get('lesson_number'),
            lesson_week=kwargs.get('lesson_week'),
            day_number=kwargs.get('day_number'),
            group=Group.get_by_full_name(kwargs.get('group'))).first()

    @staticmethod
    def add(lesson):
        """Persist a new lesson."""
        db.session.add(lesson)
        db.session.commit()

    @staticmethod
    def deactivate(lesson):
        """Mark a lesson inactive and detach all of its teachers."""
        lesson.active = False
        lesson.teachers = []
        db.session.commit()

    @staticmethod
    def update(lesson, **kwargs):
        """Update `lesson` from keyword arguments, committing only when something
        actually changed.

        Recognized keys: lesson_name, lesson_type, room, day_name (copied
        verbatim), semester_part (coerced to int), teacher (replaces the whole
        teacher list) and active.
        """
        changes = False
        # The four plain string attributes share identical update logic, so they
        # are handled in one loop instead of four copy-pasted blocks.
        for attr in ('lesson_name', 'lesson_type', 'room', 'day_name'):
            value = kwargs.get(attr)
            if value is not None and getattr(lesson, attr) != value:
                setattr(lesson, attr, value)
                changes = True
        semester_part = kwargs.get('semester_part')
        if semester_part is not None and lesson.semester_part != int(semester_part):
            lesson.semester_part = int(semester_part)
            changes = True
        teacher = kwargs.get('teacher')
        if teacher is not None and teacher not in lesson.teachers:
            # A lesson keeps exactly one teacher: replace the whole collection.
            lesson.teachers = [teacher]
            changes = True
        active = kwargs.get('active')
        if active is not None and lesson.active != active:
            lesson.active = active
            changes = True
        if changes:
            db.session.commit()
class Institute(Base, db.Model):
    """An institute (faculty); groups and teachers belong to one."""
    institute_id = db.Column(db.Integer, primary_key=True)
    # NOTE(review): `convert_unicode` was deprecated and removed in SQLAlchemy 1.4 --
    # verify the pinned SQLAlchemy version still accepts it.
    institute_abbr = db.Column(db.String(10, convert_unicode=True), unique=True)
    institute_full_name = db.Column(db.String(convert_unicode=True))

    @staticmethod
    def get_by_attr(abbr):
        # Look up an institute by its unique abbreviation; returns None when absent.
        return Institute.query.filter_by(institute_abbr=abbr).first()

    @staticmethod
    def add(institute):
        # Persist a new institute.
        db.session.add(institute)
        db.session.commit()
class Group(Base, db.Model):
    """A student group within an institute."""
    group_id = db.Column(db.Integer, primary_key=True)
    group_full_name = db.Column(db.String)
    group_url = db.Column(db.String)
    institute_id = db.Column(db.Integer, db.ForeignKey('institute.institute_id'))
    active = db.Column(db.Boolean, default=True)
    institute = db.relationship('Institute')

    @staticmethod
    def get_by_full_name(group_full_name):
        """Return the group with the given full name, or None."""
        return Group.query.filter_by(group_full_name=group_full_name).first()

    @staticmethod
    def add(group):
        """Persist a new group.

        The parameter used to be named `Group`, shadowing the class itself;
        renamed to `group` (callers use it positionally).
        """
        db.session.add(group)
        db.session.commit()
class Time(Base, db.Model):
    """A lesson time slot: ordinal number plus start/end times."""
    time_id = db.Column(db.Integer, primary_key=True)
    time_number = db.Column(db.Integer, unique=True)
    time_start = db.Column(db.Time)
    time_end = db.Column(db.Time)

    @staticmethod
    def get_by_number(time_number):
        # Look up a slot by its unique ordinal number; returns None when absent.
        return Time.query.filter_by(time_number=time_number).first()

    @staticmethod
    def add(time):
        # Persist only transient instances: time_id is None until the first commit.
        # NOTE(review): confirm the commit is meant to run only for new instances.
        if time.time_id is None:
            db.session.add(time)
            db.session.commit()
class Teacher(Base, db.Model):
    """A teacher, linked to lessons through the lessonteacher association table."""
    teacher_id = db.Column(db.Integer, primary_key=True)
    teacher_name = db.Column(db.Unicode)
    active = db.Column(db.Boolean, default=True)
    lessons = db.relationship('Lesson', secondary='lessonteacher', backref='teachers')
    institute_id = db.Column(db.Integer, db.ForeignKey('institute.institute_id'))
    institute = db.relationship('Institute')

    @staticmethod
    def get_by_name(teacher_name, institute):
        """Return the teacher with this name inside the given institute, or None."""
        return Teacher.query.filter_by(teacher_name=teacher_name, institute_id=institute.institute_id).first()

    @staticmethod
    def add(teacher):
        """Persist a new teacher and return it.

        The parameter used to be named `Teacher`, shadowing the class itself;
        renamed to `teacher` (callers use it positionally).
        """
        db.session.add(teacher)
        db.session.commit()
        return teacher
class LessonTeacher(Base, db.Model):
    """Association table linking lessons to teachers (many-to-many)."""
    lessonteacher_id = db.Column(db.Integer, primary_key=True)
    teacher_id = db.Column(db.Integer, db.ForeignKey('teacher.teacher_id'))
    lesson_id = db.Column(db.Integer, db.ForeignKey('lesson.lesson_id'))
| en | 0.801949 | # -*- coding: utf-8 -*- App models. | 2.335909 | 2 |
PYQT/pyqt_13_combobox2.py | dogancantorun8/python-application | 0 | 6613011 | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 26 17:56:12 2021
@author: doğancan
"""
import sys
import csv
from PyQt5.Qt import *
class MainWindow(QWidget):
    """Main window: a combo box filled from Countries.csv plus an Ok button."""
    def __init__(self):
        super().__init__()
        self.resize(640, 480)
        self.cb = QComboBox(self)
        self.cb.setGeometry(10, 10, 300, 30)
        self.cb.setFont(QFont('Arial', 14))
        self.cb.setMaxVisibleItems(15)
        self.buttonOk = QPushButton('Ok', self)
        self.buttonOk.move(350, 10)
        self.buttonOk.clicked.connect(self.buttonOkHandler)
        # Populate the combo box with the last column of each CSV row.
        with open('Countries.csv', encoding='UTF-8') as f:
            for line in csv.reader(f):
                self.cb.addItem(line[-1])
    def buttonOkHandler(self):
        """Print the currently selected combo-box entry."""
        print(self.cb.currentText()) # the text of the currently selected item
        print(self.cb.currentIndex()) # the index of the currently selected item
print(self.cb.currentIndex()) # o anda hangi eleman seçilmişse onun indexini veriyor.
# Standard PyQt bootstrap: create the application, show the window, enter the event loop.
app = QApplication(sys.argv)
mainWindow = MainWindow()
mainWindow.show()
app.exec()
| # -*- coding: utf-8 -*-
"""
Created on Fri Mar 26 17:56:12 2021
@author: doğancan
"""
import sys
import csv
from PyQt5.Qt import *
class MainWindow(QWidget):
    """Main window: a combo box filled from Countries.csv plus an Ok button."""
    def __init__(self):
        super().__init__()
        self.resize(640, 480)
        self.cb = QComboBox(self)
        self.cb.setGeometry(10, 10, 300, 30)
        self.cb.setFont(QFont('Arial', 14))
        self.cb.setMaxVisibleItems(15)
        self.buttonOk = QPushButton('Ok', self)
        self.buttonOk.move(350, 10)
        self.buttonOk.clicked.connect(self.buttonOkHandler)
        # Populate the combo box with the last column of each CSV row.
        with open('Countries.csv', encoding='UTF-8') as f:
            for line in csv.reader(f):
                self.cb.addItem(line[-1])
    def buttonOkHandler(self):
        """Print the currently selected combo-box entry."""
        print(self.cb.currentText()) # the text of the currently selected item
        print(self.cb.currentIndex()) # the index of the currently selected item
print(self.cb.currentIndex()) # o anda hangi eleman seçilmişse onun indexini veriyor.
# Standard PyQt bootstrap: create the application, show the window, enter the event loop.
app = QApplication(sys.argv)
mainWindow = MainWindow()
mainWindow.show()
app.exec()
| tr | 0.914782 | # -*- coding: utf-8 -*- Created on Fri Mar 26 17:56:12 2021 @author: doğancan #o anda hangi eleman seçilmişse onu .Yani comboxtaki yazımı alıyorum # o anda hangi eleman seçilmişse onun indexini veriyor. | 2.5084 | 3 |
rnncomp/run_mnist.py | Seanny123/rnn-comparison | 6 | 6613012 | <reponame>Seanny123/rnn-comparison
import lstm_test
import pickle
import numpy as np
from dataman import make_run_args
from constants import *
import matplotlib.pyplot as plt
import ipdb
import sys
from IPython.core import ultratb
# Drop into a colorful post-mortem debugger on any uncaught exception.
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
                                     color_scheme='Linux', call_pdb=1)
# Load the pickled MNIST splits (train, validation, test); validation is unused.
# NOTE(review): a Python-2 era mnist.pkl may need encoding='latin1' here -- confirm.
with open("mnist.pkl", "rb") as f:
    train, _, test = pickle.load(f)
# One-hot encode the labels into shape (n_samples, 1, 10).
train_targets = np.zeros((train[1].shape[0], 1, 10), dtype=np.float32)
train_targets[np.arange(train[1].shape[0]), :, train[1]] = 1.0
test_targets = np.zeros((test[1].shape[0], 1, 10), dtype=np.float32)
test_targets[np.arange(test[1].shape[0]), :, test[1]] = 1.0
# Reshape training images to (n_samples, 1, 784); the test images stay (n_samples, 784).
# NOTE(review): the asymmetry looks deliberate for lstm_test.main -- verify.
train = train[0].reshape((-1, 1, 784))
test = test[0]
lstm_test.main(784, 1, 10, (train, train_targets), (test, test_targets))
| import lstm_test
import pickle
import numpy as np
from dataman import make_run_args
from constants import *
import matplotlib.pyplot as plt
import ipdb
import sys
from IPython.core import ultratb
# Drop into a colorful post-mortem debugger on any uncaught exception.
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
                                     color_scheme='Linux', call_pdb=1)
# Load the pickled MNIST splits (train, validation, test); validation is unused.
# NOTE(review): a Python-2 era mnist.pkl may need encoding='latin1' here -- confirm.
with open("mnist.pkl", "rb") as f:
    train, _, test = pickle.load(f)
# One-hot encode the labels into shape (n_samples, 1, 10).
train_targets = np.zeros((train[1].shape[0], 1, 10), dtype=np.float32)
train_targets[np.arange(train[1].shape[0]), :, train[1]] = 1.0
test_targets = np.zeros((test[1].shape[0], 1, 10), dtype=np.float32)
test_targets[np.arange(test[1].shape[0]), :, test[1]] = 1.0
# Reshape training images to (n_samples, 1, 784); the test images stay (n_samples, 784).
# NOTE(review): the asymmetry looks deliberate for lstm_test.main -- verify.
train = train[0].reshape((-1, 1, 784))
test = test[0]
lstm_test.main(784, 1, 10, (train, train_targets), (test, test_targets))
apps/users/models.py | jin-hao-chen/team_go_backend | 0 | 6613013 | <reponame>jin-hao-chen/team_go_backend
from datetime import datetime
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.utils.translation import gettext_lazy as _
from users.validators import validate_username
from clubs.models import Institute
from clubs.models import Club
# from ckeditor_uploader.fields import RichTextUploadingField
from DjangoUeditor.models import UEditorField
class AdminInfo(AbstractUser):
    """Back-office administrator account.

    AdminInfo is kept separate from the student table: it is only used
    internally by the back office (statistics, exporting aggregated data
    files, ...), so it carries just a handful of fields.
    """
    username = models.CharField(max_length=10, validators=[validate_username], verbose_name='学号', unique=True)
    password = models.CharField(_('密码'), max_length=128)
    nickname = models.CharField(max_length=30, verbose_name='昵称', blank=True, null=True)
    mobile = models.CharField(max_length=11, verbose_name='手机号', blank=True, null=True)
    email = models.EmailField(_('邮箱'), blank=True)

    class Meta:
        verbose_name = '管理员'
        verbose_name_plural = verbose_name

    def __str__(self):
        # BUG FIX: nickname is nullable, and __str__ must always return a str
        # (returning None raises TypeError, e.g. in the admin site), so fall
        # back to the non-null unique username.
        return self.nickname or self.username
class Teacher(models.Model):
    """A club's supervising teacher (one-to-one with the club)."""
    # staff number, validated with the same rule as student usernames
    username = models.CharField(max_length=10, validators=[validate_username], verbose_name='工号', unique=True, \
                                null=False, blank=False)
    nickname = models.CharField(max_length=20, verbose_name='老师名', null=False, blank=False)
    mobile = models.CharField(max_length=11, verbose_name='手机号', null=False, blank=False)
    club = models.OneToOneField(to=Club, on_delete=models.CASCADE, verbose_name='指导的社团', null=True)

    class Meta:
        verbose_name = '指导老师'
        verbose_name_plural = '指导老师'

    def __str__(self):
        return self.username
class User(models.Model):
    """A student account (kept separate from the Django auth user model)."""
    username = models.CharField(max_length=10, validators=[validate_username], verbose_name='学号', unique=True)
    # NOTE(review): stored via a plain CharField -- make sure callers hash the
    # password before saving; nothing here enforces it.
    password = models.CharField(verbose_name='密码', max_length=128, blank=False, null=False)
    nickname = models.CharField(max_length=30, verbose_name='学生名', blank=False, null=False)
    mobile = models.CharField(max_length=11, verbose_name='手机号', null=True, blank=True)
    is_admin = models.BooleanField(verbose_name='是否为管理员', null=False, blank=False, default=False)
    institute = models.ForeignKey(to=Institute, on_delete=models.CASCADE, verbose_name='所属学院', null=False, blank=False)
    admission_time = models.DateField(verbose_name='入学时间', null=False, blank=False)
    introduction = models.TextField(max_length=3000, verbose_name='个人简介')
    # introduction = UEditorField(verbose_name='个人简介', width=600, height=300, toolbars="full")
    icon = models.ImageField(upload_to='media/images/users/icons', null=True, blank=True)
    # NOTE(review): null=True has no effect on ManyToManyField (Django ignores it).
    clubs = models.ManyToManyField(to=Club, verbose_name='加入的社团', null=True, blank=True)

    class Meta:
        verbose_name = '用户'
        verbose_name_plural = '用户'

    def __str__(self):
        return self.username
class Token(models.Model):
    """Authentication token, one per user."""
    token = models.CharField(max_length=128, verbose_name='token')
    user = models.OneToOneField(to=User, verbose_name='用户', on_delete=models.CASCADE)
    # Callable default: evaluated at row-creation time, not at class definition.
    create_time = models.DateTimeField(verbose_name='创建时间', default=datetime.now)

    class Meta:
        verbose_name = 'token'
        verbose_name_plural = verbose_name

    def __str__(self):
        return 'token'
| from datetime import datetime
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.utils.translation import gettext_lazy as _
from users.validators import validate_username
from clubs.models import Institute
from clubs.models import Club
# from ckeditor_uploader.fields import RichTextUploadingField
from DjangoUeditor.models import UEditorField
class AdminInfo(AbstractUser):
"""后台管理员表
Notes
-----
AdminInfo 和 学生表是分开来的, AdminInfo 是提供给后台内部使用的
一般用于后台统计数据, 导出统计数据文件等, 因此 AdminInfo 只包含了
少数的字段
"""
username = models.CharField(max_length=10, validators=[validate_username], verbose_name='学号', unique=True)
password = models.CharField(_('密码'), max_length=128)
nickname = models.CharField(max_length=30, verbose_name='昵称', blank=True, null=True)
mobile = models.CharField(max_length=11, verbose_name='手机号', blank=True, null=True)
email = models.EmailField(_('邮箱'), blank=True)
class Meta:
verbose_name = '管理员'
verbose_name_plural = verbose_name
def __str__(self):
return self.nickname
class Teacher(models.Model):
username = models.CharField(max_length=10, validators=[validate_username], verbose_name='工号', unique=True, \
null=False, blank=False)
nickname = models.CharField(max_length=20, verbose_name='老师名', null=False, blank=False)
mobile = models.CharField(max_length=11, verbose_name='手机号', null=False, blank=False)
club = models.OneToOneField(to=Club, on_delete=models.CASCADE, verbose_name='指导的社团', null=True)
class Meta:
verbose_name = '指导老师'
verbose_name_plural = '指导老师'
def __str__(self):
return self.username
class User(models.Model):
username = models.CharField(max_length=10, validators=[validate_username], verbose_name='学号', unique=True)
password = models.CharField(verbose_name='密码', max_length=128, blank=False, null=False)
nickname = models.CharField(max_length=30, verbose_name='学生名', blank=False, null=False)
mobile = models.CharField(max_length=11, verbose_name='手机号', null=True, blank=True)
is_admin = models.BooleanField(verbose_name='是否为管理员', null=False, blank=False, default=False)
institute = models.ForeignKey(to=Institute, on_delete=models.CASCADE, verbose_name='所属学院', null=False, blank=False)
admission_time = models.DateField(verbose_name='入学时间', null=False, blank=False)
introduction = models.TextField(max_length=3000, verbose_name='个人简介')
# introduction = UEditorField(verbose_name='个人简介', width=600, height=300, toolbars="full")
icon = models.ImageField(upload_to='media/images/users/icons', null=True, blank=True)
clubs = models.ManyToManyField(to=Club, verbose_name='加入的社团', null=True, blank=True)
class Meta:
verbose_name = '用户'
verbose_name_plural = '用户'
def __str__(self):
return self.username
class Token(models.Model):
token = models.CharField(max_length=128, verbose_name='token')
user = models.OneToOneField(to=User, verbose_name='用户', on_delete=models.CASCADE)
create_time = models.DateTimeField(verbose_name='创建时间', default=datetime.now)
class Meta:
verbose_name = 'token'
verbose_name_plural = verbose_name
def __str__(self):
return 'token' | zh | 0.79467 | # from ckeditor_uploader.fields import RichTextUploadingField 后台管理员表 Notes ----- AdminInfo 和 学生表是分开来的, AdminInfo 是提供给后台内部使用的 一般用于后台统计数据, 导出统计数据文件等, 因此 AdminInfo 只包含了 少数的字段 # introduction = UEditorField(verbose_name='个人简介', width=600, height=300, toolbars="full") | 2.132805 | 2 |
guest_user/settings.py | blag/django-guest-user | 11 | 6613014 | import sys # noqa
from .app_settings import AppSettings
app_settings = AppSettings("GUEST_USER_")
app_settings.__name__ = __name__
sys.modules[__name__] = app_settings
| import sys # noqa
from .app_settings import AppSettings
app_settings = AppSettings("GUEST_USER_")
app_settings.__name__ = __name__
sys.modules[__name__] = app_settings
| none | 1 | 1.185285 | 1 | |
owcsimpy/geoobjects/models/pointsource_py.py | ardimasp/owcsimpy | 0 | 6613015 | <gh_stars>0
import math
import itertools
import numpy as np
from owcsimpy.geoobjects.bases.vector_py import Vector_py as Vector
class PointSource_py(Vector):
    """ A point source models.

    Mainly used for modeling an LED.

    HumanCube_py is inherited from
    :class:`~owcsimpy.geoobjects.bases.vector_py.Vector_py`

    See Also
    --------
    :class:`~owcsimpy.geoobjects.bases.vector_py.Vector_py`

    Parameters
    ----------
    polar: float
        Polar angle of the normal vector in rads
    azimuth: float
        Azimuth angle in rads
    loc: ndarray(3,)
        Location of the point source.

    Attributes
    ----------
    loc: ndarray(3,)
        Location of the point source

    Examples
    --------
    .. plot::
            :format: doctest
            :include-source: True

            >>> import matplotlib.pyplot as plt
            >>> import numpy as np
            >>>
            >>> from owcsimpy.geoobjects.models.pointsource_py import PointSource_py as PointSource
            >>> from owcsimpy.geoutils.draw import draw
            >>>
            >>> led = PointSource(np.pi,0,np.array([2.5,2,3]))
            >>>
            >>> draw(vectors=led,xlim=[0,5],ylim=[0,4],zlim=[0,3])
            >>>
            >>> plt.show()

    """

    def __init__(self,polar,azimuth,loc,m=1):
        # The direction is stored as a unit vector in spherical coordinates;
        # only (polar, azimuth) carry information since the radius is fixed at 1.
        coord = np.array([1,polar,azimuth])
        super().__init__(coord=coord,refPoint=loc,which='spherical')
        self.loc = loc
        # NOTE(review): 'm' is not documented in the class docstring; it appears
        # to be the Lambertian mode order of the LED (default 1) -- confirm upstream.
        self.m = m

    def getSimplePointSource(self):
        """ Get a simple point source.

        Returns
        -------
        tuple:
            (normalVect: ndarray(3,),ctrPoint: ndarray(3,), m: float)

        Notes
        -----
        The order of the output matters.

        """
        # Unit direction vector (from Vector), location, and mode number, in that order.
        return (self.cartesian,self.loc,self.m)
| import math
import itertools
import numpy as np
from owcsimpy.geoobjects.bases.vector_py import Vector_py as Vector
class PointSource_py(Vector):
""" A point source models.
Mainly used for modeling an LED.
HumanCube_py is inherited from
:class:`~owcsimpy.geoobjects.bases.vector_py.Vector_py`
See Also
--------
:class:`~owcsimpy.geoobjects.bases.vector_py.Vector_py`
Parameters
----------
polar: float
Polar angle of the normal vector in rads
azimuth: float
Azimuth angle in rads
loc: ndarray(3,)
Location of the point source.
Attributes
----------
loc: ndarray(3,)
Location of the point source
Examples
--------
.. plot::
:format: doctest
:include-source: True
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>>
>>> from owcsimpy.geoobjects.models.pointsource_py import PointSource_py as PointSource
>>> from owcsimpy.geoutils.draw import draw
>>>
>>> led = PointSource(np.pi,0,np.array([2.5,2,3]))
>>>
>>> draw(vectors=led,xlim=[0,5],ylim=[0,4],zlim=[0,3])
>>>
>>> plt.show()
"""
def __init__(self,polar,azimuth,loc,m=1):
coord = np.array([1,polar,azimuth])
super().__init__(coord=coord,refPoint=loc,which='spherical')
self.loc = loc
self.m = m
def getSimplePointSource(self):
""" Get a simple point source.
Returns
-------
tuple:
(normalVect: ndarray(3,),ctrPoint: ndarray(3,), m: float)
Notes
-----
The order of the output matters.
"""
return (self.cartesian,self.loc,self.m) | en | 0.48464 | A point source models. Mainly used for modeling an LED. HumanCube_py is inherited from :class:`~owcsimpy.geoobjects.bases.vector_py.Vector_py` See Also -------- :class:`~owcsimpy.geoobjects.bases.vector_py.Vector_py` Parameters ---------- polar: float Polar angle of the normal vector in rads azimuth: float Azimuth angle in rads loc: ndarray(3,) Location of the point source. Attributes ---------- loc: ndarray(3,) Location of the point source Examples -------- .. plot:: :format: doctest :include-source: True >>> import matplotlib.pyplot as plt >>> import numpy as np >>> >>> from owcsimpy.geoobjects.models.pointsource_py import PointSource_py as PointSource >>> from owcsimpy.geoutils.draw import draw >>> >>> led = PointSource(np.pi,0,np.array([2.5,2,3])) >>> >>> draw(vectors=led,xlim=[0,5],ylim=[0,4],zlim=[0,3]) >>> >>> plt.show() Get a simple point source. Returns ------- tuple: (normalVect: ndarray(3,),ctrPoint: ndarray(3,), m: float) Notes ----- The order of the output matters. | 2.979675 | 3 |
python_lib/mitxgraders/matrixsampling.py | haharay/python_lib | 17 | 6613016 | <reponame>haharay/python_lib
"""
matrixsampling.py
Contains classes for sampling vector/matrix/tensor values:
* RealVectors
* ComplexVectors
* RealMatrices
* ComplexMatrices
* RealTensors
* ComplexTensors
* IdentityMatrixMultiples
* SquareMatrices
* OrthogonalMatrices
* UnitaryMatrices
All of these classes perform random sampling. To obtain a sample, use class.gen_sample()
"""
from __future__ import print_function, division, absolute_import, unicode_literals
import numpy as np
from voluptuous import Schema, Required, All, Coerce, Any, Range
from mitxgraders.exceptions import ConfigError
from mitxgraders.sampling import VariableSamplingSet, RealInterval, ScalarSamplingSet
from mitxgraders.helpers.validatorfuncs import NumberRange, is_shape_specification
from mitxgraders.helpers.calc import MathArray
class Unavailable(object):
    """
    Stand-in for the scipy.stats matrix-group samplers when scipy/numpy are
    too old to provide them.

    Any attempt to draw a sample raises NotImplementedError, so the error
    surfaces only when the missing feature is actually used.
    """
    def rvs(self, dimension):
        """Always raise: the underlying scipy sampler could not be imported."""
        message = ('This feature requires newer versions of numpy '
                   'and scipy than are available.')
        raise NotImplementedError(message)
try:
    from scipy.stats import ortho_group, special_ortho_group, unitary_group
except ImportError:
    # scipy is too old (< 0.18) to provide the Haar-distributed matrix-group
    # samplers; substitute stubs that raise NotImplementedError only if/when
    # OrthogonalMatrices or UnitaryMatrices actually request a sample.
    ortho_group = Unavailable()
    special_ortho_group = Unavailable()
    unitary_group = Unavailable()

# Set the objects to be imported from this grader
__all__ = [
    "RealVectors",
    "ComplexVectors",
    "RealMatrices",
    "ComplexMatrices",
    "RealTensors",
    "ComplexTensors",
    "IdentityMatrixMultiples",
    "SquareMatrices",
    "OrthogonalMatrices",
    "UnitaryMatrices"
]
class Retry(Exception):
    """
    Raised to indicate that the randomly generated array cannot satisfy the desired
    constraints, and a new random draw should be taken.

    Raised from the apply_symmetry/normalize hooks and caught in
    ArraySamplingSet.generate_sample, which then generates a fresh candidate.
    """
class ArraySamplingSet(VariableSamplingSet):
    """
    Represents a set from which random array variable samples are taken.

    The norm used is the standard Euclidean norm: the root-square-sum of every
    entry in the array.

    This is the lowest-level array sampling set, and exists mainly to be
    subclassed for specific purposes. While this class cannot be made abstract,
    its direct use is strongly discouraged.

    Config:
    =======
        - shape (int|(int)|[int]): Dimensions of the array, specified as a list or tuple of
            the dimensions in each index as (n_1, n_2, ...). Can also use an integer
            to select a vector of that length. (required; no default)
        - norm ([start, stop]): Range for the overall norm of the array. Can be a
            list [start, stop] or a dictionary {'start':start, 'stop':stop}.
            (default [1, 5])
        - complex (bool): Whether or not the matrix is complex (default False)
    """
    schema_config = Schema({
        Required('shape'): is_shape_specification(min_dim=1),
        Required('norm', default=[1, 5]): NumberRange(),
        Required('complex', default=False): bool
    })

    def __init__(self, config=None, **kwargs):
        """Validate the configuration, then wrap the norm range in a RealInterval."""
        super(ArraySamplingSet, self).__init__(config, **kwargs)
        # Used by normalize() to draw a target norm for each sample
        self.norm = RealInterval(self.config['norm'])

    def gen_sample(self):
        """
        Return a single random sample, wrapped as a MathArray.

        Subclasses should override generate_sample() (the raw numpy routine)
        rather than this method.
        """
        return MathArray(self.generate_sample())

    def generate_sample(self):
        """
        Build a random numpy array whose shape and norm are set by the config.

        Every candidate is passed through apply_symmetry() and then normalize();
        either hook may raise Retry to reject the candidate, in which case a
        fresh draw is attempted (up to 100 times).

        Returns a numpy array.
        """
        for _attempt in range(100):
            # Entries are drawn uniformly from [-0.5, 0.5)
            candidate = np.random.random_sample(self.config['shape']) - 0.5
            if self.config['complex']:
                # The imaginary parts get their own independent draw
                imaginary = np.random.random_sample(self.config['shape']) - 0.5
                candidate = candidate + 1j*imaginary
            try:
                # Symmetrize, then rescale; either step may reject via Retry
                return self.normalize(self.apply_symmetry(candidate))
            except Retry:
                continue
        raise ValueError('Unable to construct sample for {}'
                         .format(type(self).__name__))  # pragma: no cover

    def apply_symmetry(self, array):
        """Hook for subclasses to impose symmetries; the base class does nothing."""
        return array

    def normalize(self, array):
        """Rescale the array so its Euclidean norm lands in the configured range."""
        target = self.norm.gen_sample()
        return array * target / np.linalg.norm(array)
class VectorSamplingSet(ArraySamplingSet):
    """
    Sampling set of vectors. While we cannot make this class abstract, you should use
    RealVectors or ComplexVectors instead.

    Config:
    =======
        Same as ArraySamplingSet, but:
        - shape can be a plain integer indicating number of components
        - if shape is tuple/list, must have length 1
        - default shape is (3, ), for a 3D vector
    """
    # Restrict 'shape' to rank 1 (a vector); a bare int is coerced to (int,)
    schema_config = ArraySamplingSet.schema_config.extend({
        Required('shape', default=(3,)): is_shape_specification(min_dim=1, max_dim=1)
    })
class RealVectors(VectorSamplingSet):
    """
    Sampling set of real vectors.

    Config:
    =======
        Same as VectorSamplingSet, but:
        - complex is always False

    Usage:
    ======
    By default, vectors have 3 components:
    >>> vectors = RealVectors()
    >>> vectors.gen_sample().shape
    (3,)
    """
    # Lock 'complex' to False: this set produces real-valued vectors only
    schema_config = VectorSamplingSet.schema_config.extend({
        Required('complex', default=False): False
    })
class ComplexVectors(VectorSamplingSet):
    """
    Sampling set of complex vectors.

    Config:
    =======
        Same as VectorSamplingSet, but:
        - complex is always True

    Usage:
    ======
    Complex vectors have complex components:
    >>> vectors = ComplexVectors()
    >>> v = vectors.gen_sample()
    >>> np.array_equal(v, np.conj(v))
    False
    """
    # Lock 'complex' to True: both real and imaginary parts are sampled
    schema_config = VectorSamplingSet.schema_config.extend({
        Required('complex', default=True): True
    })
class TensorSamplingSet(ArraySamplingSet):
    """
    Sampling set of tensors. While we cannot make this class abstract, you should use
    RealTensors or ComplexTensors instead.

    Config:
    =======
        Same as ArraySamplingSet, but:
        - shape must be a tuple with at least 3 dimensions
    """
    # Rank >= 3 distinguishes tensors from vectors (rank 1) and matrices (rank 2)
    schema_config = ArraySamplingSet.schema_config.extend({
        Required('shape'): is_shape_specification(min_dim=3)
    })
class RealTensors(TensorSamplingSet):
    """
    Sampling set of real tensors.

    Config:
    =======
        Same as TensorSamplingSet, but:
        - complex is always False

    Usage:
    ======
    Sample tensors with shape [4, 2, 5]:
    >>> real_tensors = RealTensors(shape=[4, 2, 5])
    >>> sample = real_tensors.gen_sample()
    >>> sample.shape
    (4, 2, 5)

    Samples are of class MathArray:
    >>> isinstance(sample, MathArray)
    True

    Specify a range for the tensor's norm:
    >>> real_tensors = RealTensors(shape=[4, 2, 5], norm=[10, 20])
    >>> sample = real_tensors.gen_sample()
    >>> 10 < np.linalg.norm(sample) < 20
    True
    """
    # Lock 'complex' to False: real-valued tensors only
    schema_config = TensorSamplingSet.schema_config.extend({
        Required('complex', default=False): False
    })
class ComplexTensors(TensorSamplingSet):
    """
    Sampling set of complex tensors.

    Config:
    =======
        Same as TensorSamplingSet, but:
        - complex is always True

    Usage:
    ======
    Sample tensors with shape [4, 2, 5]:
    >>> tensors = ComplexTensors(shape=[4, 2, 5])
    >>> t = tensors.gen_sample()
    >>> t.shape
    (4, 2, 5)

    Complex tensors have complex components:
    >>> np.array_equal(t, np.conj(t))
    False
    """
    # Lock 'complex' to True: both real and imaginary parts are sampled
    schema_config = TensorSamplingSet.schema_config.extend({
        Required('complex', default=True): True
    })
class MatrixSamplingSet(ArraySamplingSet):
    """
    Base sampling set of matrices. While we cannot make this class abstract, you should
    use a more specific subclass instead.

    Config:
    =======
        Same as ArraySamplingSet, but:
        - shape must be a tuple/list with length 2
        - default shape is (2, 2), for a 2x2 matrix
    """
    # Restrict 'shape' to exactly rank 2 (rows, columns); default is a 2x2 matrix
    schema_config = ArraySamplingSet.schema_config.extend({
        Required('shape', default=(2, 2)): is_shape_specification(min_dim=2, max_dim=2)
    })
class GeneralMatrices(MatrixSamplingSet):
    """
    Common base for sampling sets of arbitrary (optionally triangular) matrices.
    Not intended for direct use; see RealMatrices and ComplexMatrices instead.

    Config:
    =======
        Same as MatrixSamplingSet, but:
        - triangular (None, 'upper', 'lower'): Specify if you want a triangular
            matrix (default None)
    """
    schema_config = MatrixSamplingSet.schema_config.extend({
        Required('triangular', default=None): Any(None, 'upper', 'lower')
    })

    def apply_symmetry(self, array):
        """Project the random candidate onto the requested triangle, if any."""
        projections = {'upper': np.triu, 'lower': np.tril}
        projector = projections.get(self.config['triangular'])
        return array if projector is None else projector(array)
class RealMatrices(GeneralMatrices):
    """
    Sampling set of real matrices.

    Config:
    =======
        Same as GeneralMatrices, but:
        - complex is always False

    Usage:
    ======
    By default, matrices have two rows and two columns:
    >>> matrices = RealMatrices()
    >>> matrices.gen_sample().shape
    (2, 2)

    We can generate upper triangular matrices:
    >>> from mitxgraders.helpers.calc import within_tolerance
    >>> matrices = RealMatrices(triangular='upper')
    >>> m = matrices.gen_sample()
    >>> within_tolerance(m, MathArray(np.triu(m)), 0)
    True

    and lower triangular matrices:
    >>> matrices = RealMatrices(triangular='lower')
    >>> m = matrices.gen_sample()
    >>> within_tolerance(m, MathArray(np.tril(m)), 0)
    True
    """
    # Lock 'complex' to False: real-valued matrices only
    schema_config = GeneralMatrices.schema_config.extend({
        Required('complex', default=False): False
    })
class ComplexMatrices(GeneralMatrices):
    """
    Sampling set of complex matrices.

    Config:
    =======
        Same as GeneralMatrices, but:
        - complex is always True

    Usage:
    ======
    Complex matrices have complex components:
    >>> matrices = ComplexMatrices()
    >>> m = matrices.gen_sample()
    >>> np.array_equal(m, np.conj(m))
    False
    """
    # Lock 'complex' to True: both real and imaginary parts are sampled
    schema_config = GeneralMatrices.schema_config.extend({
        Required('complex', default=True): True
    })
class SquareMatrixSamplingSet(MatrixSamplingSet):
    """
    Common base for sampling sets of square matrices, configured by a single
    'dimension' instead of a 'shape'. Not intended for direct use; you likely
    want a subclass such as SquareMatrices.

    Config:
    =======
        Same as MatrixSamplingSet, but:
        - dimension (int): Dimension of the matrix (minimum 2).
            The 'shape' property is not used.
    """
    schema_config = MatrixSamplingSet.schema_config.extend({
        Required('shape', default=None): None,
        Required('dimension', default=2): All(int, Range(2, float('inf')))
    })

    def __init__(self, config=None, **kwargs):
        """Validate the configuration, then derive the square 'shape' from 'dimension'."""
        super(SquareMatrixSamplingSet, self).__init__(config, **kwargs)
        dim = self.config['dimension']
        self.config['shape'] = (dim, dim)
class IdentityMatrixMultiples(SquareMatrixSamplingSet):
    """
    Sampling set producing random scalar multiples of the identity matrix of a
    given dimension.

    Config:
    =======
        Same as MatrixSamplingSet, but:
        - sampler: A scalar sampling set for the multiplicative constant
            (default RealInterval([1, 5]))
        Note that the 'complex' and 'norm' properties are ignored.

    Usage:
    ======
    By default, we generate 2x2 matrices:
    >>> matrices = IdentityMatrixMultiples()
    >>> matrices.gen_sample().shape
    (2, 2)

    We can generate NxN matrices by specifying the dimension:
    >>> matrices = IdentityMatrixMultiples(dimension=4)
    >>> matrices.gen_sample().shape
    (4, 4)

    The scalar multiple can be generated in a number of ways:
    >>> from mitxgraders import ComplexSector
    >>> matrices = IdentityMatrixMultiples(sampler=[1,3])
    >>> sect = ComplexSector(modulus=[0,1], argument=[-np.pi,np.pi])
    >>> matrices = IdentityMatrixMultiples(sampler=sect)

    The resulting samples are simply a scalar times the identity matrix:
    >>> matrices = IdentityMatrixMultiples()
    >>> m = matrices.gen_sample()
    >>> np.array_equal(m, m[0, 0] * np.eye(2))
    True
    """
    # The multiplicative constant may come from any ScalarSamplingSet, or from a
    # [start, stop] list that is coerced into a RealInterval. DependentSampler
    # and DiscreteSet are deliberately not accepted: they need not yield a scalar.
    schema_config = SquareMatrixSamplingSet.schema_config.extend({
        Required('sampler', default=RealInterval()): Any(ScalarSamplingSet,
                                                         All(list, Coerce(RealInterval)))
    })

    def generate_sample(self):
        """Draw the scalar from the configured sampler and scale the identity matrix."""
        multiplier = self.config['sampler'].gen_sample()
        return multiplier * np.eye(self.config['dimension'])
class SquareMatrices(SquareMatrixSamplingSet):
    """
    Sampling set for square matrices. Various symmetry properties are possible, including
    diagonal, symmetric, antisymmetric, hermitian and antihermitian. The trace and
    determinant can also be controlled.

    There are four kinds of special square matrices that covered by other sampling sets:
      * OrthogonalMatrices
      * UnitaryMatrices
      * Multiples of the identity (use IdentityMatrixMultiples)
      * Triangular matrices (use RealMatrices or ComplexMatrices)

    Our approach to generating these matrices is to first generate a random real/complex
    matrix of the appropriate shape, and then enforce, in order:
      * diagonal/symmetric/antisymmetric/hermitian/antihermitian
      * tracelessness
      * determinant 0 or 1
      * norm (if determinant != 1)

    The determinant step is sometimes problematic. To achieve unit determinant, we attempt
    to rescale the matrix. This can't always be done, and we try a new random generation
    in such cases. To achieve zero determinant, we attempt to subtract lambda*I from the
    matrix. This can't be done for traceless matrices while preserving those properties,
    and we also can't handle zero determinant antisymmetric matrices that are complex, or
    real in even dimensions.

    Some special cases that don't exist:
      * Real, diagonal, traceless, unit determinant, 2x2 matrix
      * Real, symmetric, traceless, unit determinant, 2x2 matrix
      * Hermitian, traceless, unit determinant, 2x2 matrix
      * Odd-dimension, unit-determinant antisymmetric matrix
      * Odd-dimension, unit-determinant antihermitian matrix

    Config:
    =======
        Same as SquareMatrixSamplingSet, but:
        - symmetry (None, 'diagonal', 'symmetric', 'antisymmetric',
            'hermitian', 'antihermitian'): Entry describing the desired
            symmetry of the matrix. Note: If 'hermitian' or 'antihermitian'
            are chosen, 'complex' is set to True. (default None)
        - traceless (bool): Whether or not to ensure the matrix is traceless
            (default False)
        - determinant (None, 0, 1): If set to 0 or 1, sets the determinant of the
            matrix to be 0 or 1 correspondingly. If None or 0, uses 'norm' to
            normalize the matrix.

    Usage:
    ======
    By default, we generate real 2x2 matrices with no symmetry:
    >>> matrices = SquareMatrices()
    >>> mat = matrices.gen_sample()
    >>> mat.shape
    (2, 2)
    >>> np.array_equal(mat, np.conj(mat))
    True

    We can make it NxN by specifying the dimension:
    >>> matrices = SquareMatrices(dimension=4)
    >>> matrices.gen_sample().shape
    (4, 4)

    Some combinations: diagonal, complex, traceless and unit determinant
    >>> from mitxgraders.helpers.calc import within_tolerance
    >>> matrices = SquareMatrices(symmetry='diagonal', complex=True, traceless=True,
    ...                           determinant=1)
    >>> mat = matrices.gen_sample()
    >>> np.array_equal(np.diag(np.diag(mat)), mat)  # Diagonal
    True
    >>> np.array_equal(mat, np.conj(mat))  # Complex
    False
    >>> within_tolerance(mat.trace(), 0, 5e-13)  # Traceless
    True
    >>> within_tolerance(np.linalg.det(mat), 1, 1e-12)  # Unit determinant
    True

    More combinations: symmetric, real, zero determinant and norm in [6, 10]
    >>> matrices = SquareMatrices(symmetry='symmetric', determinant=0, norm=[6, 10])
    >>> mat = matrices.gen_sample()
    >>> np.array_equal(mat, mat.T)  # Symmetric
    True
    >>> np.array_equal(mat, np.conj(mat))  # Real
    True
    >>> within_tolerance(np.linalg.det(mat), 0, 1e-12)  # Zero determinant
    True
    >>> 6 <= np.linalg.norm(mat) <= 10  # Norm in [6, 10]
    True

    More combinations: antisymmetric and complex
    >>> matrices = SquareMatrices(symmetry='antisymmetric', complex=True)
    >>> mat = matrices.gen_sample()
    >>> np.array_equal(mat, -mat.T)  # Antisymmetric
    True
    >>> np.array_equal(mat, np.conj(mat))  # Complex
    False

    More combinations: hermitian (enforces complex), zero determinant and norm in [6, 10]
    >>> matrices = SquareMatrices(symmetry='hermitian', determinant=0, norm=[6, 10])
    >>> mat = matrices.gen_sample()
    >>> np.array_equal(mat, np.conj(mat.T))  # Hermitian
    True
    >>> within_tolerance(np.linalg.det(mat), 0, 1e-12)  # Zero determinant
    True
    >>> 6 <= np.linalg.norm(mat) <= 10  # Norm in [6, 10]
    True

    More combinations: antihermitian (enforces complex), unit determinant and traceless
    >>> matrices = SquareMatrices(symmetry='antihermitian', determinant=1, traceless=True)
    >>> mat = matrices.gen_sample()
    >>> np.array_equal(mat, -np.conj(mat.T))  # Antihermitian
    True
    >>> within_tolerance(np.linalg.det(mat), 1, 1e-12)  # Unit determinant
    True
    >>> within_tolerance(mat.trace(), 0, 5e-13)  # Traceless
    True
    """
    schema_config = SquareMatrixSamplingSet.schema_config.extend({
        Required('symmetry', default=None): Any(None, 'diagonal', 'symmetric',
                                                'antisymmetric', 'hermitian',
                                                'antihermitian'),
        Required('traceless', default=False): bool,
        Required('determinant', default=None): Any(None, 0, 1)
    })

    def __init__(self, config=None, **kwargs):
        """
        Configure the class as normal, then set complex for hermitian/antihermitian
        """
        super(SquareMatrices, self).__init__(config, **kwargs)
        # (Anti)hermitian matrices only make sense with complex entries
        if self.config['symmetry'] in ['hermitian', 'antihermitian']:
            self.config['complex'] = True

        # A couple of cases that are possible but we can't handle:
        if self.config['determinant'] == 0:
            if self.config['traceless']:
                raise ConfigError("Unable to generate zero determinant traceless matrices")
            if self.config['symmetry'] == 'antisymmetric':
                # Real antisymmetric matrices in odd dimension automatically have zero determinant
                if self.config['complex']:
                    raise ConfigError("Unable to generate complex zero determinant antisymmetric matrices")
                if self.config['dimension'] % 2 == 0:
                    raise ConfigError("Unable to generate real zero determinant antisymmetric matrices in even dimensions")

        # And a handful of cases that don't exist
        if self.config['determinant'] == 1:
            if self.config['dimension'] == 2 and self.config['traceless']:
                if self.config['symmetry'] == 'diagonal' and not self.config['complex']:
                    raise ConfigError("No real, traceless, unit-determinant, diagonal 2x2 matrix exists")
                elif self.config['symmetry'] == 'symmetric' and not self.config['complex']:
                    raise ConfigError("No real, traceless, unit-determinant, symmetric 2x2 matrix exists")
                elif self.config['symmetry'] == 'hermitian':
                    raise ConfigError("No traceless, unit-determinant, Hermitian 2x2 matrix exists")
            if self.config['dimension'] % 2 == 1:  # Odd dimension
                if self.config['symmetry'] == 'antisymmetric':
                    # Eigenvalues are all imaginary, so determinant is imaginary
                    raise ConfigError("No unit-determinant antisymmetric matrix exists in odd dimensions")
                if self.config['symmetry'] == 'antihermitian':
                    # Eigenvalues are all imaginary, so determinant is imaginary
                    raise ConfigError("No unit-determinant antihermitian matrix exists in odd dimensions")

    def apply_symmetry(self, array):
        """
        Applies the required symmetries to the array
        """
        # Apply the symmetry property
        # (the resulting scale change is irrelevant: normalize() fixes it later)
        if self.config['symmetry'] == 'diagonal':
            working = np.diag(np.diag(array))
        elif self.config['symmetry'] == 'symmetric':
            working = array + array.transpose()
        elif self.config['symmetry'] == 'antisymmetric':
            working = array - array.transpose()
        elif self.config['symmetry'] == 'hermitian':
            working = array + np.conj(array.transpose())
        elif self.config['symmetry'] == 'antihermitian':
            working = array - np.conj(array.transpose())
        else:
            working = array

        # Apply the traceless property
        # (subtracting (tr/dim)*I removes the trace without disturbing any of the
        # symmetries above, since the identity is invariant under all of them)
        if self.config['traceless']:
            trace = np.trace(working)
            dim = self.config['dimension']
            working = working - trace / dim * np.eye(dim)

        return working

    def normalize(self, array):
        """
        Set either the norm or determinant of the matrix to the desired value.
        """
        if self.config['determinant'] == 1:
            # No need to normalize
            return self.make_det_one(array)
        elif self.config['determinant'] == 0:
            array = self.make_det_zero(array)
        return super(SquareMatrices, self).normalize(array)

    def make_det_one(self, array):
        """
        Scale an array to have unit determinant, or raise Retry if not possible.

        Note that odd-dimensional antisymmetric and antihermitian symmetries should
        not be able to make it to here, as their determinants are always purely imaginary.
        """
        assert not (self.config['symmetry'] in ['antisymmetric', 'antihermitian']
                    and self.config['dimension'] % 2 == 1)
        # Compute the determinant
        det = np.linalg.det(array)
        # Is the determinant guaranteed to be real?
        if (not self.config['complex']
                or self.config['symmetry'] in ['hermitian', 'antihermitian']):
            det = np.real(det)  # Get rid of numerical error
            if det > 0:
                # This is the easy case: Just scale the determinant
                return array / np.power(det, 1/self.config['dimension'])
            elif self.config['dimension'] % 2 == 1 and det < 0:
                # Odd-dimension matrices can also have their determinant scaled
                # (negating the matrix flips the sign of det when dim is odd)
                return - array / np.power(-det, 1/self.config['dimension'])
            else:
                # Can't rescale our way out of this one
                raise Retry()
        elif (self.config['symmetry'] in [None, 'diagonal', 'symmetric', 'antisymmetric']
              and self.config['complex']):
            # Check to ensure that det isn't 0 before we get a division by zero
            if np.abs(det) < 5e-13:
                raise Retry()  # pragma: no cover
            # Complex matrices are easy: we can just rescale the matrix
            # (det + 0.0j forces a complex root so negative/complex det is fine)
            return array / np.power(det + 0.0j, 1/self.config['dimension'])
        # We should never get here
        raise ValueError('Unknown class configuration')  # pragma: no cover

    def make_det_zero(self, array):
        """Modify an array to have zero determinant, or raise Retry if not possible"""
        if np.abs(np.linalg.det(array)) < 5e-13:
            # This is close enough to zero for our purposes!
            # This occurs for real, antisymmetric matrices in odd dimensions, for example.
            return array

        # Pick a random number!
        index = np.random.randint(self.config['dimension'])

        # What's our symmetry?
        if self.config['symmetry'] == 'diagonal':
            # Choose a random diagonal entry to be zero
            array[index, index] = 0
            return array
        elif ((self.config['symmetry'] == 'symmetric' and not self.config['complex'])
              or self.config['symmetry'] == 'hermitian'):
            # Eigenvalues are all real - use special algorithm to compute eigenvalues
            eigenvalues = np.linalg.eigvalsh(array)
            eigenvalue = np.real(eigenvalues[index])
        elif self.config['symmetry'] == 'antihermitian':
            # Eigenvalues are all imaginary
            # Temporarily convert the matrix into a hermitian matrix
            # and use the special algorithm
            eigenvalues = np.linalg.eigvalsh(1j * array)
            eigenvalue = -1j * np.real(eigenvalues[index])
        else:
            # No relevant symmetry. Use a general algorithm to compute eigenvalues.
            eigenvalues = np.linalg.eigvals(array)
            if not self.config['complex']:
                # We need to select a real eigenvalue.
                idxs = np.where(np.abs(np.imag(eigenvalues)) < 5e-13)[0]
                # idxs now stores any indices that have real eigenvalues
                if len(idxs) == 0:
                    # No real eigenvalues. Try again.
                    raise Retry()  # pragma: no cover
                # np.random.choice was introduced in 1.7.0; edX has 1.6.0
                take = np.random.randint(len(idxs))
                index = idxs[take]
                eigenvalue = np.real(eigenvalues[index])
            else:
                eigenvalue = eigenvalues[index]

        # Subtract the eigenvalue from the array
        # (shifting by -lambda*I sends that eigenvalue to zero, so det becomes 0)
        return array - np.eye(self.config['dimension']) * eigenvalue
class OrthogonalMatrices(SquareMatrixSamplingSet):
    """
    Sampling set of random orthogonal matrices, drawn from O(n) or SO(n).

    Note: This will only work with scipy 0.18 and numpy 1.7.1, which requires the python3
    implementation of edX; on older versions, sampling raises NotImplementedError.

    Config:
    =======
        Same as SquareMatrixSamplingSet, but:
        - unitdet (bool): If True, sample unit-determinant matrices from SO(n);
            if False (default), sample arbitrary-determinant matrices from O(n)
        The options 'complex' and 'norm' are ignored.

    Usage:
    ======
    >>> import six, pytest
    >>> if six.PY2:
    ...     pytest.skip('These doctests only work in python 3')

    By default, we generate 2x2 matrices:
    >>> matrices = OrthogonalMatrices()
    >>> matrices.gen_sample().shape
    (2, 2)

    We can generate NxN matrices by specifying the dimension:
    >>> matrices = OrthogonalMatrices(dimension=4)
    >>> matrices.gen_sample().shape
    (4, 4)

    If unitdet is specified, the determinant is 1:
    >>> from mitxgraders.helpers.calc import within_tolerance
    >>> matrices = OrthogonalMatrices(unitdet=True)
    >>> within_tolerance(np.linalg.det(matrices.gen_sample()), 1, 5e-13)
    True

    Otherwise, it could be +1 or -1.

    The resulting samples are orthogonal matrices:
    >>> matrices = OrthogonalMatrices(unitdet=True)
    >>> m = matrices.gen_sample()
    >>> within_tolerance(m * np.transpose(m), MathArray(np.eye(2)), 5e-13)
    True
    >>> matrices = OrthogonalMatrices(unitdet=False)
    >>> m = matrices.gen_sample()
    >>> within_tolerance(m * np.transpose(m), MathArray(np.eye(2)), 5e-13)
    True
    """
    schema_config = SquareMatrixSamplingSet.schema_config.extend({
        Required('unitdet', default=False): bool
    })

    def generate_sample(self):
        """Draw a Haar-distributed orthogonal matrix via scipy's group samplers."""
        group = special_ortho_group if self.config['unitdet'] else ortho_group
        return group.rvs(self.config['dimension'])
class UnitaryMatrices(SquareMatrixSamplingSet):
    """
    Sampling set for unitary matrices.

    Note: This will only work with scipy 0.18 and numpy 1.7.1, which requires the python3
    implementation of edX.

    Config:
    =======
        Same as SquareMatrixSamplingSet, but:
        - unitdet (bool): Boolean specifying whether to sample from unit determinant
            matrices SU(n) (True) or arbitrary determinant matrices U(n) (False, default)
        The options 'complex' and 'norm' are ignored.

    Usage:
    ======
    >>> import six, pytest
    >>> if six.PY2:
    ...     pytest.skip('These doctests only work in python 3')

    By default, we generate 2x2 matrices:
    >>> matrices = UnitaryMatrices()
    >>> matrices.gen_sample().shape
    (2, 2)

    We can generate NxN matrices by specifying the dimension:
    >>> matrices = UnitaryMatrices(dimension=4)
    >>> matrices.gen_sample().shape
    (4, 4)

    If unitdet is specified, the determinant is 1:
    >>> from mitxgraders.helpers.calc import within_tolerance
    >>> matrices = UnitaryMatrices(unitdet=True)
    >>> within_tolerance(np.linalg.det(matrices.gen_sample()), 1, 5e-13)
    True

    Otherwise, it's typically not (though it could randomly be):
    >>> matrices = UnitaryMatrices(unitdet=False)
    >>> within_tolerance(np.linalg.det(matrices.gen_sample()), 1, 5e-13)
    False

    The resulting samples are unitary matrices:
    >>> matrices = UnitaryMatrices(unitdet=True)
    >>> m = matrices.gen_sample()
    >>> within_tolerance(m * np.conjugate(np.transpose(m)), MathArray(np.eye(2)), 5e-13)
    True
    >>> matrices = UnitaryMatrices(unitdet=False)
    >>> m = matrices.gen_sample()
    >>> within_tolerance(m * np.conjugate(np.transpose(m)), MathArray(np.eye(2)), 5e-13)
    True
    """
    # Add the 'unitdet' flag selecting SU(n) versus U(n) sampling.
    schema_config = SquareMatrixSamplingSet.schema_config.extend({
        Required('unitdet', default=False): bool
    })

    def generate_sample(self):
        """
        Generates a unitary matrix, rescaled to unit determinant if requested
        """
        # Generate the array
        array = unitary_group.rvs(self.config['dimension'])
        # Fix the determinant if need be
        if self.config['unitdet']:
            det = np.linalg.det(array)
            # Dividing U by det(U)**(1/n) gives determinant det/det = 1
            array /= det**(1/self.config['dimension'])
        # Return the result
        return array
| """
matrixsampling.py
Contains classes for sampling vector/matrix/tensor values:
* RealVectors
* ComplexVectors
* RealMatrices
* ComplexMatrices
* RealTensors
* ComplexTensors
* IdentityMatrixMultiples
* SquareMatrices
* OrthogonalMatrices
* UnitaryMatrices
All of these classes perform random sampling. To obtain a sample, use class.gen_sample()
"""
from __future__ import print_function, division, absolute_import, unicode_literals
import numpy as np
from voluptuous import Schema, Required, All, Coerce, Any, Range
from mitxgraders.exceptions import ConfigError
from mitxgraders.sampling import VariableSamplingSet, RealInterval, ScalarSamplingSet
from mitxgraders.helpers.validatorfuncs import NumberRange, is_shape_specification
from mitxgraders.helpers.calc import MathArray
class Unavailable(object):
    """
    Stand-in for the scipy.stats matrix-sampling groups when they cannot be
    imported. Any attempt to draw a sample raises NotImplementedError, so the
    failure only surfaces if the feature is actually used.
    """
    def rvs(self, dimension):
        """Raise NotImplementedError instead of sampling a matrix."""
        message = ('This feature requires newer versions of numpy '
                   'and scipy than are available.')
        raise NotImplementedError(message)
# scipy.stats gained ortho_group, special_ortho_group and unitary_group in
# scipy 0.18. On older installations we substitute Unavailable placeholders,
# which raise NotImplementedError only when a sample is actually requested.
try:
    from scipy.stats import ortho_group, special_ortho_group, unitary_group
except ImportError:
    ortho_group = Unavailable()
    special_ortho_group = Unavailable()
    unitary_group = Unavailable()
# Set the objects to be imported from this grader
# (the public sampling sets; all other names in this module are internal)
__all__ = [
    "RealVectors",
    "ComplexVectors",
    "RealMatrices",
    "ComplexMatrices",
    "RealTensors",
    "ComplexTensors",
    "IdentityMatrixMultiples",
    "SquareMatrices",
    "OrthogonalMatrices",
    "UnitaryMatrices"
]
class Retry(Exception):
    """
    Internal control-flow signal for the sampling machinery: raised when a
    randomly drawn array cannot be adjusted to satisfy the requested
    constraints, telling the caller to discard it and draw a fresh sample.
    """
class ArraySamplingSet(VariableSamplingSet):
    """
    Represents a set from which random array variable samples are taken.

    The norm used is standard Euclidean norm: root-square-sum of all entries in the array.

    This is the most low-level array sampling set we have, and is subclassed for various
    specific purposes. While we cannot make this class abstract, we strongly discourage
    its use.

    Config:
    =======
        - shape (int|(int)|[int]): Dimensions of the array, specified as a list or tuple of
            the dimensions in each index as (n_1, n_2, ...). Can also use an integer
            to select a vector of that length. (required; no default)
        - norm ([start, stop]): Range for the overall norm of the array. Can be a
            list [start, stop] or a dictionary {'start':start, 'stop':stop}.
            (default [1, 5])
        - complex (bool): Whether or not the matrix is complex (default False)
    """
    schema_config = Schema({
        Required('shape'): is_shape_specification(min_dim=1),
        Required('norm', default=[1, 5]): NumberRange(),
        Required('complex', default=False): bool
    })

    def __init__(self, config=None, **kwargs):
        """
        Configure the class as normal, then set up norm as a RealInterval
        """
        super(ArraySamplingSet, self).__init__(config, **kwargs)
        self.norm = RealInterval(self.config['norm'])

    def gen_sample(self):
        """
        Generate an array sample and return it as a MathArray.

        Delegates to generate_sample, which is the routine subclasses should
        override if needed, rather than this one.
        """
        return MathArray(self.generate_sample())

    def generate_sample(self):
        """
        Generate a random array of shape and norm determined by config. After
        generation, the apply_symmetry and normalize functions are applied to
        the result. These functions may be shadowed by a subclass.

        If apply_symmetry or normalize raise the Retry exception, a new sample
        is generated, and the procedure starts anew.

        Returns a numpy array.
        """
        # Try at most 100 random draws before giving up
        for _ in range(100):
            # Entries are drawn uniformly from [-0.5, 0.5)
            candidate = np.random.random_sample(self.config['shape']) - 0.5
            if self.config['complex']:
                # Add an independent uniform imaginary part
                imaginary_part = np.random.random_sample(self.config['shape']) - 0.5
                candidate = candidate + 1j * imaginary_part
            try:
                # Impose any symmetries, then scale to the requested norm
                return self.normalize(self.apply_symmetry(candidate))
            except Retry:
                continue
        raise ValueError('Unable to construct sample for {}'
                         .format(type(self).__name__))  # pragma: no cover

    def apply_symmetry(self, array):
        """
        Apply the required symmetries to the array.

        This method exists to be shadowed by subclasses; the base version is a no-op.
        """
        return array

    def normalize(self, array):
        """
        Rescale the array so its Euclidean norm falls in the configured range.

        This method can be shadowed by subclasses.
        """
        target_norm = self.norm.gen_sample()
        return array * target_norm / np.linalg.norm(array)
class VectorSamplingSet(ArraySamplingSet):
    """
    Sampling set of vectors. While we cannot make this class abstract, you should use
    RealVectors or ComplexVectors instead.

    Config:
    =======
        Same as ArraySamplingSet, but:
        - shape can be a plain integer indicating number of components
        - if shape is tuple/list, must have length 1
        - default shape is (3, ), for a 3D vector
    """
    # Restrict 'shape' to exactly one dimension (a vector); default is 3 components.
    schema_config = ArraySamplingSet.schema_config.extend({
        Required('shape', default=(3,)): is_shape_specification(min_dim=1, max_dim=1)
    })
class RealVectors(VectorSamplingSet):
    """
    Sampling set of real vectors.

    Config:
    =======
        Same as VectorSamplingSet, but:
        - complex is always False

    Usage:
    ======

    By default, vectors have 3 components:
    >>> vectors = RealVectors()
    >>> vectors.gen_sample().shape
    (3,)
    """
    # Pin 'complex' to False so samples never gain an imaginary part.
    schema_config = VectorSamplingSet.schema_config.extend({
        Required('complex', default=False): False
    })
class ComplexVectors(VectorSamplingSet):
    """
    Sampling set of complex vectors.

    Config:
    =======
        Same as VectorSamplingSet, but:
        - complex is always True

    Usage:
    ======

    Complex vectors have complex components:
    >>> vectors = ComplexVectors()
    >>> v = vectors.gen_sample()
    >>> np.array_equal(v, np.conj(v))
    False
    """
    # Pin 'complex' to True so samples always carry a random imaginary part.
    schema_config = VectorSamplingSet.schema_config.extend({
        Required('complex', default=True): True
    })
class TensorSamplingSet(ArraySamplingSet):
    """
    Sampling set of tensors. While we cannot make this class abstract, you should use
    RealTensors or ComplexTensors instead.

    Config:
    =======
        Same as ArraySamplingSet, but:
        - shape must be a tuple with at least 3 dimensions
    """
    # Require rank >= 3: anything smaller is a vector or matrix, handled elsewhere.
    schema_config = ArraySamplingSet.schema_config.extend({
        Required('shape'): is_shape_specification(min_dim=3)
    })
class RealTensors(TensorSamplingSet):
    """
    Sampling set of real tensors.

    Config:
    =======
        Same as TensorSamplingSet, but:
        - complex is always False

    Usage:
    ======

    Sample tensors with shape [4, 2, 5]:
    >>> real_tensors = RealTensors(shape=[4, 2, 5])
    >>> sample = real_tensors.gen_sample()
    >>> sample.shape
    (4, 2, 5)

    Samples are of class MathArray:
    >>> isinstance(sample, MathArray)
    True

    Specify a range for the tensor's norm:
    >>> real_tensors = RealTensors(shape=[4, 2, 5], norm=[10, 20])
    >>> sample = real_tensors.gen_sample()
    >>> 10 < np.linalg.norm(sample) < 20
    True
    """
    # Pin 'complex' to False so samples never gain an imaginary part.
    schema_config = TensorSamplingSet.schema_config.extend({
        Required('complex', default=False): False
    })
class ComplexTensors(TensorSamplingSet):
    """
    Sampling set of complex tensors.

    Config:
    =======
        Same as TensorSamplingSet, but:
        - complex is always True

    Usage:
    ======

    Sample tensors with shape [4, 2, 5]:
    >>> tensors = ComplexTensors(shape=[4, 2, 5])
    >>> t = tensors.gen_sample()
    >>> t.shape
    (4, 2, 5)

    Complex tensors have complex components:
    >>> np.array_equal(t, np.conj(t))
    False
    """
    # Pin 'complex' to True so samples always carry a random imaginary part.
    schema_config = TensorSamplingSet.schema_config.extend({
        Required('complex', default=True): True
    })
class MatrixSamplingSet(ArraySamplingSet):
    """
    Base sampling set of matrices. While we cannot make this class abstract, you should
    use a more specific subclass instead.

    Config:
    =======
        Same as ArraySamplingSet, but:
        - shape must be a tuple/list with length 2
        - default shape is (2, 2), for a 2x2 matrix
    """
    # Require rank exactly 2 (rows x columns); default is a 2x2 matrix.
    schema_config = ArraySamplingSet.schema_config.extend({
        Required('shape', default=(2, 2)): is_shape_specification(min_dim=2, max_dim=2)
    })
class GeneralMatrices(MatrixSamplingSet):
    """
    Base sampling set of general matrices. While we cannot make this class abstract, you
    should use RealMatrices or ComplexMatrices instead.

    Config:
    =======
        Same as MatrixSamplingSet, but:
        - triangular (None, 'upper', 'lower'): Specify if you want a triangular
            matrix (default None)
    """
    schema_config = MatrixSamplingSet.schema_config.extend({
        Required('triangular', default=None): Any(None, 'upper', 'lower')
    })

    def apply_symmetry(self, array):
        """Zero out entries below/above the diagonal if a triangular form was requested."""
        triangular = self.config['triangular']
        if triangular == 'upper':
            return np.triu(array)
        if triangular == 'lower':
            return np.tril(array)
        # No triangular constraint: pass the array through unchanged
        return array
class RealMatrices(GeneralMatrices):
    """
    Sampling set of real matrices.

    Config:
    =======
        Same as GeneralMatrices, but:
        - complex is always False

    Usage:
    ======

    By default, matrices have two rows and two columns:
    >>> matrices = RealMatrices()
    >>> matrices.gen_sample().shape
    (2, 2)

    We can generate upper triangular matrices:
    >>> from mitxgraders.helpers.calc import within_tolerance
    >>> matrices = RealMatrices(triangular='upper')
    >>> m = matrices.gen_sample()
    >>> within_tolerance(m, MathArray(np.triu(m)), 0)
    True

    and lower triangular matrices:
    >>> matrices = RealMatrices(triangular='lower')
    >>> m = matrices.gen_sample()
    >>> within_tolerance(m, MathArray(np.tril(m)), 0)
    True
    """
    # Pin 'complex' to False so samples never gain an imaginary part.
    schema_config = GeneralMatrices.schema_config.extend({
        Required('complex', default=False): False
    })
class ComplexMatrices(GeneralMatrices):
    """
    Sampling set of complex matrices.

    Config:
    =======
        Same as GeneralMatrices, but:
        - complex is always True

    Usage:
    ======

    Complex matrices have complex components:
    >>> matrices = ComplexMatrices()
    >>> m = matrices.gen_sample()
    >>> np.array_equal(m, np.conj(m))
    False
    """
    # Pin 'complex' to True so samples always carry a random imaginary part.
    schema_config = GeneralMatrices.schema_config.extend({
        Required('complex', default=True): True
    })
class SquareMatrixSamplingSet(MatrixSamplingSet):
    """
    Base sampling set of square matrices. While we cannot make this class abstract, you
    want to use a subclass instead (likely SquareMatrices).

    Config:
    =======
        Same as MatrixSamplingSet, but:
        - dimension (int): Dimension of the matrix (minimum 2).
        The 'shape' property is not used.
    """
    schema_config = MatrixSamplingSet.schema_config.extend({
        Required('shape', default=None): None,
        Required('dimension', default=2): All(int, Range(2, float('inf')))
    })

    def __init__(self, config=None, **kwargs):
        """
        Configure the class as normal, then derive the square 'shape' from 'dimension'
        """
        super(SquareMatrixSamplingSet, self).__init__(config, **kwargs)
        dim = self.config['dimension']
        self.config['shape'] = (dim, dim)
class IdentityMatrixMultiples(SquareMatrixSamplingSet):
    """
    Class representing a collection of multiples of the identity matrix
    of a given dimension.

    Config:
    =======
        Same as MatrixSamplingSet, but:
        - sampler: A scalar sampling set for the multiplicative constant
            (default RealInterval([1, 5]))
        Note that the 'complex' and 'norm' properties are ignored.

    Usage:
    ======

    By default, we generate 2x2 matrices:
    >>> matrices = IdentityMatrixMultiples()
    >>> matrices.gen_sample().shape
    (2, 2)

    We can generate NxN matrices by specifying the dimension:
    >>> matrices = IdentityMatrixMultiples(dimension=4)
    >>> matrices.gen_sample().shape
    (4, 4)

    The scalar multiple can be generated in a number of ways:
    >>> from mitxgraders import ComplexSector
    >>> matrices = IdentityMatrixMultiples(sampler=[1,3])
    >>> sect = ComplexSector(modulus=[0,1], argument=[-np.pi,np.pi])
    >>> matrices = IdentityMatrixMultiples(sampler=sect)

    The resulting samples are simply a scalar times the identity matrix:
    >>> matrices = IdentityMatrixMultiples()
    >>> m = matrices.gen_sample()
    >>> np.array_equal(m, m[0, 0] * np.eye(2))
    True
    """
    # Sampling set for the multiplicative constant.
    # Accept anything that FormulaGrader would accept for a sampling set, restricted to
    # scalar sampling sets. Hence, ScalarSamplingSets and ranges are allowed.
    # Note: Does not support DependentSampler or DiscreteSet, as they are not guaranteed
    # to return a scalar value.
    schema_config = SquareMatrixSamplingSet.schema_config.extend({
        Required('sampler', default=RealInterval()): Any(ScalarSamplingSet,
                                                         All(list, Coerce(RealInterval)))
    })

    def generate_sample(self):
        """
        Return the identity matrix of the configured dimension times a random scalar
        """
        # Draw the multiplicative constant, then scale the identity
        multiplier = self.config['sampler'].gen_sample()
        return multiplier * np.eye(self.config['dimension'])
class SquareMatrices(SquareMatrixSamplingSet):
    """
    Sampling set for square matrices. Various symmetry properties are possible, including
    diagonal, symmetric, antisymmetric, hermitian and antihermitian. The trace and
    determinant can also be controlled.

    There are four kinds of special square matrices that covered by other sampling sets:
    * OrthogonalMatrices
    * UnitaryMatrices
    * Multiples of the identity (use IdentityMatrixMultiples)
    * Triangular matrices (use RealMatrices or ComplexMatrices)

    Our approach to generating these matrices is to first generate a random real/complex
    matrix of the appropriate shape, and then enforce, in order:
    * diagonal/symmetric/antisymmetric/hermitian/antihermitian
    * tracelessness
    * determinant 0 or 1
    * norm (if determinant != 1)

    The determinant step is sometimes problematic. To achieve unit determinant, we attempt
    to rescale the matrix. This can't always be done, and we try a new random generation
    in such cases. To achieve zero determinant, we attempt to subtract lambda*I from the
    matrix. This can't be done for traceless matrices while preserving those properties,
    and we also can't handle zero determinant antisymmetric matrices that are complex, or
    real in even dimensions.

    Some special cases that don't exist:
    * Real, diagonal, traceless, unit determinant, 2x2 matrix
    * Real, symmetric, traceless, unit determinant, 2x2 matrix
    * Hermitian, traceless, unit determinant, 2x2 matrix
    * Odd-dimension, unit-determinant antisymmetric matrix
    * Odd-dimension, unit-determinant antihermitian matrix

    Config:
    =======
        Same as SquareMatrixSamplingSet, but:
        - symmetry (None, 'diagonal', 'symmetric', 'antisymmetric',
            'hermitian', 'antihermitian'): Entry describing the desired
            symmetry of the matrix. Note: If 'hermitian' or 'antihermitian'
            are chosen, 'complex' is set to True. (default None)
        - traceless (bool): Whether or not to ensure the matrix is traceless
            (default False)
        - determinant (None, 0, 1): If set to 0 or 1, sets the determinant of the
            matrix to be 0 or 1 correspondingly. If None or 0, uses 'norm' to
            normalize the matrix.

    Usage:
    ======

    By default, we generate real 2x2 matrices with no symmetry:
    >>> matrices = SquareMatrices()
    >>> mat = matrices.gen_sample()
    >>> mat.shape
    (2, 2)
    >>> np.array_equal(mat, np.conj(mat))
    True

    We can make it NxN by specifying the dimension:
    >>> matrices = SquareMatrices(dimension=4)
    >>> matrices.gen_sample().shape
    (4, 4)

    Some combinations: diagonal, complex, traceless and unit determinant
    >>> from mitxgraders.helpers.calc import within_tolerance
    >>> matrices = SquareMatrices(symmetry='diagonal', complex=True, traceless=True,
    ...                           determinant=1)
    >>> mat = matrices.gen_sample()
    >>> np.array_equal(np.diag(np.diag(mat)), mat)  # Diagonal
    True
    >>> np.array_equal(mat, np.conj(mat))  # Complex
    False
    >>> within_tolerance(mat.trace(), 0, 5e-13)  # Traceless
    True
    >>> within_tolerance(np.linalg.det(mat), 1, 1e-12)  # Unit determinant
    True

    More combinations: symmetric, real, zero determinant and norm in [6, 10]
    >>> matrices = SquareMatrices(symmetry='symmetric', determinant=0, norm=[6, 10])
    >>> mat = matrices.gen_sample()
    >>> np.array_equal(mat, mat.T)  # Symmetric
    True
    >>> np.array_equal(mat, np.conj(mat))  # Real
    True
    >>> within_tolerance(np.linalg.det(mat), 0, 1e-12)  # Zero determinant
    True
    >>> 6 <= np.linalg.norm(mat) <= 10  # Norm in [6, 10]
    True

    More combinations: antisymmetric and complex
    >>> matrices = SquareMatrices(symmetry='antisymmetric', complex=True)
    >>> mat = matrices.gen_sample()
    >>> np.array_equal(mat, -mat.T)  # Antisymmetric
    True
    >>> np.array_equal(mat, np.conj(mat))  # Complex
    False

    More combinations: hermitian (enforces complex), zero determinant and norm in [6, 10]
    >>> matrices = SquareMatrices(symmetry='hermitian', determinant=0, norm=[6, 10])
    >>> mat = matrices.gen_sample()
    >>> np.array_equal(mat, np.conj(mat.T))  # Hermitian
    True
    >>> within_tolerance(np.linalg.det(mat), 0, 1e-12)  # Zero determinant
    True
    >>> 6 <= np.linalg.norm(mat) <= 10  # Norm in [6, 10]
    True

    More combinations: antihermitian (enforces complex), unit determinant and traceless
    >>> matrices = SquareMatrices(symmetry='antihermitian', determinant=1, traceless=True)
    >>> mat = matrices.gen_sample()
    >>> np.array_equal(mat, -np.conj(mat.T))  # Antihermitian
    True
    >>> within_tolerance(np.linalg.det(mat), 1, 1e-12)  # Unit determinant
    True
    >>> within_tolerance(mat.trace(), 0, 5e-13)  # Traceless
    True
    """
    schema_config = SquareMatrixSamplingSet.schema_config.extend({
        Required('symmetry', default=None): Any(None, 'diagonal', 'symmetric',
                                                'antisymmetric', 'hermitian',
                                                'antihermitian'),
        Required('traceless', default=False): bool,
        Required('determinant', default=None): Any(None, 0, 1)
    })

    def __init__(self, config=None, **kwargs):
        """
        Configure the class as normal, then set complex for hermitian/antihermitian.

        Raises ConfigError for configurations this sampler cannot generate, either
        because the algorithm cannot handle them or because no such matrix exists.
        """
        super(SquareMatrices, self).__init__(config, **kwargs)
        # (Anti)hermiticity is only meaningful for complex matrices
        if self.config['symmetry'] in ['hermitian', 'antihermitian']:
            self.config['complex'] = True
        # A couple of cases that are possible but we can't handle:
        if self.config['determinant'] == 0:
            if self.config['traceless']:
                # make_det_zero shifts by a multiple of the identity, which breaks tracelessness
                raise ConfigError("Unable to generate zero determinant traceless matrices")
            if self.config['symmetry'] == 'antisymmetric':
                # Real antisymmetric matrices in odd dimension automatically have zero determinant
                if self.config['complex']:
                    raise ConfigError("Unable to generate complex zero determinant antisymmetric matrices")
                if self.config['dimension'] % 2 == 0:
                    raise ConfigError("Unable to generate real zero determinant antisymmetric matrices in even dimensions")
        # And a handful of cases that don't exist
        if self.config['determinant'] == 1:
            if self.config['dimension'] == 2 and self.config['traceless']:
                # Traceless 2x2: eigenvalues are +/-lambda, so det = -lambda^2 <= 0 when real
                if self.config['symmetry'] == 'diagonal' and not self.config['complex']:
                    raise ConfigError("No real, traceless, unit-determinant, diagonal 2x2 matrix exists")
                elif self.config['symmetry'] == 'symmetric' and not self.config['complex']:
                    raise ConfigError("No real, traceless, unit-determinant, symmetric 2x2 matrix exists")
                elif self.config['symmetry'] == 'hermitian':
                    raise ConfigError("No traceless, unit-determinant, Hermitian 2x2 matrix exists")
            if self.config['dimension'] % 2 == 1:  # Odd dimension
                if self.config['symmetry'] == 'antisymmetric':
                    # Eigenvalues are all imaginary, so determinant is imaginary
                    raise ConfigError("No unit-determinant antisymmetric matrix exists in odd dimensions")
                if self.config['symmetry'] == 'antihermitian':
                    # Eigenvalues are all imaginary, so determinant is imaginary
                    raise ConfigError("No unit-determinant antihermitian matrix exists in odd dimensions")

    def apply_symmetry(self, array):
        """
        Apply the configured symmetry to the array, then remove the trace if requested.
        """
        # Apply the symmetry property
        if self.config['symmetry'] == 'diagonal':
            # Keep only the diagonal entries
            working = np.diag(np.diag(array))
        elif self.config['symmetry'] == 'symmetric':
            # (A + A^T) is symmetric for any A
            working = array + array.transpose()
        elif self.config['symmetry'] == 'antisymmetric':
            # (A - A^T) is antisymmetric for any A
            working = array - array.transpose()
        elif self.config['symmetry'] == 'hermitian':
            # (A + A^dagger) is hermitian for any A
            working = array + np.conj(array.transpose())
        elif self.config['symmetry'] == 'antihermitian':
            # (A - A^dagger) is antihermitian for any A
            working = array - np.conj(array.transpose())
        else:
            working = array
        # Apply the traceless property
        if self.config['traceless']:
            # Subtract (trace/dim) * identity; preserves all of the above symmetries
            trace = np.trace(working)
            dim = self.config['dimension']
            working = working - trace / dim * np.eye(dim)
        return working

    def normalize(self, array):
        """
        Set either the norm or determinant of the matrix to the desired value.
        """
        if self.config['determinant'] == 1:
            # No need to normalize: unit determinant fixes the scale
            return self.make_det_one(array)
        elif self.config['determinant'] == 0:
            array = self.make_det_zero(array)
        # Fall through to the norm-based rescaling of the parent class
        return super(SquareMatrices, self).normalize(array)

    def make_det_one(self, array):
        """
        Scale an array to have unit determinant, or raise Retry if not possible.

        Note that odd-dimensional antisymmetric and antihermitian symmetries should
        not be able to make it to here, as their determinants are always purely imaginary.
        """
        assert not (self.config['symmetry'] in ['antisymmetric', 'antihermitian']
                    and self.config['dimension'] % 2 == 1)
        # Compute the determinant
        det = np.linalg.det(array)
        # Is the determinant guaranteed to be real?
        if (not self.config['complex']
                or self.config['symmetry'] in ['hermitian', 'antihermitian']):
            det = np.real(det)  # Get rid of numerical error
            if det > 0:
                # This is the easy case: Just scale the determinant
                # det(A/c) = det(A)/c^n, so c = det^(1/n) gives determinant 1
                return array / np.power(det, 1/self.config['dimension'])
            elif self.config['dimension'] % 2 == 1 and det < 0:
                # Odd-dimension matrices can also have their determinant scaled
                # (the overall sign flip contributes (-1)^n = -1)
                return - array / np.power(-det, 1/self.config['dimension'])
            else:
                # Can't rescale our way out of this one
                raise Retry()
        elif (self.config['symmetry'] in [None, 'diagonal', 'symmetric', 'antisymmetric']
              and self.config['complex']):
            # Check to ensure that det isn't 0 before we get a division by zero
            if np.abs(det) < 5e-13:
                raise Retry()  # pragma: no cover
            # Complex matrices are easy: we can just rescale the matrix
            # using the complex n-th root of the determinant
            return array / np.power(det + 0.0j, 1/self.config['dimension'])
        # We should never get here
        raise ValueError('Unknown class configuration')  # pragma: no cover

    def make_det_zero(self, array):
        """
        Modify an array to have zero determinant, or raise Retry if not possible.

        Works by subtracting eigenvalue*I from the array, which forces one
        eigenvalue (and hence the determinant) to zero while preserving the
        configured symmetry.
        """
        if np.abs(np.linalg.det(array)) < 5e-13:
            # This is close enough to zero for our purposes!
            # This occurs for real, antisymmetric matrices in odd dimensions, for example.
            return array
        # Pick a random number!
        index = np.random.randint(self.config['dimension'])
        # What's our symmetry?
        if self.config['symmetry'] == 'diagonal':
            # Choose a random diagonal entry to be zero
            array[index, index] = 0
            return array
        elif ((self.config['symmetry'] == 'symmetric' and not self.config['complex'])
              or self.config['symmetry'] == 'hermitian'):
            # Eigenvalues are all real - use special algorithm to compute eigenvalues
            eigenvalues = np.linalg.eigvalsh(array)
            eigenvalue = np.real(eigenvalues[index])
        elif self.config['symmetry'] == 'antihermitian':
            # Eigenvalues are all imaginary
            # Temporarily convert the matrix into a hermitian matrix
            # and use the special algorithm
            eigenvalues = np.linalg.eigvalsh(1j * array)
            eigenvalue = -1j * np.real(eigenvalues[index])
        else:
            # No relevant symmetry. Use a general algorithm to compute eigenvalues.
            eigenvalues = np.linalg.eigvals(array)
            if not self.config['complex']:
                # We need to select a real eigenvalue.
                idxs = np.where(np.abs(np.imag(eigenvalues)) < 5e-13)[0]
                # idxs now stores any indices that have real eigenvalues
                if len(idxs) == 0:
                    # No real eigenvalues. Try again.
                    raise Retry()  # pragma: no cover
                # np.random.choice was introduced in 1.7.0; edX has 1.6.0
                take = np.random.randint(len(idxs))
                index = idxs[take]
                eigenvalue = np.real(eigenvalues[index])
            else:
                eigenvalue = eigenvalues[index]
        # Subtract the eigenvalue from the array
        return array - np.eye(self.config['dimension']) * eigenvalue
class OrthogonalMatrices(SquareMatrixSamplingSet):
    """
    Sampling set for orthogonal matrices.

    Note: This will only work with scipy 0.18 and numpy 1.7.1, which requires the python3
    implementation of edX.

    Config:
    =======
        Same as SquareMatrixSamplingSet, but:
        - unitdet (bool): Boolean specifying whether to sample from unit determinant
            matrices SO(n) (True) or arbitrary determinant matrices O(n) (False, default)
        The options 'complex' and 'norm' are ignored.

    Usage:
    ======
    >>> import six, pytest
    >>> if six.PY2:
    ...     pytest.skip('These doctests only work in python 3')

    By default, we generate 2x2 matrices:
    >>> matrices = OrthogonalMatrices()
    >>> matrices.gen_sample().shape
    (2, 2)

    We can generate NxN matrices by specifying the dimension:
    >>> matrices = OrthogonalMatrices(dimension=4)
    >>> matrices.gen_sample().shape
    (4, 4)

    If unitdet is specified, the determinant is 1:
    >>> from mitxgraders.helpers.calc import within_tolerance
    >>> matrices = OrthogonalMatrices(unitdet=True)
    >>> within_tolerance(np.linalg.det(matrices.gen_sample()), 1, 5e-13)
    True

    Otherwise, it could be +1 or -1.

    The resulting samples are orthogonal matrices:
    >>> matrices = OrthogonalMatrices(unitdet=True)
    >>> m = matrices.gen_sample()
    >>> within_tolerance(m * np.transpose(m), MathArray(np.eye(2)), 5e-13)
    True
    >>> matrices = OrthogonalMatrices(unitdet=False)
    >>> m = matrices.gen_sample()
    >>> within_tolerance(m * np.transpose(m), MathArray(np.eye(2)), 5e-13)
    True
    """
    # Add the 'unitdet' flag selecting SO(n) versus O(n) sampling.
    schema_config = SquareMatrixSamplingSet.schema_config.extend({
        Required('unitdet', default=False): bool
    })

    def generate_sample(self):
        """
        Generate an orthogonal matrix: from SO(n) if unitdet is set, else from O(n)
        """
        group = special_ortho_group if self.config['unitdet'] else ortho_group
        return group.rvs(self.config['dimension'])
class UnitaryMatrices(SquareMatrixSamplingSet):
    """
    Sampling set for unitary matrices.

    Note: This will only work with scipy 0.18 and numpy 1.7.1, which requires the python3
    implementation of edX.

    Config:
    =======
        Same as SquareMatrixSamplingSet, but:
        - unitdet (bool): Boolean specifying whether to sample from unit determinant
            matrices SU(n) (True) or arbitrary determinant matrices U(n) (False, default)
        The options 'complex' and 'norm' are ignored.

    Usage:
    ======
    >>> import six, pytest
    >>> if six.PY2:
    ...     pytest.skip('These doctests only work in python 3')

    By default, we generate 2x2 matrices:
    >>> matrices = UnitaryMatrices()
    >>> matrices.gen_sample().shape
    (2, 2)

    We can generate NxN matrices by specifying the dimension:
    >>> matrices = UnitaryMatrices(dimension=4)
    >>> matrices.gen_sample().shape
    (4, 4)

    If unitdet is specified, the determinant is 1:
    >>> from mitxgraders.helpers.calc import within_tolerance
    >>> matrices = UnitaryMatrices(unitdet=True)
    >>> within_tolerance(np.linalg.det(matrices.gen_sample()), 1, 5e-13)
    True

    Otherwise, it's typically not (though it could randomly be):
    >>> matrices = UnitaryMatrices(unitdet=False)
    >>> within_tolerance(np.linalg.det(matrices.gen_sample()), 1, 5e-13)
    False

    The resulting samples are unitary matrices:
    >>> matrices = UnitaryMatrices(unitdet=True)
    >>> m = matrices.gen_sample()
    >>> within_tolerance(m * np.conjugate(np.transpose(m)), MathArray(np.eye(2)), 5e-13)
    True
    >>> matrices = UnitaryMatrices(unitdet=False)
    >>> m = matrices.gen_sample()
    >>> within_tolerance(m * np.conjugate(np.transpose(m)), MathArray(np.eye(2)), 5e-13)
    True
    """
    # Add the 'unitdet' flag selecting SU(n) versus U(n) sampling.
    schema_config = SquareMatrixSamplingSet.schema_config.extend({
        Required('unitdet', default=False): bool
    })

    def generate_sample(self):
        """
        Generates a unitary matrix, rescaled to unit determinant if requested
        """
        # Generate the array
        array = unitary_group.rvs(self.config['dimension'])
        # Fix the determinant if need be
        if self.config['unitdet']:
            det = np.linalg.det(array)
            # Dividing U by det(U)**(1/n) gives determinant det/det = 1
            array /= det**(1/self.config['dimension'])
        # Return the result
        return array
# Loop until a good sample is found # Construct an array with entries in [-0.5, 0.5) # Make the array complex if needed # Apply any symmetries to the array # Normalize the result # Return the result # pragma: no cover Applies the required symmetries to the array. This method exists to be shadowed by subclasses. Normalizes the array to fall into the desired norm. This method can be shadowed by subclasses. Sampling set of vectors. While we cannot make this class abstract, you should use RealVectors or ComplexVectors instead. Config: ======= Same as ArraySamplingSet, but: - shape can be a plain integer indicating number of components - if shape is tuple/list, must have length 1 - default shape is (3, ), for a 3D vector Sampling set of real vectors. Config: ======= Same as VectorSamplingSet, but: - complex is always False Usage: ====== By default, vectors have 3 components: >>> vectors = RealVectors() >>> vectors.gen_sample().shape (3,) Sampling set of complex vectors. Config: ======= Same as VectorSamplingSet, but: - complex is always True Usage: ====== Complex vectors have complex components: >>> vectors = ComplexVectors() >>> v = vectors.gen_sample() >>> np.array_equal(v, np.conj(v)) False Sampling set of tensors. While we cannot make this class abstract, you should use RealTensors or ComplexTensors instead. Config: ======= Same as ArraySamplingSet, but: - shape must be a tuple with at least 3 dimensions Sampling set of real tensors. Config: ======= Same as TensorSamplingSet, but: - complex is always False Usage: ====== Sample tensors with shape [4, 2, 5]: >>> real_tensors = RealTensors(shape=[4, 2, 5]) >>> sample = real_tensors.gen_sample() >>> sample.shape (4, 2, 5) Samples are of class MathArray: >>> isinstance(sample, MathArray) True Specify a range for the tensor's norm: >>> real_tensors = RealTensors(shape=[4, 2, 5], norm=[10, 20]) >>> sample = real_tensors.gen_sample() >>> 10 < np.linalg.norm(sample) < 20 True Sampling set of complex tensors. 
Config: ======= Same as TensorSamplingSet, but: - complex is always True Usage: ====== Sample tensors with shape [4, 2, 5]: >>> tensors = ComplexTensors(shape=[4, 2, 5]) >>> t = tensors.gen_sample() >>> t.shape (4, 2, 5) Complex tensors have complex components: >>> np.array_equal(t, np.conj(t)) False Base sampling set of matrices. While we cannot make this class abstract, you should use a more specific subclass instead. Config: ======= Same as ArraySamplingSet, but: - shape must be a tuple/list with length 2 - default shape is (2, 2), for a 2x2 matrix Base sampling set of general matrices. While we cannot make this class abstract, you should use RealMatrices or ComplexMatrices instead. Config: ======= Same as MatrixSamplingSet, but: - triangular (None, 'upper', 'lower'): Specify if you want a triangular matrix (default None) Impose the triangular requirement on the array Sampling set of real matrices. Config: ======= Same as GeneralMatrices, but: - complex is always False Usage: ====== By default, matrices have two rows and two columns: >>> matrices = RealMatrices() >>> matrices.gen_sample().shape (2, 2) We can generate upper triangular matrices: >>> from mitxgraders.helpers.calc import within_tolerance >>> matrices = RealMatrices(triangular='upper') >>> m = matrices.gen_sample() >>> within_tolerance(m, MathArray(np.triu(m)), 0) True and lower triangular matrices: >>> matrices = RealMatrices(triangular='lower') >>> m = matrices.gen_sample() >>> within_tolerance(m, MathArray(np.tril(m)), 0) True Sampling set of complex matrices. Config: ======= Same as GeneralMatrices, but: - complex is always True Usage: ====== Complex matrices have complex components: >>> matrices = ComplexMatrices() >>> m = matrices.gen_sample() >>> np.array_equal(m, np.conj(m)) False Base sampling set of square matrices. While we cannot make this class abstract, you want to use a subclass instead (likely SquareMatrices). 
Config: ======= Same as MatrixSamplingSet, but: - dimension (int): Dimension of the matrix (minimum 2). The 'shape' property is not used. Configure the class as normal, then modify the shape appropriately Class representing a collection of multiples of the identity matrix of a given dimension. Config: ======= Same as MatrixSamplingSet, but: - sampler: A scalar sampling set for the multiplicative constant (default RealInterval([1, 5])) Note that the 'complex' and 'norm' properties are ignored. Usage: ====== By default, we generate 2x2 matrices: >>> matrices = IdentityMatrixMultiples() >>> matrices.gen_sample().shape (2, 2) We can generate NxN matrices by specifying the dimension: >>> matrices = IdentityMatrixMultiples(dimension=4) >>> matrices.gen_sample().shape (4, 4) The scalar multiple can be generated in a number of ways: >>> from mitxgraders import ComplexSector >>> matrices = IdentityMatrixMultiples(sampler=[1,3]) >>> sect = ComplexSector(modulus=[0,1], argument=[-np.pi,np.pi]) >>> matrices = IdentityMatrixMultiples(sampler=sect) The resulting samples are simply a scalar times the identity matrix: >>> matrices = IdentityMatrixMultiples() >>> m = matrices.gen_sample() >>> np.array_equal(m, m[0, 0] * np.eye(2)) True # Sampling set for the multiplicative constant # Accept anything that FormulaGrader would accept for a sampling set, restricted to # scalar sampling sets. Hence, ScalarSamplingSets and ranges are allowed. # Note: Does not support DependentSampler or DiscreteSet, as they are not guaranteed # to return a scalar value. Generates an identity matrix of specified dimension multiplied by a random scalar # Sample the multiplicative constant # Create the numpy matrix # Return the result Sampling set for square matrices. Various symmetry properties are possible, including diagonal, symmetric, antisymmetric, hermitian and antihermitian. The trace and determinant can also be controlled. 
There are four kinds of special square matrices that covered by other sampling sets: * OrthogonalMatrices * UnitaryMatrices * Multiples of the identity (use IdentityMatrixMultiples) * Triangular matrices (use RealMatrices or ComplexMatrices) Our approach to generating these matrices is to first generate a random real/complex matrix of the appropriate shape, and then enforce, in order: * diagonal/symmetric/antisymmetric/hermitian/antihermitian * tracelessness * determinant 0 or 1 * norm (if determinant != 1) The determinant step is sometimes problematic. To achieve unit determinant, we attempt to rescale the matrix. This can't always be done, and we try a new random generation in such cases. To achieve zero determinant, we attempt to subtract lambda*I from the matrix. This can't be done for traceless matrices while preserving those properties, and we also can't handle zero determinant antisymmetric matrices that are complex, or real in even dimensions. Some special cases that don't exist: * Real, diagonal, traceless, unit determinant, 2x2 matrix * Real, symmetric, traceless, unit determinant, 2x2 matrix * Hermitian, traceless, unit determinant, 2x2 matrix * Odd-dimension, unit-determinant antisymmetric matrix * Odd-dimension, unit-determinant antihermitian matrix Config: ======= Same as SquareMatrixSamplingSet, but: - symmetry (None, 'diagonal', 'symmetric', 'antisymmetric', 'hermitian', 'antihermitian'): Entry describing the desired symmetry of the matrix. Note: If 'hermitian' or 'antihermitian' are chosen, 'complex' is set to True. (default None) - traceless (bool): Whether or not to ensure the matrix is traceless (default False) - determinant (None, 0, 1): If set to 0 or 1, sets the determinant of the matrix to be 0 or 1 correspondingly. If None or 0, uses 'norm' to normalize the matrix. 
Usage: ====== By default, we generate real 2x2 matrices with no symmetry: >>> matrices = SquareMatrices() >>> mat = matrices.gen_sample() >>> mat.shape (2, 2) >>> np.array_equal(mat, np.conj(mat)) True We can make it NxN by specifying the dimension: >>> matrices = SquareMatrices(dimension=4) >>> matrices.gen_sample().shape (4, 4) Some combinations: diagonal, complex, traceless and unit determinant >>> from mitxgraders.helpers.calc import within_tolerance >>> matrices = SquareMatrices(symmetry='diagonal', complex=True, traceless=True, ... determinant=1) >>> mat = matrices.gen_sample() >>> np.array_equal(np.diag(np.diag(mat)), mat) # Diagonal True >>> np.array_equal(mat, np.conj(mat)) # Complex False >>> within_tolerance(mat.trace(), 0, 5e-13) # Traceless True >>> within_tolerance(np.linalg.det(mat), 1, 1e-12) # Unit determinant True More combinations: symmetric, real, zero determinant and norm in [6, 10] >>> matrices = SquareMatrices(symmetry='symmetric', determinant=0, norm=[6, 10]) >>> mat = matrices.gen_sample() >>> np.array_equal(mat, mat.T) # Symmetric True >>> np.array_equal(mat, np.conj(mat)) # Real True >>> within_tolerance(np.linalg.det(mat), 0, 1e-12) # Zero determinant True >>> 6 <= np.linalg.norm(mat) <= 10 # Norm in [6, 10] True More combinations: antisymmetric and complex >>> matrices = SquareMatrices(symmetry='antisymmetric', complex=True) >>> mat = matrices.gen_sample() >>> np.array_equal(mat, -mat.T) # Antisymmetric True >>> np.array_equal(mat, np.conj(mat)) # Complex False More combinations: hermitian (enforces complex), zero determinant and norm in [6, 10] >>> matrices = SquareMatrices(symmetry='hermitian', determinant=0, norm=[6, 10]) >>> mat = matrices.gen_sample() >>> np.array_equal(mat, np.conj(mat.T)) # Hermitian True >>> within_tolerance(np.linalg.det(mat), 0, 1e-12) # Zero determinant True >>> 6 <= np.linalg.norm(mat) <= 10 # Norm in [6, 10] True More combinations: antihermitian (enforces complex), unit determinant and traceless >>> 
matrices = SquareMatrices(symmetry='antihermitian', determinant=1, traceless=True) >>> mat = matrices.gen_sample() >>> np.array_equal(mat, -np.conj(mat.T)) # Antihermitian True >>> within_tolerance(np.linalg.det(mat), 1, 1e-12) # Unit determinant True >>> within_tolerance(mat.trace(), 0, 5e-13) # Traceless True Configure the class as normal, then set complex for hermitian/antihermitian # A couple of cases that are possible but we can't handle: # Real antisymmetric matrices in odd dimension automatically have zero determinant # And a handful of cases that don't exist # Odd dimension # Eigenvalues are all imaginary, so determinant is imaginary # Eigenvalues are all imaginary, so determinant is imaginary Applies the required symmetries to the array # Apply the symmetry property # Apply the traceless property Set either the norm or determinant of the matrix to the desired value. # No need to normalize Scale an array to have unit determinant, or raise Retry if not possible. Note that odd-dimensional antisymmetric and antihermitian symmetries should not be able to make it to here, as their determinants are always purely imaginary. # Compute the determinant # Is the determinant guaranteed to be real? # Get rid of numerical error # This is the easy case: Just scale the determinant # Odd-dimension matrices can also have their determinant scaled # Can't rescale our way out of this one # Check to ensure that det isn't 0 before we get a division by zero # pragma: no cover # Complex matrices are easy: we can just rescale the matrix # We should never get here # pragma: no cover Modify an array to have zero determinant, or raise Retry if not possible # This is close enough to zero for our purposes! # This occurs for real, antisymmetric matrices in odd dimensions, for example. # Pick a random number! # What's our symmetry? 
# Choose a random diagonal entry to be zero # Eigenvalues are all real - use special algorithm to compute eigenvalues # Eigenvalues are all imaginary # Temporarily convert the matrix into a hermitian matrix # and use the special algorithm # No relevant symmetry. Use a general algorithm to compute eigenvalues. # We need to select a real eigenvalue. # idxs now stores any indices that have real eigenvalues # No real eigenvalues. Try again. # pragma: no cover # np.random.choice was introduced in 1.7.0; edX has 1.6.0 # Subtract the eigenvalue from the array Sampling set for orthogonal matrices. Note: This will only work with scipy 0.18 and numpy 1.7.1, which requires the python3 implementation of edX. Config: ======= Same as SquareMatrixSamplingSet, but: - unitdet (bool): Boolean specifying whether to sample from unit determinant matrices SO(n) (True) or arbitrary determinant matrices O(n) (False, default) The options 'complex' and 'norm' are ignored. Usage: ====== >>> import six, pytest >>> if six.PY2: ... pytest.skip('These doctests only work in python 3') By default, we generate 2x2 matrices: >>> matrices = OrthogonalMatrices() >>> matrices.gen_sample().shape (2, 2) We can generate NxN matrices by specifying the dimension: >>> matrices = OrthogonalMatrices(dimension=4) >>> matrices.gen_sample().shape (4, 4) If unitdet is specified, the determinant is 1: >>> from mitxgraders.helpers.calc import within_tolerance >>> matrices = OrthogonalMatrices(unitdet=True) >>> within_tolerance(np.linalg.det(matrices.gen_sample()), 1, 5e-13) True Otherwise, it could be +1 or -1. 
The resulting samples are orthogonal matrices: >>> matrices = OrthogonalMatrices(unitdet=True) >>> m = matrices.gen_sample() >>> within_tolerance(m * np.transpose(m), MathArray(np.eye(2)), 5e-13) True >>> matrices = OrthogonalMatrices(unitdet=False) >>> m = matrices.gen_sample() >>> within_tolerance(m * np.transpose(m), MathArray(np.eye(2)), 5e-13) True Generates an orthogonal matrix # Generate the array # Return the result Sampling set for unitary matrices. Note: This will only work with scipy 0.18 and numpy 1.7.1, which requires the python3 implementation of edX. Config: ======= Same as SquareMatrixSamplingSet, but: - unitdet (bool): Boolean specifying whether to sample from unit determinant matrices SU(n) (True) or arbitrary determinant matrices U(n) (False, default) The options 'complex' and 'norm' are ignored. Usage: ====== >>> import six, pytest >>> if six.PY2: ... pytest.skip('These doctests only work in python 3') By default, we generate 2x2 matrices: >>> matrices = UnitaryMatrices() >>> matrices.gen_sample().shape (2, 2) We can generate NxN matrices by specifying the dimension: >>> matrices = UnitaryMatrices(dimension=4) >>> matrices.gen_sample().shape (4, 4) If unitdet is specified, the determinant is 1: >>> from mitxgraders.helpers.calc import within_tolerance >>> matrices = UnitaryMatrices(unitdet=True) >>> within_tolerance(np.linalg.det(matrices.gen_sample()), 1, 5e-13) True Otherwise, it's typically not (though it could randomly be): >>> matrices = UnitaryMatrices(unitdet=False) >>> within_tolerance(np.linalg.det(matrices.gen_sample()), 1, 5e-13) False The resulting samples are unitary matrices: >>> matrices = UnitaryMatrices(unitdet=True) >>> m = matrices.gen_sample() >>> within_tolerance(m * np.conjugate(np.transpose(m)), MathArray(np.eye(2)), 5e-13) True >>> matrices = UnitaryMatrices(unitdet=False) >>> m = matrices.gen_sample() >>> within_tolerance(m * np.conjugate(np.transpose(m)), MathArray(np.eye(2)), 5e-13) True Generates an orthogonal matrix 
as appropriate # Generate the array # Fix the determinant if need be # Return the result | 2.912172 | 3 |
module2-sql-for-analysis/insert_titanic.py | ivaben/DS-Unit-3-Sprint-2-SQL-and-Databases | 0 | 6613017 | import sqlite3
import psycopg2
import pandas
titanic_df = pandas.read_csv('titanic.csv')
dbname = 'wuufwvap'
user = 'wuufwvap'
password = '<PASSWORD>'
host = 'rajje.db.elephantsql.com'
pg_conn = psycopg2.connect(dbname=dbname, user=user, password=password, host=host)
pg_curs = pg_conn.cursor()
# enumerated type of declaration
CREATE TYPE survived_e AS ENUM ('0','1');
CREATE TYPE gender_e AS ENUM ('male', 'female');
pg_curs.execute(survived_e)
pg_conn.commit()
pg_curs.execute(gender_e)
pg_conn.commit()
create_table_statement = """
CREATE TABLE titanic ( Survived survived_e,
Pclass INT,
Name VARCHAR(150),
Sex gender_e,
Age NUMERIC(6),
Siblings Spouses VARCHAR(40),
Aboard INT,
Parents Children INT,
Aboard INT,
Fare FLOAT
);
"""
pg_curs.execute(create_table_statement)
pg_conn.commit()
insert_statement = """
INSERT INTO titanic_table (Survived,
Pclass,
Name,
Sex,
Age,
Siblings/Spouses,
Aboard,
Parents/Children,
Aboard,
Fare ) VALUES ();
"""
pg_curs.execute(insert_statement)
pg_conn.commit()
query = "SELECT * FROM titanic_table;"
pg_curs.execute(query)
pg_curs.fetchall()
| import sqlite3
import psycopg2
import pandas
titanic_df = pandas.read_csv('titanic.csv')
dbname = 'wuufwvap'
user = 'wuufwvap'
password = '<PASSWORD>'
host = 'rajje.db.elephantsql.com'
pg_conn = psycopg2.connect(dbname=dbname, user=user, password=password, host=host)
pg_curs = pg_conn.cursor()
# enumerated type of declaration
CREATE TYPE survived_e AS ENUM ('0','1');
CREATE TYPE gender_e AS ENUM ('male', 'female');
pg_curs.execute(survived_e)
pg_conn.commit()
pg_curs.execute(gender_e)
pg_conn.commit()
create_table_statement = """
CREATE TABLE titanic ( Survived survived_e,
Pclass INT,
Name VARCHAR(150),
Sex gender_e,
Age NUMERIC(6),
Siblings Spouses VARCHAR(40),
Aboard INT,
Parents Children INT,
Aboard INT,
Fare FLOAT
);
"""
pg_curs.execute(create_table_statement)
pg_conn.commit()
insert_statement = """
INSERT INTO titanic_table (Survived,
Pclass,
Name,
Sex,
Age,
Siblings/Spouses,
Aboard,
Parents/Children,
Aboard,
Fare ) VALUES ();
"""
pg_curs.execute(insert_statement)
pg_conn.commit()
query = "SELECT * FROM titanic_table;"
pg_curs.execute(query)
pg_curs.fetchall()
| en | 0.682562 | # enumerated type of declaration CREATE TABLE titanic ( Survived survived_e, Pclass INT, Name VARCHAR(150), Sex gender_e, Age NUMERIC(6), Siblings Spouses VARCHAR(40), Aboard INT, Parents Children INT, Aboard INT, Fare FLOAT ); INSERT INTO titanic_table (Survived, Pclass, Name, Sex, Age, Siblings/Spouses, Aboard, Parents/Children, Aboard, Fare ) VALUES (); | 3.253057 | 3 |
assimilate-assess.py | macdaliot/assimilate | 81 | 6613018 | #
# Assimilate-Assess.py
# Copyright 2017 <NAME>
# Credit for the excellent BroLogReader code is to Mike Sconzo - https://github.com/ClickSecurity/data_hacking/blob/master/browser_fingerprinting/bro_log_reader.py
#
import os, io, csv, datetime, itertools
import numpy
from sklearn.externals import joblib
from pandas import DataFrame
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from optparse import OptionParser
from assimilate_utils import BroLogReader
if __name__ == "__main__":
__version__ = '1.0'
usage = """assimilate-assess [options] bro_http_header_file"""
parser = OptionParser(usage=usage, version=__version__)
parser.add_option("-f", "--headerfile", action="store", type="string", \
default=None, help="the Bro HTTP Header file to analyze")
parser.add_option("-d", "--dirheaderfiles", action="store", type="string", \
default=None, help="directory of Bro HTTP Header files to analyze")
parser.add_option("-b", "--bayesianfile", action="store", type="string", \
default='./nb.pkl', help="the location to load the bayesian classifier")
parser.add_option("-x", "--vectorizerfile", action="store", type="string", \
default='./vectorizers.pkl', help="the location to load the vectorizer")
parser.add_option("-o", "--outputfile", action="store", type="string", \
default=None, help="the file to store results in")
parser.add_option("-v", "--verbose", action="store_true", default=False, \
help="enable verbose output")
(opts, args) = parser.parse_args()
if (opts.headerfile == None) & (opts.dirheaderfiles == None):
parser.error('Need either a bro_http_header_file or a directory of bro_header_files to assess')
blr = BroLogReader()
data = DataFrame({'header': [], 'class': []})
header_rows = []
vectorizer = CountVectorizer()
counts = vectorizer
classifier = MultinomialNB()
print('Loading models...')
classifier = joblib.load(opts.bayesianfile)
vectorizer = joblib.load(opts.vectorizerfile)
if opts.headerfile != None:
print('Assessing HTTP Header file...')
header_rows = blr.dataFrameFromFile(opts.headerfile)
rowindex = 1
if opts.outputfile != None:
of = open(opts.outputfile, "w")
for r1 in header_rows:
if opts.verbose:
print("Checking line "+str(rowindex))
indhdr = [r1['header']]
tstcounts = vectorizer.transform(indhdr)
predictions = classifier.predict(tstcounts)
if predictions[0] == 'bad':
if len(r1['header']) > 60:
print("Line "+str(rowindex)+" looks suspicious: "+r1['header'][:60])
else:
print("Line "+str(rowindex)+" looks suspicious: "+r1['header'])
if opts.outputfile != None:
of.write("Line "+str(rowindex)+" looks suspicious: "+r1['header']+"\n")
rowindex += 1
if opts.outputfile != None:
of.close()
print('Done!')
else:
print('Assessing directory '+opts.dirheaderfiles+'...')
header_rows = blr.AssessdataFrameFromDirectory(opts.dirheaderfiles)
rowindex = 1
fn = header_rows[0]['filename']
if opts.outputfile != None:
of = open(opts.outputfile, "w")
for r1 in header_rows:
if fn != r1['filename']:
rowindex = 1
fn = r1['filename']
if opts.verbose:
print("Checking file "+r1['filename']+" line "+str(rowindex)+" of file "+r1['filename'])
indhdr = [r1['header']]
tstcounts = vectorizer.transform(indhdr)
predictions = classifier.predict(tstcounts)
if predictions[0] == 'bad':
if len(r1['header']) > 40:
print("File "+r1['filename']+" Line "+str(rowindex)+" looks suspicious: "+r1['header'][:40])
else:
print("File "+r1['filename']+" Line "+str(rowindex)+" looks suspicious: "+r1['header'])
if opts.outputfile != None:
of.write("File "+r1['filename']+" Line "+str(rowindex)+" looks suspicious: "+r1['header']+"\n")
rowindex += 1
if opts.outputfile != None:
of.close()
print('Done!')
| #
# Assimilate-Assess.py
# Copyright 2017 <NAME>
# Credit for the excellent BroLogReader code is to Mike Sconzo - https://github.com/ClickSecurity/data_hacking/blob/master/browser_fingerprinting/bro_log_reader.py
#
import os, io, csv, datetime, itertools
import numpy
from sklearn.externals import joblib
from pandas import DataFrame
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from optparse import OptionParser
from assimilate_utils import BroLogReader
if __name__ == "__main__":
__version__ = '1.0'
usage = """assimilate-assess [options] bro_http_header_file"""
parser = OptionParser(usage=usage, version=__version__)
parser.add_option("-f", "--headerfile", action="store", type="string", \
default=None, help="the Bro HTTP Header file to analyze")
parser.add_option("-d", "--dirheaderfiles", action="store", type="string", \
default=None, help="directory of Bro HTTP Header files to analyze")
parser.add_option("-b", "--bayesianfile", action="store", type="string", \
default='./nb.pkl', help="the location to load the bayesian classifier")
parser.add_option("-x", "--vectorizerfile", action="store", type="string", \
default='./vectorizers.pkl', help="the location to load the vectorizer")
parser.add_option("-o", "--outputfile", action="store", type="string", \
default=None, help="the file to store results in")
parser.add_option("-v", "--verbose", action="store_true", default=False, \
help="enable verbose output")
(opts, args) = parser.parse_args()
if (opts.headerfile == None) & (opts.dirheaderfiles == None):
parser.error('Need either a bro_http_header_file or a directory of bro_header_files to assess')
blr = BroLogReader()
data = DataFrame({'header': [], 'class': []})
header_rows = []
vectorizer = CountVectorizer()
counts = vectorizer
classifier = MultinomialNB()
print('Loading models...')
classifier = joblib.load(opts.bayesianfile)
vectorizer = joblib.load(opts.vectorizerfile)
if opts.headerfile != None:
print('Assessing HTTP Header file...')
header_rows = blr.dataFrameFromFile(opts.headerfile)
rowindex = 1
if opts.outputfile != None:
of = open(opts.outputfile, "w")
for r1 in header_rows:
if opts.verbose:
print("Checking line "+str(rowindex))
indhdr = [r1['header']]
tstcounts = vectorizer.transform(indhdr)
predictions = classifier.predict(tstcounts)
if predictions[0] == 'bad':
if len(r1['header']) > 60:
print("Line "+str(rowindex)+" looks suspicious: "+r1['header'][:60])
else:
print("Line "+str(rowindex)+" looks suspicious: "+r1['header'])
if opts.outputfile != None:
of.write("Line "+str(rowindex)+" looks suspicious: "+r1['header']+"\n")
rowindex += 1
if opts.outputfile != None:
of.close()
print('Done!')
else:
print('Assessing directory '+opts.dirheaderfiles+'...')
header_rows = blr.AssessdataFrameFromDirectory(opts.dirheaderfiles)
rowindex = 1
fn = header_rows[0]['filename']
if opts.outputfile != None:
of = open(opts.outputfile, "w")
for r1 in header_rows:
if fn != r1['filename']:
rowindex = 1
fn = r1['filename']
if opts.verbose:
print("Checking file "+r1['filename']+" line "+str(rowindex)+" of file "+r1['filename'])
indhdr = [r1['header']]
tstcounts = vectorizer.transform(indhdr)
predictions = classifier.predict(tstcounts)
if predictions[0] == 'bad':
if len(r1['header']) > 40:
print("File "+r1['filename']+" Line "+str(rowindex)+" looks suspicious: "+r1['header'][:40])
else:
print("File "+r1['filename']+" Line "+str(rowindex)+" looks suspicious: "+r1['header'])
if opts.outputfile != None:
of.write("File "+r1['filename']+" Line "+str(rowindex)+" looks suspicious: "+r1['header']+"\n")
rowindex += 1
if opts.outputfile != None:
of.close()
print('Done!')
| en | 0.587975 | # # Assimilate-Assess.py # Copyright 2017 <NAME> # Credit for the excellent BroLogReader code is to Mike Sconzo - https://github.com/ClickSecurity/data_hacking/blob/master/browser_fingerprinting/bro_log_reader.py # assimilate-assess [options] bro_http_header_file | 2.566307 | 3 |
test/magic_method07.py | pythonDa/base_study | 0 | 6613019 | <reponame>pythonDa/base_study
"""
比较操作:
__cmp__ python3好像不支持此方法
__eq__
__ne__
__lt__
__gt__
"""
from filecmp import cmp
class Person01(object):
"""
__cmp__ 比较
"""
def __init__(self, uid):
self.uid = uid
def __cmp__(self, other):
if self.uid == other.uid:
return 0
elif self.uid > other.uid:
return 1
return -1
def __eq__(self, other):
return self.uid == other.uid
def __gt__(self, other):
return self.uid > other.uid
def __lt__(self, other):
return self.uid < other.uid
if __name__ == '__main__':
p1 = Person01(1)
p2 = Person01(2)
# c = p1 > p2
# print(c)
print(p1 > p2) | """
比较操作:
__cmp__ python3好像不支持此方法
__eq__
__ne__
__lt__
__gt__
"""
from filecmp import cmp
class Person01(object):
"""
__cmp__ 比较
"""
def __init__(self, uid):
self.uid = uid
def __cmp__(self, other):
if self.uid == other.uid:
return 0
elif self.uid > other.uid:
return 1
return -1
def __eq__(self, other):
return self.uid == other.uid
def __gt__(self, other):
return self.uid > other.uid
def __lt__(self, other):
return self.uid < other.uid
if __name__ == '__main__':
p1 = Person01(1)
p2 = Person01(2)
# c = p1 > p2
# print(c)
print(p1 > p2) | zh | 0.503165 | 比较操作: __cmp__ python3好像不支持此方法 __eq__ __ne__ __lt__ __gt__ __cmp__ 比较 # c = p1 > p2 # print(c) | 3.954092 | 4 |
hello.py | wenkejiang/say2hello | 0 | 6613020 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/8/7 17:06
# @Author : JiangWenKe
# @Site :
# @File : hello.py
# @Software: PyCharm
def say():
print("hello pip...")
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/8/7 17:06
# @Author : JiangWenKe
# @Site :
# @File : hello.py
# @Software: PyCharm
def say():
print("hello pip...")
| en | 0.228715 | #!/usr/bin/env python # -*- coding: utf-8 -*- # @Time : 2021/8/7 17:06 # @Author : JiangWenKe # @Site : # @File : hello.py # @Software: PyCharm | 1.569282 | 2 |
src/normalization.py | masashi-y/abduction_kbc | 8 | 6613021 | <reponame>masashi-y/abduction_kbc<filename>src/normalization.py
# -*- coding: utf-8 -*-
#
# Copyright 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
def normalize_token(token):
"""
Convert symbols to avoid collisions with reserved punctuation
in NLTK and coq.
To avoid collisions with reserved words, we prefix each token
with an underscore '_'.
"""
normalized = token
normalized = re.sub(r'\.', '_DOT', normalized)
normalized = re.sub(r',', '_COMMA', normalized)
normalized = re.sub(r'\(', '_LEFTB', normalized)
normalized = re.sub(r'\)', '_RIGHTB', normalized)
normalized = re.sub(r'^-$', '_HYPHEN', normalized)
normalized = re.sub(r'^&$', '_AMPERSAND', normalized)
normalized = re.sub(r'-', '_dash_', normalized)
if not normalized.startswith('_'):
normalized = '_' + normalized
return normalized
def denormalize_token(token):
"""
Unconvert symbols. This is the reverse operation as above.
"""
denormalized = token
denormalized = re.sub('_DOT', r'\.', denormalized)
denormalized = re.sub('_COMMA', r',', denormalized)
denormalized = re.sub('_LEFTB', r'\(', denormalized)
denormalized = re.sub('_RIGHTB', r'\)', denormalized)
denormalized = re.sub('_HYPHEN', r'^-$', denormalized)
denormalized = re.sub('_AMPERSAND', r'^&$', denormalized)
denormalized = re.sub('_dash_', r'-', denormalized)
denormalized = denormalized.lstrip('_')
return denormalized
| # -*- coding: utf-8 -*-
#
# Copyright 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
def normalize_token(token):
"""
Convert symbols to avoid collisions with reserved punctuation
in NLTK and coq.
To avoid collisions with reserved words, we prefix each token
with an underscore '_'.
"""
normalized = token
normalized = re.sub(r'\.', '_DOT', normalized)
normalized = re.sub(r',', '_COMMA', normalized)
normalized = re.sub(r'\(', '_LEFTB', normalized)
normalized = re.sub(r'\)', '_RIGHTB', normalized)
normalized = re.sub(r'^-$', '_HYPHEN', normalized)
normalized = re.sub(r'^&$', '_AMPERSAND', normalized)
normalized = re.sub(r'-', '_dash_', normalized)
if not normalized.startswith('_'):
normalized = '_' + normalized
return normalized
def denormalize_token(token):
"""
Unconvert symbols. This is the reverse operation as above.
"""
denormalized = token
denormalized = re.sub('_DOT', r'\.', denormalized)
denormalized = re.sub('_COMMA', r',', denormalized)
denormalized = re.sub('_LEFTB', r'\(', denormalized)
denormalized = re.sub('_RIGHTB', r'\)', denormalized)
denormalized = re.sub('_HYPHEN', r'^-$', denormalized)
denormalized = re.sub('_AMPERSAND', r'^&$', denormalized)
denormalized = re.sub('_dash_', r'-', denormalized)
denormalized = denormalized.lstrip('_')
return denormalized | en | 0.865194 | # -*- coding: utf-8 -*- # # Copyright 2015 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Convert symbols to avoid collisions with reserved punctuation in NLTK and coq. To avoid collisions with reserved words, we prefix each token with an underscore '_'. Unconvert symbols. This is the reverse operation as above. | 2.647728 | 3 |
app/services.py | vend/SampleOAuth2_UsingPythonClient | 1 | 6613022 | import requests
from django.conf import settings
from quickbooks.objects.salesreceipt import SalesReceipt
from quickbooks.objects.detailline import SalesItemLine, SalesItemLineDetail, Ref
from quickbooks.objects.invoice import Invoice
import json
def qbo_api_call(access_token, realm_id):
"""[summary]
"""
if settings.ENVIRONMENT == 'production':
base_url = settings.QBO_BASE_PROD
else:
base_url = settings.QBO_BASE_SANDBOX
route = '/v3/company/{0}/companyinfo/{0}'.format(realm_id)
auth_header = 'Bearer {0}'.format(access_token)
headers = {
'Authorization': auth_header,
'Accept': 'application/json'
}
return requests.get('{0}{1}'.format(base_url, route), headers=headers)
sale_json = """[
{
"id": "e541c29f-8657-a3be-11e9-802e3ba64743",
"short_code": "a5bx9l",
"sale_date": "2019-05-27T15:19:52+12:00",
"status": "CLOSED",
"customer_id": null,
"register_id": "02dcd191-ae2b-11e6-f485-a54b98a1d0bc",
"user_id": "02dcd191-ae2b-11e6-f485-a54b98a2e27e",
"invoice_number": "155",
"invoice_sequence": 155,
"total_tax": "0.65217",
"total_price": "4.34783",
"note": "cash payment single product",
"receipt_address": "",
"register_sale_products": [
{
"id": "e541c29f-8657-a3be-11e9-802e43390f65",
"product_id": "product1-id",
"price": 4.34783,
"price_set": 0,
"discount": 0,
"tax": "0.65217",
"tax_id": "02dcd191-ae2b-11e6-f485-a54b9896a941",
"quantity": "1",
"sequence": 0,
"status": "SAVED",
"attributes": [{ "name": "line_note", "value": "" }]
}
],
"register_sale_payments": [
{
"id": "e541c29f-8657-a3be-11e9-802e4abf2d57",
"register_id": "02dcd191-ae2b-11e6-f485-a54b98a1d0bc",
"payment_type_id": 1,
"retailer_payment_type_id": "02dcd191-ae2b-11e6-f485-a54b98a273f4",
"payment_date": "2019-05-27T15:19:52+12:00",
"amount": "5",
"currency": "NZD"
}
]
},
{
"id": "e541c29f-8657-a3be-11e9-802e3ba64743",
"short_code": "a5bx9l",
"sale_date": "2019-05-27T15:19:52+12:00",
"status": "CLOSED",
"customer_id": null,
"register_id": "02dcd191-ae2b-11e6-f485-a54b98a1d0bc",
"user_id": "02dcd191-ae2b-11e6-f485-a54b98a2e27e",
"invoice_number": "155",
"invoice_sequence": 155,
"total_tax": "0.65217",
"total_price": "4.34783",
"note": "cash payment two line item, different product and different account code",
"receipt_address": "",
"register_sale_products": [
{
"id": "e541c29f-8657-a3be-11e9-802e43390f65",
"product_id": "product1-id",
"price": 4.34783,
"price_set": 0,
"discount": 0,
"tax": "0.65217",
"tax_id": "02dcd191-ae2b-11e6-f485-a54b9896a941",
"quantity": "1",
"sequence": 0,
"status": "SAVED",
"attributes": [{ "name": "line_note", "value": "" }]
},
{
"id": "e541c29f-8657-a3be-11e9-802e43390f64",
"product_id": "product2-id",
"price": 4.34783,
"price_set": 0,
"discount": 0,
"tax": "0.65217",
"tax_id": "02dcd191-ae2b-11e6-f485-a54b9896a941",
"quantity": "1",
"sequence": 0,
"status": "SAVED",
"attributes": [{ "name": "line_note", "value": "" }]
}
],
"register_sale_payments": [
{
"id": "e541c29f-8657-a3be-11e9-802e4abf2d57",
"register_id": "02dcd191-ae2b-11e6-f485-a54b98a1d0bc",
"payment_type_id": 1,
"retailer_payment_type_id": "02dcd191-ae2b-11e6-f485-a54b98a273f4",
"payment_date": "2019-05-27T15:19:52+12:00",
"amount": "10",
"currency": "NZD"
}
]
},
{
"id": "e541c29f-8657-a3be-11e9-802e3ba64743",
"short_code": "a5bx9l",
"sale_date": "2019-05-27T15:19:52+12:00",
"status": "CLOSED",
"customer_id": null,
"register_id": "02dcd191-ae2b-11e6-f485-a54b98a1d0bc",
"user_id": "02dcd191-ae2b-11e6-f485-a54b98a2e27e",
"invoice_number": "155",
"invoice_sequence": 155,
"total_tax": "0.65217",
"total_price": "4.34783",
"note": "cash payment two line item 2 different product, same code",
"receipt_address": "",
"register_sale_products": [
{
"id": "e541c29f-8657-a3be-11e9-802e43390f65",
"product_id": "product2-id",
"price": 4.34783,
"price_set": 0,
"discount": 0,
"tax": "0.65217",
"tax_id": "02dcd191-ae2b-11e6-f485-a54b9896a941",
"quantity": "1",
"sequence": 0,
"status": "SAVED",
"attributes": [{ "name": "line_note", "value": "" }]
},
{
"id": "e541c29f-8657-a3be-11e9-802e43390f64",
"product_id": "product3-id",
"price": 4.34783,
"price_set": 0,
"discount": 0,
"tax": "0.65217",
"tax_id": "02dcd191-ae2b-11e6-f485-a54b9896a941",
"quantity": "1",
"sequence": 0,
"status": "SAVED",
"attributes": [{ "name": "line_note", "value": "" }]
}
],
"register_sale_payments": [
{
"id": "e541c29f-8657-a3be-11e9-802e4abf2d57",
"register_id": "02dcd191-ae2b-11e6-f485-a54b98a1d0bc",
"payment_type_id": 1,
"retailer_payment_type_id": "02dcd191-ae2b-11e6-f485-a54b98a273f4",
"payment_date": "2019-05-27T15:19:52+12:00",
"amount": "10",
"currency": "NZD"
}
]
},
{
"id": "e541c29f-8657-a3be-11e9-802e3ba64743",
"short_code": "a5bx9l",
"sale_date": "2019-05-27T15:19:52+12:00",
"status": "CLOSED",
"customer_id": null,
"register_id": "02dcd191-ae2b-11e6-f485-a54b98a1d0bc",
"user_id": "02dcd191-ae2b-11e6-f485-a54b98a2e27e",
"invoice_number": "155",
"invoice_sequence": 155,
"total_tax": "0.65217",
"total_price": "4.34783",
"note": "cash payment two line item same product, same code",
"receipt_address": "",
"register_sale_products": [
{
"id": "e541c29f-8657-a3be-11e9-802e43390f65",
"product_id": "product2-id",
"price": 4.34783,
"price_set": 0,
"discount": 0,
"tax": "0.65217",
"tax_id": "02dcd191-ae2b-11e6-f485-a54b9896a941",
"quantity": "1",
"sequence": 0,
"status": "SAVED",
"attributes": [{ "name": "line_note", "value": "" }]
},
{
"id": "e541c29f-8657-a3be-11e9-802e43390f64",
"product_id": "product2-id",
"price": 4.34783,
"price_set": 0,
"discount": 0,
"tax": "0.65217",
"tax_id": "02dcd191-ae2b-11e6-f485-a54b9896a941",
"quantity": "1",
"sequence": 0,
"status": "SAVED",
"attributes": [{ "name": "line_note", "value": "" }]
}
],
"register_sale_payments": [
{
"id": "e541c29f-8657-a3be-11e9-802e4abf2d57",
"register_id": "02dcd191-ae2b-11e6-f485-a54b98a1d0bc",
"payment_type_id": 1,
"retailer_payment_type_id": "02dcd191-ae2b-11e6-f485-a54b98a273f4",
"payment_date": "2019-05-27T15:19:52+12:00",
"amount": "10",
"currency": "NZD"
}
]
}
]
"""
# Create your models here.
product_to_account_code = {
"product1-id": 1,
"product2-id": 2,
"product3-id": 2,
"0242ac12-0002-11e9-e945-7d27ba9c3903": 1,
"0242ac12-0002-11e9-e945-7d27bac8894e": 2
}
sales = json.loads(sale_json)
def generate_sale_invoice(sales):
payload = {
"Line": [],
"CustomerRef": {
"value": "1"
}
}
sale_invoice = Invoice()
account_code_tax_code_mapping = {}
overall_total_amount = 0
# iterate over the sales to aggregate the line items by account code and tax code
for sale in sales:
for line_item in sale["register_sale_products"]:
account_code = product_to_account_code[line_item["product_id"]]
tax_code = line_item["tax_id"]
key = (account_code, tax_code)
price = float(line_item["price"])
quantity = float(line_item["quantity"])
tax = float(line_item["tax"])
total_amount = (price + tax) * quantity
if key in account_code_tax_code_mapping:
account_code_tax_code_mapping[key] += total_amount
else:
account_code_tax_code_mapping[key] = total_amount
overall_total_amount += total_amount
# sale_invoice.TotalAmt = overall_total_amount
payload["TotalAmt"] = overall_total_amount
for key, value in account_code_tax_code_mapping.items():
account_code = key[0]
tax_code = key[1]
external_tax_code = "NON"
if tax_code == "02dcd191-ae2b-11e6-f485-a54b9896a941" or tax_code == "00000000-0002-0002-0002-000000000003":
external_tax_code = "TAX"
# line_item = SalesItemLine()
# line_item.Amount = value
# line_item.Description = "Account code: {} and Tax code: {}".format(account_code, tax_code)
line_item = {
"DetailType": "SalesItemLineDetail",
"Amount": value,
"Description": "Account code: {} and Tax code: {}".format(account_code, external_tax_code),
"SalesItemLineDetail": {
"ItemRef": {
},
"TaxCodeRef": {
}
}
}
line_item["SalesItemLineDetail"]["ItemRef"]["value"] = account_code
line_item["SalesItemLineDetail"]["TaxCodeRef"]["value"] = external_tax_code
payload["Line"].append(line_item)
return payload
def generate_sale_payment(sales, external_invoice_id):
payload = {
"TotalAmt": 0,
"CustomerRef": {
"value": "1"
},
"Line": [
{
"Amount": 0,
"LinkedTxn": [
{
"TxnId": external_invoice_id,
"TxnType": "Invoice"
}
]
}
]
}
# assume only 1 type of payment
total_cash_payment = 0
# iterate over the sales to aggregate the line items by account code and tax code
for sale in sales:
for payment in sale["register_sale_payments"]:
amount = float(payment["amount"])
total_cash_payment += amount
payload["TotalAmt"] = total_cash_payment
payload["Line"][0]["Amount"] = total_cash_payment
return payload
def post_sale_invoice(access_token, realm_id, sales=sales):
"""[summary]
"""
if settings.ENVIRONMENT == 'production':
base_url = settings.QBO_BASE_PROD
else:
base_url = settings.QBO_BASE_SANDBOX
route = '/v3/company/{0}/invoice'.format(realm_id)
auth_header = 'Bearer {0}'.format(access_token)
headers = {
'Authorization': auth_header,
'Accept': 'application/json'
}
payload = generate_sale_invoice(sales)
return requests.post('{0}{1}'.format(base_url, route), json=payload, headers=headers)
def post_invoice_payment(access_token, realm_id, external_invoice_id, sales=sales):
"""[summary]
"""
if settings.ENVIRONMENT == 'production':
base_url = settings.QBO_BASE_PROD
else:
base_url = settings.QBO_BASE_SANDBOX
route = '/v3/company/{0}/payment'.format(realm_id)
auth_header = 'Bearer {0}'.format(access_token)
headers = {
'Authorization': auth_header,
'Accept': 'application/json'
}
payload = generate_sale_payment(sales, external_invoice_id)
print(payload)
return requests.post('{0}{1}'.format(base_url, route), json=payload, headers=headers)
def retrieve_vend_sales():
path = "https://weggieincl.dev.vendhq.works/api/register_sales"
auth_header = "Bearer {0}".format("<KEY>")
headers = {
'Authorization': auth_header,
'Accept': 'application/json'
}
return requests.get(path, headers=headers) | import requests
from django.conf import settings
from quickbooks.objects.salesreceipt import SalesReceipt
from quickbooks.objects.detailline import SalesItemLine, SalesItemLineDetail, Ref
from quickbooks.objects.invoice import Invoice
import json
def qbo_api_call(access_token, realm_id):
"""[summary]
"""
if settings.ENVIRONMENT == 'production':
base_url = settings.QBO_BASE_PROD
else:
base_url = settings.QBO_BASE_SANDBOX
route = '/v3/company/{0}/companyinfo/{0}'.format(realm_id)
auth_header = 'Bearer {0}'.format(access_token)
headers = {
'Authorization': auth_header,
'Accept': 'application/json'
}
return requests.get('{0}{1}'.format(base_url, route), headers=headers)
sale_json = """[
{
"id": "e541c29f-8657-a3be-11e9-802e3ba64743",
"short_code": "a5bx9l",
"sale_date": "2019-05-27T15:19:52+12:00",
"status": "CLOSED",
"customer_id": null,
"register_id": "02dcd191-ae2b-11e6-f485-a54b98a1d0bc",
"user_id": "02dcd191-ae2b-11e6-f485-a54b98a2e27e",
"invoice_number": "155",
"invoice_sequence": 155,
"total_tax": "0.65217",
"total_price": "4.34783",
"note": "cash payment single product",
"receipt_address": "",
"register_sale_products": [
{
"id": "e541c29f-8657-a3be-11e9-802e43390f65",
"product_id": "product1-id",
"price": 4.34783,
"price_set": 0,
"discount": 0,
"tax": "0.65217",
"tax_id": "02dcd191-ae2b-11e6-f485-a54b9896a941",
"quantity": "1",
"sequence": 0,
"status": "SAVED",
"attributes": [{ "name": "line_note", "value": "" }]
}
],
"register_sale_payments": [
{
"id": "e541c29f-8657-a3be-11e9-802e4abf2d57",
"register_id": "02dcd191-ae2b-11e6-f485-a54b98a1d0bc",
"payment_type_id": 1,
"retailer_payment_type_id": "02dcd191-ae2b-11e6-f485-a54b98a273f4",
"payment_date": "2019-05-27T15:19:52+12:00",
"amount": "5",
"currency": "NZD"
}
]
},
{
"id": "e541c29f-8657-a3be-11e9-802e3ba64743",
"short_code": "a5bx9l",
"sale_date": "2019-05-27T15:19:52+12:00",
"status": "CLOSED",
"customer_id": null,
"register_id": "02dcd191-ae2b-11e6-f485-a54b98a1d0bc",
"user_id": "02dcd191-ae2b-11e6-f485-a54b98a2e27e",
"invoice_number": "155",
"invoice_sequence": 155,
"total_tax": "0.65217",
"total_price": "4.34783",
"note": "cash payment two line item, different product and different account code",
"receipt_address": "",
"register_sale_products": [
{
"id": "e541c29f-8657-a3be-11e9-802e43390f65",
"product_id": "product1-id",
"price": 4.34783,
"price_set": 0,
"discount": 0,
"tax": "0.65217",
"tax_id": "02dcd191-ae2b-11e6-f485-a54b9896a941",
"quantity": "1",
"sequence": 0,
"status": "SAVED",
"attributes": [{ "name": "line_note", "value": "" }]
},
{
"id": "e541c29f-8657-a3be-11e9-802e43390f64",
"product_id": "product2-id",
"price": 4.34783,
"price_set": 0,
"discount": 0,
"tax": "0.65217",
"tax_id": "02dcd191-ae2b-11e6-f485-a54b9896a941",
"quantity": "1",
"sequence": 0,
"status": "SAVED",
"attributes": [{ "name": "line_note", "value": "" }]
}
],
"register_sale_payments": [
{
"id": "e541c29f-8657-a3be-11e9-802e4abf2d57",
"register_id": "02dcd191-ae2b-11e6-f485-a54b98a1d0bc",
"payment_type_id": 1,
"retailer_payment_type_id": "02dcd191-ae2b-11e6-f485-a54b98a273f4",
"payment_date": "2019-05-27T15:19:52+12:00",
"amount": "10",
"currency": "NZD"
}
]
},
{
"id": "e541c29f-8657-a3be-11e9-802e3ba64743",
"short_code": "a5bx9l",
"sale_date": "2019-05-27T15:19:52+12:00",
"status": "CLOSED",
"customer_id": null,
"register_id": "02dcd191-ae2b-11e6-f485-a54b98a1d0bc",
"user_id": "02dcd191-ae2b-11e6-f485-a54b98a2e27e",
"invoice_number": "155",
"invoice_sequence": 155,
"total_tax": "0.65217",
"total_price": "4.34783",
"note": "cash payment two line item 2 different product, same code",
"receipt_address": "",
"register_sale_products": [
{
"id": "e541c29f-8657-a3be-11e9-802e43390f65",
"product_id": "product2-id",
"price": 4.34783,
"price_set": 0,
"discount": 0,
"tax": "0.65217",
"tax_id": "02dcd191-ae2b-11e6-f485-a54b9896a941",
"quantity": "1",
"sequence": 0,
"status": "SAVED",
"attributes": [{ "name": "line_note", "value": "" }]
},
{
"id": "e541c29f-8657-a3be-11e9-802e43390f64",
"product_id": "product3-id",
"price": 4.34783,
"price_set": 0,
"discount": 0,
"tax": "0.65217",
"tax_id": "02dcd191-ae2b-11e6-f485-a54b9896a941",
"quantity": "1",
"sequence": 0,
"status": "SAVED",
"attributes": [{ "name": "line_note", "value": "" }]
}
],
"register_sale_payments": [
{
"id": "e541c29f-8657-a3be-11e9-802e4abf2d57",
"register_id": "02dcd191-ae2b-11e6-f485-a54b98a1d0bc",
"payment_type_id": 1,
"retailer_payment_type_id": "02dcd191-ae2b-11e6-f485-a54b98a273f4",
"payment_date": "2019-05-27T15:19:52+12:00",
"amount": "10",
"currency": "NZD"
}
]
},
{
"id": "e541c29f-8657-a3be-11e9-802e3ba64743",
"short_code": "a5bx9l",
"sale_date": "2019-05-27T15:19:52+12:00",
"status": "CLOSED",
"customer_id": null,
"register_id": "02dcd191-ae2b-11e6-f485-a54b98a1d0bc",
"user_id": "02dcd191-ae2b-11e6-f485-a54b98a2e27e",
"invoice_number": "155",
"invoice_sequence": 155,
"total_tax": "0.65217",
"total_price": "4.34783",
"note": "cash payment two line item same product, same code",
"receipt_address": "",
"register_sale_products": [
{
"id": "e541c29f-8657-a3be-11e9-802e43390f65",
"product_id": "product2-id",
"price": 4.34783,
"price_set": 0,
"discount": 0,
"tax": "0.65217",
"tax_id": "02dcd191-ae2b-11e6-f485-a54b9896a941",
"quantity": "1",
"sequence": 0,
"status": "SAVED",
"attributes": [{ "name": "line_note", "value": "" }]
},
{
"id": "e541c29f-8657-a3be-11e9-802e43390f64",
"product_id": "product2-id",
"price": 4.34783,
"price_set": 0,
"discount": 0,
"tax": "0.65217",
"tax_id": "02dcd191-ae2b-11e6-f485-a54b9896a941",
"quantity": "1",
"sequence": 0,
"status": "SAVED",
"attributes": [{ "name": "line_note", "value": "" }]
}
],
"register_sale_payments": [
{
"id": "e541c29f-8657-a3be-11e9-802e4abf2d57",
"register_id": "02dcd191-ae2b-11e6-f485-a54b98a1d0bc",
"payment_type_id": 1,
"retailer_payment_type_id": "02dcd191-ae2b-11e6-f485-a54b98a273f4",
"payment_date": "2019-05-27T15:19:52+12:00",
"amount": "10",
"currency": "NZD"
}
]
}
]
"""
# Create your models here.
product_to_account_code = {
"product1-id": 1,
"product2-id": 2,
"product3-id": 2,
"0242ac12-0002-11e9-e945-7d27ba9c3903": 1,
"0242ac12-0002-11e9-e945-7d27bac8894e": 2
}
sales = json.loads(sale_json)
def generate_sale_invoice(sales):
payload = {
"Line": [],
"CustomerRef": {
"value": "1"
}
}
sale_invoice = Invoice()
account_code_tax_code_mapping = {}
overall_total_amount = 0
# iterate over the sales to aggregate the line items by account code and tax code
for sale in sales:
for line_item in sale["register_sale_products"]:
account_code = product_to_account_code[line_item["product_id"]]
tax_code = line_item["tax_id"]
key = (account_code, tax_code)
price = float(line_item["price"])
quantity = float(line_item["quantity"])
tax = float(line_item["tax"])
total_amount = (price + tax) * quantity
if key in account_code_tax_code_mapping:
account_code_tax_code_mapping[key] += total_amount
else:
account_code_tax_code_mapping[key] = total_amount
overall_total_amount += total_amount
# sale_invoice.TotalAmt = overall_total_amount
payload["TotalAmt"] = overall_total_amount
for key, value in account_code_tax_code_mapping.items():
account_code = key[0]
tax_code = key[1]
external_tax_code = "NON"
if tax_code == "02dcd191-ae2b-11e6-f485-a54b9896a941" or tax_code == "00000000-0002-0002-0002-000000000003":
external_tax_code = "TAX"
# line_item = SalesItemLine()
# line_item.Amount = value
# line_item.Description = "Account code: {} and Tax code: {}".format(account_code, tax_code)
line_item = {
"DetailType": "SalesItemLineDetail",
"Amount": value,
"Description": "Account code: {} and Tax code: {}".format(account_code, external_tax_code),
"SalesItemLineDetail": {
"ItemRef": {
},
"TaxCodeRef": {
}
}
}
line_item["SalesItemLineDetail"]["ItemRef"]["value"] = account_code
line_item["SalesItemLineDetail"]["TaxCodeRef"]["value"] = external_tax_code
payload["Line"].append(line_item)
return payload
def generate_sale_payment(sales, external_invoice_id):
payload = {
"TotalAmt": 0,
"CustomerRef": {
"value": "1"
},
"Line": [
{
"Amount": 0,
"LinkedTxn": [
{
"TxnId": external_invoice_id,
"TxnType": "Invoice"
}
]
}
]
}
# assume only 1 type of payment
total_cash_payment = 0
# iterate over the sales to aggregate the line items by account code and tax code
for sale in sales:
for payment in sale["register_sale_payments"]:
amount = float(payment["amount"])
total_cash_payment += amount
payload["TotalAmt"] = total_cash_payment
payload["Line"][0]["Amount"] = total_cash_payment
return payload
def post_sale_invoice(access_token, realm_id, sales=sales):
"""[summary]
"""
if settings.ENVIRONMENT == 'production':
base_url = settings.QBO_BASE_PROD
else:
base_url = settings.QBO_BASE_SANDBOX
route = '/v3/company/{0}/invoice'.format(realm_id)
auth_header = 'Bearer {0}'.format(access_token)
headers = {
'Authorization': auth_header,
'Accept': 'application/json'
}
payload = generate_sale_invoice(sales)
return requests.post('{0}{1}'.format(base_url, route), json=payload, headers=headers)
def post_invoice_payment(access_token, realm_id, external_invoice_id, sales=sales):
"""[summary]
"""
if settings.ENVIRONMENT == 'production':
base_url = settings.QBO_BASE_PROD
else:
base_url = settings.QBO_BASE_SANDBOX
route = '/v3/company/{0}/payment'.format(realm_id)
auth_header = 'Bearer {0}'.format(access_token)
headers = {
'Authorization': auth_header,
'Accept': 'application/json'
}
payload = generate_sale_payment(sales, external_invoice_id)
print(payload)
return requests.post('{0}{1}'.format(base_url, route), json=payload, headers=headers)
def retrieve_vend_sales():
path = "https://weggieincl.dev.vendhq.works/api/register_sales"
auth_header = "Bearer {0}".format("<KEY>")
headers = {
'Authorization': auth_header,
'Accept': 'application/json'
}
return requests.get(path, headers=headers) | en | 0.353513 | [summary] [ { "id": "e541c29f-8657-a3be-11e9-802e3ba64743", "short_code": "a5bx9l", "sale_date": "2019-05-27T15:19:52+12:00", "status": "CLOSED", "customer_id": null, "register_id": "02dcd191-ae2b-11e6-f485-a54b98a1d0bc", "user_id": "02dcd191-ae2b-11e6-f485-a54b98a2e27e", "invoice_number": "155", "invoice_sequence": 155, "total_tax": "0.65217", "total_price": "4.34783", "note": "cash payment single product", "receipt_address": "", "register_sale_products": [ { "id": "e541c29f-8657-a3be-11e9-802e43390f65", "product_id": "product1-id", "price": 4.34783, "price_set": 0, "discount": 0, "tax": "0.65217", "tax_id": "02dcd191-ae2b-11e6-f485-a54b9896a941", "quantity": "1", "sequence": 0, "status": "SAVED", "attributes": [{ "name": "line_note", "value": "" }] } ], "register_sale_payments": [ { "id": "e541c29f-8657-a3be-11e9-802e4abf2d57", "register_id": "02dcd191-ae2b-11e6-f485-a54b98a1d0bc", "payment_type_id": 1, "retailer_payment_type_id": "02dcd191-ae2b-11e6-f485-a54b98a273f4", "payment_date": "2019-05-27T15:19:52+12:00", "amount": "5", "currency": "NZD" } ] }, { "id": "e541c29f-8657-a3be-11e9-802e3ba64743", "short_code": "a5bx9l", "sale_date": "2019-05-27T15:19:52+12:00", "status": "CLOSED", "customer_id": null, "register_id": "02dcd191-ae2b-11e6-f485-a54b98a1d0bc", "user_id": "02dcd191-ae2b-11e6-f485-a54b98a2e27e", "invoice_number": "155", "invoice_sequence": 155, "total_tax": "0.65217", "total_price": "4.34783", "note": "cash payment two line item, different product and different account code", "receipt_address": "", "register_sale_products": [ { "id": "e541c29f-8657-a3be-11e9-802e43390f65", "product_id": "product1-id", "price": 4.34783, "price_set": 0, "discount": 0, "tax": "0.65217", "tax_id": "02dcd191-ae2b-11e6-f485-a54b9896a941", "quantity": "1", "sequence": 0, "status": "SAVED", "attributes": [{ "name": "line_note", "value": "" }] }, { "id": "e541c29f-8657-a3be-11e9-802e43390f64", "product_id": 
"product2-id", "price": 4.34783, "price_set": 0, "discount": 0, "tax": "0.65217", "tax_id": "02dcd191-ae2b-11e6-f485-a54b9896a941", "quantity": "1", "sequence": 0, "status": "SAVED", "attributes": [{ "name": "line_note", "value": "" }] } ], "register_sale_payments": [ { "id": "e541c29f-8657-a3be-11e9-802e4abf2d57", "register_id": "02dcd191-ae2b-11e6-f485-a54b98a1d0bc", "payment_type_id": 1, "retailer_payment_type_id": "02dcd191-ae2b-11e6-f485-a54b98a273f4", "payment_date": "2019-05-27T15:19:52+12:00", "amount": "10", "currency": "NZD" } ] }, { "id": "e541c29f-8657-a3be-11e9-802e3ba64743", "short_code": "a5bx9l", "sale_date": "2019-05-27T15:19:52+12:00", "status": "CLOSED", "customer_id": null, "register_id": "02dcd191-ae2b-11e6-f485-a54b98a1d0bc", "user_id": "02dcd191-ae2b-11e6-f485-a54b98a2e27e", "invoice_number": "155", "invoice_sequence": 155, "total_tax": "0.65217", "total_price": "4.34783", "note": "cash payment two line item 2 different product, same code", "receipt_address": "", "register_sale_products": [ { "id": "e541c29f-8657-a3be-11e9-802e43390f65", "product_id": "product2-id", "price": 4.34783, "price_set": 0, "discount": 0, "tax": "0.65217", "tax_id": "02dcd191-ae2b-11e6-f485-a54b9896a941", "quantity": "1", "sequence": 0, "status": "SAVED", "attributes": [{ "name": "line_note", "value": "" }] }, { "id": "e541c29f-8657-a3be-11e9-802e43390f64", "product_id": "product3-id", "price": 4.34783, "price_set": 0, "discount": 0, "tax": "0.65217", "tax_id": "02dcd191-ae2b-11e6-f485-a54b9896a941", "quantity": "1", "sequence": 0, "status": "SAVED", "attributes": [{ "name": "line_note", "value": "" }] } ], "register_sale_payments": [ { "id": "e541c29f-8657-a3be-11e9-802e4abf2d57", "register_id": "02dcd191-ae2b-11e6-f485-a54b98a1d0bc", "payment_type_id": 1, "retailer_payment_type_id": "02dcd191-ae2b-11e6-f485-a54b98a273f4", "payment_date": "2019-05-27T15:19:52+12:00", "amount": "10", "currency": "NZD" } ] }, { "id": "e541c29f-8657-a3be-11e9-802e3ba64743", 
"short_code": "a5bx9l", "sale_date": "2019-05-27T15:19:52+12:00", "status": "CLOSED", "customer_id": null, "register_id": "02dcd191-ae2b-11e6-f485-a54b98a1d0bc", "user_id": "02dcd191-ae2b-11e6-f485-a54b98a2e27e", "invoice_number": "155", "invoice_sequence": 155, "total_tax": "0.65217", "total_price": "4.34783", "note": "cash payment two line item same product, same code", "receipt_address": "", "register_sale_products": [ { "id": "e541c29f-8657-a3be-11e9-802e43390f65", "product_id": "product2-id", "price": 4.34783, "price_set": 0, "discount": 0, "tax": "0.65217", "tax_id": "02dcd191-ae2b-11e6-f485-a54b9896a941", "quantity": "1", "sequence": 0, "status": "SAVED", "attributes": [{ "name": "line_note", "value": "" }] }, { "id": "e541c29f-8657-a3be-11e9-802e43390f64", "product_id": "product2-id", "price": 4.34783, "price_set": 0, "discount": 0, "tax": "0.65217", "tax_id": "02dcd191-ae2b-11e6-f485-a54b9896a941", "quantity": "1", "sequence": 0, "status": "SAVED", "attributes": [{ "name": "line_note", "value": "" }] } ], "register_sale_payments": [ { "id": "e541c29f-8657-a3be-11e9-802e4abf2d57", "register_id": "02dcd191-ae2b-11e6-f485-a54b98a1d0bc", "payment_type_id": 1, "retailer_payment_type_id": "02dcd191-ae2b-11e6-f485-a54b98a273f4", "payment_date": "2019-05-27T15:19:52+12:00", "amount": "10", "currency": "NZD" } ] } ] # Create your models here. # iterate over the sales to aggregate the line items by account code and tax code # sale_invoice.TotalAmt = overall_total_amount # line_item = SalesItemLine() # line_item.Amount = value # line_item.Description = "Account code: {} and Tax code: {}".format(account_code, tax_code) # assume only 1 type of payment # iterate over the sales to aggregate the line items by account code and tax code [summary] [summary] | 1.910544 | 2 |
pretrain.py | CJWBW/BLIP | 473 | 6613023 | '''
* Copyright (c) 2022, salesforce.com, inc.
* All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
* For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
* By <NAME>
'''
import argparse
import os
import ruamel_yaml as yaml
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from torch.utils.data import DataLoader
from models.blip_pretrain import blip_pretrain
import utils
from utils import warmup_lr_schedule, step_lr_schedule
from data import create_dataset, create_sampler, create_loader
def train(model, data_loader, optimizer, epoch, device, config):
# train
model.train()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))
metric_logger.add_meter('loss_ita', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
metric_logger.add_meter('loss_itm', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
metric_logger.add_meter('loss_lm', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
header = 'Train Epoch: [{}]'.format(epoch)
print_freq = 50
if config['laion_path']:
data_loader.dataset.reload_laion(epoch)
data_loader.sampler.set_epoch(epoch)
for i, (image, caption) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
if epoch==0:
warmup_lr_schedule(optimizer, i, config['warmup_steps'], config['warmup_lr'], config['init_lr'])
optimizer.zero_grad()
image = image.to(device,non_blocking=True)
# ramp up alpha in the first 2 epochs
alpha = config['alpha']*min(1,(epoch*len(data_loader)+i)/(2*len(data_loader)))
loss_ita, loss_itm, loss_lm = model(image, caption, alpha = alpha)
loss = loss_ita + loss_itm + loss_lm
loss.backward()
optimizer.step()
metric_logger.update(loss_ita=loss_ita.item())
metric_logger.update(loss_itm=loss_itm.item())
metric_logger.update(loss_lm=loss_lm.item())
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger.global_avg())
return {k: "{:.3f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()}
def main(args, config):
utils.init_distributed_mode(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
cudnn.benchmark = True
#### Dataset ####
print("Creating dataset")
datasets = [create_dataset('pretrain', config, min_scale=0.2)]
print('number of training samples: %d'%len(datasets[0]))
num_tasks = utils.get_world_size()
global_rank = utils.get_rank()
samplers = create_sampler(datasets, [True], num_tasks, global_rank)
data_loader = create_loader(datasets,samplers,batch_size=[config['batch_size']], num_workers=[4], is_trains=[True], collate_fns=[None])[0]
#### Model ####
print("Creating model")
model = blip_pretrain(image_size=config['image_size'], vit=config['vit'], vit_grad_ckpt=config['vit_grad_ckpt'],
vit_ckpt_layer=config['vit_ckpt_layer'], queue_size=config['queue_size'])
model = model.to(device)
optimizer = torch.optim.AdamW(params=model.parameters(), lr=config['init_lr'], weight_decay=config['weight_decay'])
start_epoch = 0
if args.checkpoint:
checkpoint = torch.load(args.checkpoint, map_location='cpu')
state_dict = checkpoint['model']
model.load_state_dict(state_dict)
optimizer.load_state_dict(checkpoint['optimizer'])
start_epoch = checkpoint['epoch']+1
print('resume checkpoint from %s'%args.checkpoint)
model_without_ddp = model
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
print("Start training")
start_time = time.time()
for epoch in range(start_epoch, config['max_epoch']):
step_lr_schedule(optimizer, epoch, config['init_lr'], config['min_lr'], config['lr_decay_rate'])
train_stats = train(model, data_loader, optimizer, epoch, device, config)
if utils.is_main_process():
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
'epoch': epoch,
}
save_obj = {
'model': model_without_ddp.state_dict(),
'optimizer': optimizer.state_dict(),
'config': config,
'epoch': epoch,
}
torch.save(save_obj, os.path.join(args.output_dir, 'checkpoint_%02d.pth'%epoch))
with open(os.path.join(args.output_dir, "log.txt"),"a") as f:
f.write(json.dumps(log_stats) + "\n")
dist.barrier()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config', default='./configs/pretrain.yaml')
parser.add_argument('--output_dir', default='output/Pretrain')
parser.add_argument('--checkpoint', default='')
parser.add_argument('--evaluate', action='store_true')
parser.add_argument('--device', default='cuda')
parser.add_argument('--seed', default=42, type=int)
parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
parser.add_argument('--distributed', default=True, type=bool)
args = parser.parse_args()
config = yaml.load(open(args.config, 'r'), Loader=yaml.Loader)
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
yaml.dump(config, open(os.path.join(args.output_dir, 'config.yaml'), 'w'))
main(args, config) | '''
* Copyright (c) 2022, salesforce.com, inc.
* All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
* For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
* By <NAME>
'''
import argparse
import os
import ruamel_yaml as yaml
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from torch.utils.data import DataLoader
from models.blip_pretrain import blip_pretrain
import utils
from utils import warmup_lr_schedule, step_lr_schedule
from data import create_dataset, create_sampler, create_loader
def train(model, data_loader, optimizer, epoch, device, config):
    """Run one BLIP pretraining epoch and return the averaged loss metrics.

    Args:
        model: BLIP pretraining model; forward returns (loss_ita, loss_itm, loss_lm).
        data_loader: distributed loader yielding (image, caption) batches.
        optimizer: optimizer stepped once per batch.
        epoch: current epoch index; epoch 0 additionally applies per-step LR warmup.
        device: torch device images are moved to.
        config: dict with 'laion_path', 'warmup_steps', 'warmup_lr', 'init_lr', 'alpha'.

    Returns:
        dict mapping metric name -> global average formatted as a '%.3f' string.
    """
    # train
    model.train()
    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))
    metric_logger.add_meter('loss_ita', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
    metric_logger.add_meter('loss_itm', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
    metric_logger.add_meter('loss_lm', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
    header = 'Train Epoch: [{}]'.format(epoch)
    print_freq = 50
    # optionally reload the LAION web-data shard for this epoch
    if config['laion_path']:
        data_loader.dataset.reload_laion(epoch)
    # reseed the distributed sampler so each epoch sees a different shuffle
    data_loader.sampler.set_epoch(epoch)
    for i, (image, caption) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        if epoch==0:
            warmup_lr_schedule(optimizer, i, config['warmup_steps'], config['warmup_lr'], config['init_lr'])
        optimizer.zero_grad()
        image = image.to(device,non_blocking=True)
        # ramp up alpha in the first 2 epochs
        alpha = config['alpha']*min(1,(epoch*len(data_loader)+i)/(2*len(data_loader)))
        # three objectives: image-text contrastive, image-text matching, language modeling
        loss_ita, loss_itm, loss_lm = model(image, caption, alpha = alpha)
        loss = loss_ita + loss_itm + loss_lm
        loss.backward()
        optimizer.step()
        metric_logger.update(loss_ita=loss_ita.item())
        metric_logger.update(loss_itm=loss_itm.item())
        metric_logger.update(loss_lm=loss_lm.item())
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger.global_avg())
    return {k: "{:.3f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()}
def main(args, config):
    """Set up distributed pretraining and run the full training loop.

    Builds the pretraining dataset/loader, the BLIP model and AdamW optimizer,
    optionally resumes from ``args.checkpoint``, then trains for
    ``config['max_epoch']`` epochs, saving a checkpoint and a JSON log line per
    epoch on the main process.

    Args:
        args: parsed CLI namespace (device, seed, checkpoint, distributed, ...).
        config: dict loaded from the YAML config file.
    """
    utils.init_distributed_mode(args)
    device = torch.device(args.device)
    # fix the seed for reproducibility
    # each rank gets a distinct seed so data augmentation differs across workers
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    cudnn.benchmark = True
    #### Dataset ####
    print("Creating dataset")
    datasets = [create_dataset('pretrain', config, min_scale=0.2)]
    print('number of training samples: %d'%len(datasets[0]))
    num_tasks = utils.get_world_size()
    global_rank = utils.get_rank()
    samplers = create_sampler(datasets, [True], num_tasks, global_rank)
    data_loader = create_loader(datasets,samplers,batch_size=[config['batch_size']], num_workers=[4], is_trains=[True], collate_fns=[None])[0]
    #### Model ####
    print("Creating model")
    model = blip_pretrain(image_size=config['image_size'], vit=config['vit'], vit_grad_ckpt=config['vit_grad_ckpt'],
                          vit_ckpt_layer=config['vit_ckpt_layer'], queue_size=config['queue_size'])
    model = model.to(device)
    optimizer = torch.optim.AdamW(params=model.parameters(), lr=config['init_lr'], weight_decay=config['weight_decay'])
    start_epoch = 0
    # optionally resume model/optimizer state and continue from the next epoch
    if args.checkpoint:
        checkpoint = torch.load(args.checkpoint, map_location='cpu')
        state_dict = checkpoint['model']
        model.load_state_dict(state_dict)
        optimizer.load_state_dict(checkpoint['optimizer'])
        start_epoch = checkpoint['epoch']+1
        print('resume checkpoint from %s'%args.checkpoint)
    # keep a handle to the bare model so checkpoints are saved without the DDP wrapper
    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module
    print("Start training")
    start_time = time.time()
    for epoch in range(start_epoch, config['max_epoch']):
        # stepwise (per-epoch) learning-rate decay
        step_lr_schedule(optimizer, epoch, config['init_lr'], config['min_lr'], config['lr_decay_rate'])
        train_stats = train(model, data_loader, optimizer, epoch, device, config)
        # only rank 0 writes checkpoints and logs
        if utils.is_main_process():
            log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                         'epoch': epoch,
                         }
            save_obj = {
                'model': model_without_ddp.state_dict(),
                'optimizer': optimizer.state_dict(),
                'config': config,
                'epoch': epoch,
            }
            torch.save(save_obj, os.path.join(args.output_dir, 'checkpoint_%02d.pth'%epoch))
            with open(os.path.join(args.output_dir, "log.txt"),"a") as f:
                f.write(json.dumps(log_stats) + "\n")
        # keep all ranks in lockstep at epoch boundaries
        dist.barrier()
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
    # CLI entry point: parse arguments, load the YAML config, prepare the
    # output directory, and launch pretraining.
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', default='./configs/pretrain.yaml')
    parser.add_argument('--output_dir', default='output/Pretrain')
    parser.add_argument('--checkpoint', default='')
    parser.add_argument('--evaluate', action='store_true')
    parser.add_argument('--device', default='cuda')
    parser.add_argument('--seed', default=42, type=int)
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    # NOTE(review): type=bool treats any non-empty string (even "false") as
    # True; consider a dedicated str->bool parser — TODO confirm intended CLI use.
    parser.add_argument('--distributed', default=True, type=bool)
    args = parser.parse_args()
    config = yaml.load(open(args.config, 'r'), Loader=yaml.Loader)
    Path(args.output_dir).mkdir(parents=True, exist_ok=True)
    # snapshot the effective config alongside the run outputs
    yaml.dump(config, open(os.path.join(args.output_dir, 'config.yaml'), 'w'))
main(args, config) | en | 0.615712 | * Copyright (c) 2022, salesforce.com, inc. * All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause * By <NAME> # train # ramp up alpha in the first 2 epochs # gather the stats from all processes # fix the seed for reproducibility #### Dataset #### #### Model #### | 1.769273 | 2 |
allmychanges/tests/tasks.py | AllMyChanges/allmychanges.com | 46 | 6613024 | import mock
from allmychanges.tasks import update_changelog_task
from allmychanges.models import Changelog
from .utils import refresh, eq_, dt_eq
from django.utils import timezone
def test_update_changelog_task_stops_future_changelog_updates_in_case_of_error():
    """A failing update task should auto-pause the changelog and file one issue."""
    changelog = Changelog.objects.create(
        namespace='python', name='pip', source='test')
    patch_target = 'allmychanges.tasks.update_preview_or_changelog'
    with mock.patch(patch_target, side_effect=RuntimeError('Blah minor')):
        update_changelog_task(changelog.id)
        changelog = refresh(changelog)
        # the changelog got paused at the moment of failure
        dt_eq(changelog.paused_at, timezone.now())
        # exactly one auto-created issue records why it was paused
        eq_(1, changelog.issues.count())
        first_issue = changelog.issues.all()[0]
        eq_('auto-paused', first_issue.type)
        eq_('Paused because of error: "Blah minor"', first_issue.comment)
| import mock
from allmychanges.tasks import update_changelog_task
from allmychanges.models import Changelog
from .utils import refresh, eq_, dt_eq
from django.utils import timezone
def test_update_changelog_task_stops_future_changelog_updates_in_case_of_error():
    """A failing update task should auto-pause the changelog and file one issue."""
    changelog = Changelog.objects.create(
        namespace='python', name='pip', source='test')
    # make the underlying update raise so the task's error path runs
    with mock.patch('allmychanges.tasks.update_preview_or_changelog') as func:
        func.side_effect = RuntimeError('Blah minor')
        update_changelog_task(changelog.id)
        changelog = refresh(changelog)
        # the changelog got paused at the moment of failure
        dt_eq(changelog.paused_at, timezone.now())
        # exactly one auto-created issue records why it was paused
        eq_(1, changelog.issues.count())
        issue = changelog.issues.all()[0]
        eq_('auto-paused', issue.type)
        eq_('Paused because of error: "Blah minor"', issue.comment)
| none | 1 | 2.089849 | 2 | |
populate_db.py | SerhatTeker/django-rest-filtering-tutorial | 0 | 6613025 | <reponame>SerhatTeker/django-rest-filtering-tutorial
# type: ignore
# flake8: noqa
# One-shot seed script: configures Django, applies migrations, then populates
# the database with dummy users, authors, regions, and articles for local
# development. Running it twice will attempt to create duplicate rows.
import django
import logging
import os
import sys
# point Django at the project settings and make the project root importable
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "src.settings")
sys.path.append(os.path.join(os.path.realpath(os.path.dirname(__file__)), "..", ".."))
django.setup()
from src.articles.models import Article
from src.regions.models import Region
from src.authors.models import Author
from src.users.models import User
from django.core import management
# Migrate
management.call_command("migrate", no_input=True)
# Seed
# users
user_1 = User.objects.create_user(username="User1", email="<EMAIL>", name="UserName1", password="<PASSWORD>")
user_2 = User.objects.create_user(username="User2", email="<EMAIL>", name="UserName2", password="<PASSWORD>")
user_3 = User.objects.create_user(username="User3", email="<EMAIL>", name="UserName3", password="<PASSWORD>")
# authors
# author_2 deliberately has no linked user account
author_1 = Author.objects.create(user=user_1, first_name="Name1", last_name="Surname1")
author_2 = Author.objects.create(first_name="Name2", last_name="Surname2")
author_3 = Author.objects.create(user=user_3, first_name="Name3", last_name="Surname3")
# regions
region_de = Region.objects.create(code="DE", name="Germany")
region_uk = Region.objects.create(code="UK", name="United Kingdom")
# Articles
# articles with author without regions
Article.objects.create(title="Fake Article1", content="Fake Content1", author=author_1)
Article.objects.create(title="Fake Article2", content="Fake Content2", author=author_2)
Article.objects.create(title="Fake Article3", content="Fake Content3", author=author_3)
# articles with regions
Article.objects.create(title="Fake Article4", content="Fake Content5").regions.set(
    [
        Region.objects.create(code="CA", name="Canadia"),
        Region.objects.create(code="AU", name="Australia"),
    ]
)
Article.objects.create(title="Fake Article5", content="Fake Content5").regions.set(
    [
        Region.objects.create(code="IT", name="Italy"),
        Region.objects.create(code="CH", name="Switzerland"),
    ]
)
# articles with author and regions
for index in range(6, 11):
    Article.objects.create(
        title=f"Fake Article{index}", content=f"Fake Content{index}", author=author_3
    ).regions.set([region_de.id, region_uk.id])
logging.info("Database populated with dummy data.")
| # type: ignore
# flake8: noqa
import django
import logging
import os
import sys
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "src.settings")
sys.path.append(os.path.join(os.path.realpath(os.path.dirname(__file__)), "..", ".."))
django.setup()
from src.articles.models import Article
from src.regions.models import Region
from src.authors.models import Author
from src.users.models import User
from django.core import management
# Migrate
management.call_command("migrate", no_input=True)
# Seed
# users
user_1 = User.objects.create_user(username="User1", email="<EMAIL>", name="UserName1", password="<PASSWORD>")
user_2 = User.objects.create_user(username="User2", email="<EMAIL>", name="UserName2", password="<PASSWORD>")
user_3 = User.objects.create_user(username="User3", email="<EMAIL>", name="UserName3", password="<PASSWORD>")
# authors
author_1 = Author.objects.create(user=user_1, first_name="Name1", last_name="Surname1")
author_2 = Author.objects.create(first_name="Name2", last_name="Surname2")
author_3 = Author.objects.create(user=user_3, first_name="Name3", last_name="Surname3")
# regions
region_de = Region.objects.create(code="DE", name="Germany")
region_uk = Region.objects.create(code="UK", name="United Kingdom")
# Articles
# articles with author without regions
Article.objects.create(title="Fake Article1", content="Fake Content1", author=author_1)
Article.objects.create(title="Fake Article2", content="Fake Content2", author=author_2)
Article.objects.create(title="Fake Article3", content="Fake Content3", author=author_3)
# articles with regions
Article.objects.create(title="Fake Article4", content="Fake Content5").regions.set(
[
Region.objects.create(code="CA", name="Canadia"),
Region.objects.create(code="AU", name="Australia"),
]
)
Article.objects.create(title="Fake Article5", content="Fake Content5").regions.set(
[
Region.objects.create(code="IT", name="Italy"),
Region.objects.create(code="CH", name="Switzerland"),
]
)
# articles with author and regions
for index in range(6, 11):
Article.objects.create(
title=f"Fake Article{index}", content=f"Fake Content{index}", author=author_3
).regions.set([region_de.id, region_uk.id])
logging.info("Database populated with dummy data.") | en | 0.749907 | # type: ignore # flake8: noqa # Migrate # Seed # users # authors # regions # Articles # articles with author without regions # articles with regions # articles with author and regions | 2.025143 | 2 |
src/structProfile.py | lehecht/k-mer-Dash | 0 | 6613026 | from src.profile import Profile
class StructProfile(Profile):
    """A :class:`Profile` extended with secondary-structure information."""

    # class-level defaults; instances override these via the setters below
    template_string = None    # element-string representation of the structure
    dotbracket_string = None  # the same template in dot-bracket notation
    alphabet = None           # alphabet of structural elements in use

    def __init__(self, profile, name, alpha):
        """Build a structure profile; ``alpha`` is the element alphabet."""
        super().__init__(profile, name)
        self.alphabet = alpha

    # --- accessors -------------------------------------------------------
    def getTemplate(self):
        """Return the element-string template (or None if unset)."""
        return self.template_string

    def getDotbracket(self):
        """Return the dot-bracket representation (or None if unset)."""
        return self.dotbracket_string

    def getAlphabet(self):
        """Return the element alphabet."""
        return self.alphabet

    # --- mutators --------------------------------------------------------
    def setTemplate(self, template):
        """Store the element-string template."""
        self.template_string = template

    def setDotbracket(self, db):
        """Store the dot-bracket representation."""
        self.dotbracket_string = db

    def setAlphabet(self, a):
        """Replace the element alphabet."""
        self.alphabet = a
| from src.profile import Profile
class StructProfile(Profile):
    """A Profile extended with secondary-structure information."""
    template_string = None  # element-string
    dotbracket_string = None  # template in dotbracket notation
    alphabet = None  # alphabet of used elements
    def __init__(self, profile, name, alpha):
        # alpha: alphabet of structural elements for this profile
        super().__init__(profile, name)
        self.alphabet = alpha
    def setTemplate(self, template):
        # store the element-string template
        self.template_string = template
    def setDotbracket(self, db):
        # store the dot-bracket representation of the template
        self.dotbracket_string = db
    def setAlphabet(self, a):
        # replace the element alphabet
        self.alphabet = a
    def getTemplate(self):
        return self.template_string
    def getDotbracket(self):
        return self.dotbracket_string
    def getAlphabet(self):
        return self.alphabet
| en | 0.479377 | # element-string # template in dotbracket notation # alphabet of used elements | 2.625857 | 3 |
app/src/user_tree.py | abhinandshibu/StopUnderThinking | 1 | 6613027 | <filename>app/src/user_tree.py
from app.models import User, Journal, Section, Entry
from sqlalchemy import desc
def user_tree(user_id):
    """Build a nested list describing a user's journals, sections and entries.

    Structure (every level ordered by most-recent ``last_mod`` first)::

        [[user_id, firstname],
         [[journal_id, name, last_mod],
          [[section_id, name, last_mod],
           [entry_id, name, last_mod], ...],
          ...],
         ...]

    Args:
        user_id: primary key of the user whose tree is built.

    Returns:
        The nested list described above.
    """
    # NOTE(review): assumes user_id exists; .first() returning None would
    # raise AttributeError below — confirm callers always pass a valid id.
    user = User.query.filter_by(id=user_id).first()
    tree = [[user.id, user.firstname]]
    journals = Journal.query.filter_by(user_id=user_id).order_by(desc(Journal.last_mod)).all()
    # iterate the result rows directly instead of index loops (range(len(...)))
    for journal in journals:
        journal_node = [[journal.id, journal.name, journal.last_mod]]
        sections = Section.query.filter_by(journal_id=journal.id).order_by(desc(Section.last_mod)).all()
        for section in sections:
            section_node = [[section.id, section.name, section.last_mod]]
            entries = Entry.query.filter_by(section_id=section.id).order_by(desc(Entry.last_mod)).all()
            for entry in entries:
                section_node.append([entry.id, entry.name, entry.last_mod])
            journal_node.append(section_node)
        tree.append(journal_node)
    return tree
| <filename>app/src/user_tree.py
from app.models import User, Journal, Section, Entry
from sqlalchemy import desc
def user_tree(user_id):
    """Build a nested list of a user's journals, sections and entries,
    each level ordered by most-recent ``last_mod`` first:
    [[user_id, firstname], [[journal...], [[section...], [entry...], ...]], ...]
    """
    # quite hard to get your head round because of the layers of iteration
    # triple nested for loop
    tree = []
    user = User.query.filter_by(id=user_id).first()
    tree.append([user.id, user.firstname])
    journals = Journal.query.filter_by(user_id=user_id).order_by(desc(Journal.last_mod)).all()
    for journal_no in range(len(journals)):
        current_journal = journals[journal_no]
        # journal node: header triple first, then one sub-list per section
        temp_journal = [[current_journal.id, current_journal.name, current_journal.last_mod]]
        sections = Section.query.filter_by(journal_id=current_journal.id).order_by(desc(Section.last_mod)).all()
        for section_no in range(len(sections)):
            current_section = sections[section_no]
            # section node: header triple first, then one triple per entry
            temp_section = [[current_section.id, current_section.name, current_section.last_mod]]
            entries = Entry.query.filter_by(section_id=current_section.id).order_by(desc(Entry.last_mod)).all()
            for entry_no in range(len(entries)):
                current_entry = entries[entry_no]
                temp_section.append([current_entry.id, current_entry.name, current_entry.last_mod])
            temp_journal.append(temp_section)
        tree.append(temp_journal)
    return tree
| en | 0.920114 | # quite hard to get your head round because of the layers of iteration # triple nested for loop | 2.939597 | 3 |
tests/datasets/test_gaussian_mixture.py | rflperry/mvlearn | 92 | 6613028 | import pytest
from mvlearn.datasets import make_gaussian_mixture
from numpy.testing import assert_equal
import numpy as np
n_samples = 100
centers = [[-1, 0], [1, 0]]
covariances = [[[1, 0], [0, 1]], [[1, 0], [1, 2]]]
class_probs = [0.3, 0.7]
@pytest.mark.parametrize("centers, covariances, class_probs", [
    (centers, covariances, class_probs),
    (centers[0], covariances[0], None)]
    )
def test_formats(centers, covariances, class_probs):
    """Latents match the first view and class sizes follow class_probs.

    Runs once with the two-component mixture and once with a single
    center/covariance pair (class_probs=None).
    """
    Xs, y, latents = make_gaussian_mixture(
        n_samples, centers, covariances, class_probs=class_probs,
        return_latents=True)
    # one latent row per sample, dimensionality taken from the covariance
    assert_equal(n_samples, len(latents))
    assert_equal(len(covariances[0]), latents.shape[1])
    # the first view is the identity transform of the latents
    assert_equal(Xs[0], latents)
    if class_probs is not None:
        # each class occupies exactly its expected share of the samples
        for i, p in enumerate(class_probs):
            assert_equal(int(p * n_samples), list(y).count(i))
@pytest.mark.parametrize(
    "transform", ["linear", "poly", "sin", lambda x: 2 * x + 1])
def test_transforms(transform):
    """Every transform yields two views shaped (n_samples, dims + noise_dims)."""
    Xs, y, latents = make_gaussian_mixture(
        n_samples, centers, covariances, class_probs=class_probs,
        return_latents=True, transform=transform, noise_dims=2)
    assert_equal(len(Xs), 2)
    expected_shape = (n_samples, 4)  # 2 latent dims + 2 noise dims
    for view in Xs:
        assert_equal(view.shape, expected_shape)
def test_bad_class_probs():
    """class_probs that do not sum to 1 must be rejected with ValueError."""
    with pytest.raises(ValueError) as exc_info:
        make_gaussian_mixture(
            n_samples, centers, covariances, class_probs=[0.3, 0.4])
    assert str(exc_info.value) == "elements of `class_probs` must sum to 1"
@pytest.mark.parametrize(
    "transform", [list(), None])
def test_bad_transform_value(transform):
    """Non-callable, non-string transform values raise TypeError."""
    with pytest.raises(TypeError):
        make_gaussian_mixture(
            n_samples, centers, covariances, transform=transform)
@pytest.mark.parametrize(
    "transform", ["error"])
def test_bad_transform_type(transform):
    """Unrecognized transform names raise ValueError."""
    with pytest.raises(ValueError):
        make_gaussian_mixture(
            n_samples, centers, covariances, transform=transform)
def test_bad_shapes():
    """Malformed or mismatched centers/covariances/class_probs are rejected
    with the documented ValueError messages."""
    with pytest.raises(ValueError) as e:
        make_gaussian_mixture(n_samples, None, covariances)
    assert str(e.value) == "centers is of the incorrect shape"
    # Wrong Length
    with pytest.raises(ValueError) as e:
        make_gaussian_mixture(n_samples, [1], covariances)
    assert str(e.value) == \
        "The first dimensions of 2D centers and 3D covariances must be equal"
    # Inconsistent dimension
    with pytest.raises(ValueError) as e:
        make_gaussian_mixture(
            n_samples, centers, [np.eye(2), np.eye(3)],
            class_probs=class_probs
        )
    assert str(e.value) == "covariance matrix is of the incorrect shape"
    # Wrong uni dimensions
    with pytest.raises(ValueError) as e:
        make_gaussian_mixture(n_samples, [1, 0], [1, 0])
    assert str(e.value) == "covariance matrix is of the incorrect shape"
    # Wrong centerslti sizes
    with pytest.raises(ValueError) as e:
        make_gaussian_mixture(
            n_samples, centers, covariances, class_probs=[0.3, 0.1, 0.6]
        )
    assert str(e.value) == \
        "centers, covariances, and class_probs must be of equal length"
@pytest.mark.parametrize("noise", [None, 0, 1])
def test_random_state(noise):
    """The same random_state (at any noise level) reproduces identical data."""
    Xs_1, y_1 = make_gaussian_mixture(
        10, centers, covariances, class_probs=class_probs,
        transform='poly', random_state=42, noise=noise
    )
    Xs_2, y_2 = make_gaussian_mixture(
        10, centers, covariances, class_probs=class_probs,
        transform='poly', random_state=42, noise=noise
    )
    for view1, view2 in zip(Xs_1, Xs_2):
        assert np.allclose(view1, view2)
    assert np.allclose(y_1, y_2)
def test_noise_dims_not_same_but_reproducible():
    """Noise dims differ between the two views yet reproduce under a seed."""
    Xs_1, _ = make_gaussian_mixture(
        20, centers, covariances, class_probs=class_probs, random_state=42,
        transform="poly", noise_dims=2
    )
    # the two appended noise dimensions are sampled independently per view
    view1_noise, view2_noise = Xs_1[0][:, -2:], Xs_1[1][:, -2:]
    assert not np.allclose(view1_noise, view2_noise)
    Xs_2, _ = make_gaussian_mixture(
        20, centers, covariances, class_probs=class_probs, random_state=42,
        transform="poly", noise_dims=2
    )
    # identical random_state reproduces the same noise dimensions
    view1_noise2, view2_noise2 = Xs_2[0][:, -2:], Xs_2[1][:, -2:]
    assert np.allclose(view1_noise, view1_noise2)
    assert np.allclose(view2_noise, view2_noise2)
@pytest.mark.parametrize(
    "transform", ["linear", "poly", "sin", lambda x: 2 * x + 1])
def test_signal_noise_not_same_but_reproducible(transform):
    """Additive noise only perturbs view 1, and reproduces under a seed."""
    Xs_1, _ = make_gaussian_mixture(
        20, centers, covariances, class_probs=class_probs, random_state=42,
        transform=transform, noise=1
    )
    view1_noise, view2_noise = Xs_1[0], Xs_1[1]
    Xs_2, _ = make_gaussian_mixture(
        20, centers, covariances, class_probs=class_probs, random_state=42,
        transform=transform, noise=1
    )
    view1_noise2, view2_noise2 = Xs_2[0], Xs_2[1]
    # Noise is reproducible and signal is the same
    assert np.allclose(view1_noise, view1_noise2)
    assert np.allclose(view2_noise, view2_noise2)
    Xs_3, _ = make_gaussian_mixture(
        20, centers, covariances, class_probs=class_probs, random_state=42,
        transform=transform
    )
    view1_noise3, view2_noise3 = Xs_3[0], Xs_3[1]
    # Noise varies view1, but keeps view 2 unaffects (i.e. the latents)
    assert not np.allclose(view1_noise, view1_noise3)
    assert np.allclose(view2_noise, view2_noise3)
def test_shuffle():
    """Different shuffle_random_state values produce different orderings,
    regardless of the ambient numpy seed."""
    np.random.seed(42)
    Xs_1, y_1 = make_gaussian_mixture(
        20,
        centers,
        covariances,
        class_probs=class_probs,
        transform='poly',
        random_state=42,
        shuffle=True,
        shuffle_random_state=42,
    )
    np.random.seed(30)
    Xs_2, y_2 = make_gaussian_mixture(
        20,
        centers,
        covariances,
        class_probs=class_probs,
        transform='poly',
        random_state=42,
        shuffle=True,
        shuffle_random_state=10,
    )
    # same data seed but different shuffle seeds -> different row order
    for view1, view2 in zip(Xs_1, Xs_2):
        assert not np.allclose(view1, view2)
    assert not np.allclose(y_1, y_2)
def test_shuffle_with_random_state():
    """Identical shuffle_random_state values reproduce the same ordering."""
    Xs_1, y_1 = make_gaussian_mixture(
        20,
        centers,
        covariances,
        class_probs=class_probs,
        transform='poly',
        random_state=42,
        shuffle=True,
        shuffle_random_state=42,
    )
    Xs_2, y_2 = make_gaussian_mixture(
        20,
        centers,
        covariances,
        class_probs=class_probs,
        transform='poly',
        random_state=42,
        shuffle=True,
        shuffle_random_state=42,
    )
    # same data seed and same shuffle seed -> identical views and labels
    for view1, view2 in zip(Xs_1, Xs_2):
        assert np.allclose(view1, view2)
    assert np.allclose(y_1, y_2)
| import pytest
from mvlearn.datasets import make_gaussian_mixture
from numpy.testing import assert_equal
import numpy as np
n_samples = 100
centers = [[-1, 0], [1, 0]]
covariances = [[[1, 0], [0, 1]], [[1, 0], [1, 2]]]
class_probs = [0.3, 0.7]
@pytest.mark.parametrize("centers, covariances, class_probs", [
(centers, covariances, class_probs),
(centers[0], covariances[0], None)]
)
def test_formats(centers, covariances, class_probs):
Xs, y, latents = make_gaussian_mixture(
n_samples, centers, covariances, class_probs=class_probs,
return_latents=True)
assert_equal(n_samples, len(latents))
assert_equal(len(covariances[0]), latents.shape[1])
assert_equal(Xs[0], latents)
if class_probs is not None:
for i, p in enumerate(class_probs):
assert_equal(int(p * n_samples), list(y).count(i))
@pytest.mark.parametrize(
"transform", ["linear", "poly", "sin", lambda x: 2 * x + 1])
def test_transforms(transform):
Xs, y, latents = make_gaussian_mixture(
n_samples, centers, covariances, class_probs=class_probs,
return_latents=True, transform=transform, noise_dims=2)
assert_equal(len(Xs), 2)
assert_equal(Xs[0].shape, (n_samples, 4))
assert_equal(Xs[1].shape, (n_samples, 4))
def test_bad_class_probs():
with pytest.raises(ValueError) as e:
make_gaussian_mixture(
n_samples, centers, covariances, class_probs=[0.3, 0.4]
)
assert str(e.value) == "elements of `class_probs` must sum to 1"
@pytest.mark.parametrize(
"transform", [list(), None])
def test_bad_transform_value(transform):
with pytest.raises(TypeError):
make_gaussian_mixture(
n_samples, centers, covariances, transform=transform)
@pytest.mark.parametrize(
"transform", ["error"])
def test_bad_transform_type(transform):
with pytest.raises(ValueError):
make_gaussian_mixture(
n_samples, centers, covariances, transform=transform)
def test_bad_shapes():
with pytest.raises(ValueError) as e:
make_gaussian_mixture(n_samples, None, covariances)
assert str(e.value) == "centers is of the incorrect shape"
# Wrong Length
with pytest.raises(ValueError) as e:
make_gaussian_mixture(n_samples, [1], covariances)
assert str(e.value) == \
"The first dimensions of 2D centers and 3D covariances must be equal"
# Inconsistent dimension
with pytest.raises(ValueError) as e:
make_gaussian_mixture(
n_samples, centers, [np.eye(2), np.eye(3)],
class_probs=class_probs
)
assert str(e.value) == "covariance matrix is of the incorrect shape"
# Wrong uni dimensions
with pytest.raises(ValueError) as e:
make_gaussian_mixture(n_samples, [1, 0], [1, 0])
assert str(e.value) == "covariance matrix is of the incorrect shape"
# Wrong centerslti sizes
with pytest.raises(ValueError) as e:
make_gaussian_mixture(
n_samples, centers, covariances, class_probs=[0.3, 0.1, 0.6]
)
assert str(e.value) == \
"centers, covariances, and class_probs must be of equal length"
@pytest.mark.parametrize("noise", [None, 0, 1])
def test_random_state(noise):
Xs_1, y_1 = make_gaussian_mixture(
10, centers, covariances, class_probs=class_probs,
transform='poly', random_state=42, noise=noise
)
Xs_2, y_2 = make_gaussian_mixture(
10, centers, covariances, class_probs=class_probs,
transform='poly', random_state=42, noise=noise
)
for view1, view2 in zip(Xs_1, Xs_2):
assert np.allclose(view1, view2)
assert np.allclose(y_1, y_2)
def test_noise_dims_not_same_but_reproducible():
Xs_1, _ = make_gaussian_mixture(
20, centers, covariances, class_probs=class_probs, random_state=42,
transform="poly", noise_dims=2
)
view1_noise, view2_noise = Xs_1[0][:, -2:], Xs_1[1][:, -2:]
assert not np.allclose(view1_noise, view2_noise)
Xs_2, _ = make_gaussian_mixture(
20, centers, covariances, class_probs=class_probs, random_state=42,
transform="poly", noise_dims=2
)
view1_noise2, view2_noise2 = Xs_2[0][:, -2:], Xs_2[1][:, -2:]
assert np.allclose(view1_noise, view1_noise2)
assert np.allclose(view2_noise, view2_noise2)
@pytest.mark.parametrize(
"transform", ["linear", "poly", "sin", lambda x: 2 * x + 1])
def test_signal_noise_not_same_but_reproducible(transform):
Xs_1, _ = make_gaussian_mixture(
20, centers, covariances, class_probs=class_probs, random_state=42,
transform=transform, noise=1
)
view1_noise, view2_noise = Xs_1[0], Xs_1[1]
Xs_2, _ = make_gaussian_mixture(
20, centers, covariances, class_probs=class_probs, random_state=42,
transform=transform, noise=1
)
view1_noise2, view2_noise2 = Xs_2[0], Xs_2[1]
# Noise is reproducible and signal is the same
assert np.allclose(view1_noise, view1_noise2)
assert np.allclose(view2_noise, view2_noise2)
Xs_3, _ = make_gaussian_mixture(
20, centers, covariances, class_probs=class_probs, random_state=42,
transform=transform
)
view1_noise3, view2_noise3 = Xs_3[0], Xs_3[1]
# Noise varies view1, but keeps view 2 unaffects (i.e. the latents)
assert not np.allclose(view1_noise, view1_noise3)
assert np.allclose(view2_noise, view2_noise3)
def test_shuffle():
np.random.seed(42)
Xs_1, y_1 = make_gaussian_mixture(
20,
centers,
covariances,
class_probs=class_probs,
transform='poly',
random_state=42,
shuffle=True,
shuffle_random_state=42,
)
np.random.seed(30)
Xs_2, y_2 = make_gaussian_mixture(
20,
centers,
covariances,
class_probs=class_probs,
transform='poly',
random_state=42,
shuffle=True,
shuffle_random_state=10,
)
for view1, view2 in zip(Xs_1, Xs_2):
assert not np.allclose(view1, view2)
assert not np.allclose(y_1, y_2)
def test_shuffle_with_random_state():
Xs_1, y_1 = make_gaussian_mixture(
20,
centers,
covariances,
class_probs=class_probs,
transform='poly',
random_state=42,
shuffle=True,
shuffle_random_state=42,
)
Xs_2, y_2 = make_gaussian_mixture(
20,
centers,
covariances,
class_probs=class_probs,
transform='poly',
random_state=42,
shuffle=True,
shuffle_random_state=42,
)
for view1, view2 in zip(Xs_1, Xs_2):
assert np.allclose(view1, view2)
assert np.allclose(y_1, y_2)
| en | 0.735084 | # Wrong Length # Inconsistent dimension # Wrong uni dimensions # Wrong centerslti sizes # Noise is reproducible and signal is the same # Noise varies view1, but keeps view 2 unaffects (i.e. the latents) | 2.488763 | 2 |
redis_speed_regression_utils/coordinator.py | filipecosta90/redis-speed-regression-utils | 1 | 6613029 | <filename>redis_speed_regression_utils/coordinator.py<gh_stars>1-10
import argparse
import logging
import random
import shutil
import subprocess
import tempfile
import git
import redis
import semantic_version
# logging settings
logging.basicConfig(
format="%(asctime)s %(levelname)-4s %(message)s",
level=logging.INFO,
datefmt="%Y-%m-%d %H:%M:%S",
)
def main():
    """Discover redis versions/commits and enqueue speed-regression jobs.

    Reads a redis git repository (cloned into a temp dir when --redis_repo is
    not given), collects final release tags and/or unstable-branch commits,
    and XADDs one work item per (commit, repetition) to the Redis stream
    'speed-regression-commits' consumed by the benchmark workers. With
    --dry-run true, only logs what would be triggered.
    """

    def str2bool(value):
        # argparse's ``type=bool`` treats any non-empty string (even "false")
        # as True; parse common boolean spellings explicitly instead.
        if isinstance(value, bool):
            return value
        lowered = value.lower()
        if lowered in ("yes", "true", "t", "y", "1"):
            return True
        if lowered in ("no", "false", "f", "n", "0"):
            return False
        raise argparse.ArgumentTypeError("boolean value expected, got {!r}".format(value))

    parser = argparse.ArgumentParser(
        description="tbd",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument("--redis_mgt_host", type=str, default="localhost")
    parser.add_argument("--redis_mgt_port", type=int, default=6379)
    parser.add_argument("--redis_mgt_pass", type=str, default=None)
    parser.add_argument("--redis_repo", type=str, default=None)
    # BUGFIX: these previously used type=bool, which parses "false" as True
    parser.add_argument("--trigger-version-tags", type=str2bool, default=True)
    parser.add_argument("--trigger-unstable-commits", type=str2bool, default=False)
    parser.add_argument("--dry-run", type=str2bool, default=False)
    args = parser.parse_args()

    stream = "speed-regression-commits"
    redisDirPath = args.redis_repo
    cleanUp = False
    if redisDirPath is None:
        # No local checkout supplied: clone upstream redis into a temp dir
        # (GitPython raises GitCommandError on failure, unlike the previous
        # unchecked shell pipe).
        cleanUp = True
        redisDirPath = tempfile.mkdtemp()
        logging.info("Retrieving redis repo from remote into {}.".format(redisDirPath))
        git.Repo.clone_from("https://github.com/redis/redis", redisDirPath)
    else:
        logging.info(
            "Using the following redis repo to retrieve versions info {}. No need to fetch remote data.".format(
                redisDirPath))
    repo = git.Repo(redisDirPath)
    officialVersions = []
    Commits = []
    if args.trigger_version_tags is True:
        logging.info(
            "Using version tags to trigger speed tests.")
        for tag in repo.tags:
            # keep only final releases: valid semver with no pre-release suffix
            if semantic_version.validate(tag.name) and "-" not in tag.name:
                officialVersions.append(tag)
        logging.info("Will trigger {} distinct version tests by version: {}.".format(len(officialVersions), ",".join(
            [x.name for x in officialVersions])))
    if args.trigger_unstable_commits is True:
        # BUGFIX: log message previously said "version tags" here too
        logging.info(
            "Using unstable branch commits to trigger speed tests.")
        for commit in repo.iter_commits():
            Commits.append(commit)
        logging.info(
            "Will trigger {} distinct unstable branch commit tests.".format(len(Commits) - len(officialVersions)))
    if args.dry_run is False:
        redisMgtClient = redis.StrictRedis(host=args.redis_mgt_host, port=args.redis_mgt_port,
                                           password=args.redis_mgt_pass,
                                           decode_responses=True)
        # enqueue every job 5 times, in a fresh random order per repetition,
        # so run-order effects average out across workers
        for rep in range(0, 5):
            random.shuffle(officialVersions)
            for tag in officialVersions:
                redisMgtClient.xadd(stream, {'commit': tag.commit.hexsha, 'committed-date': tag.commit.committed_date,
                                             'tag': tag.name,
                                             'benchmark-tool': "redis-benchmark",
                                             'setup': "oss-standalone"})
        for rep in range(0, 5):
            # BUGFIX: previously shuffled officialVersions here, leaving the
            # commit jobs enqueued 5 times in the same deterministic order
            random.shuffle(Commits)
            for commit in Commits:
                redisMgtClient.xadd(stream,
                                    {'commit': commit.hexsha, 'committed-date': commit.committed_date, 'tag': "",
                                     'benchmark-tool': "redis-benchmark",
                                     'setup': "oss-standalone"})
    else:
        logging.info("Skipping actual work trigger ( dry-run )")
    if cleanUp is True:
        logging.info("Removing temporary redis dir {}.".format(redisDirPath))
        shutil.rmtree(redisDirPath)
| <filename>redis_speed_regression_utils/coordinator.py<gh_stars>1-10
import argparse
import logging
import random
import shutil
import subprocess
import tempfile
import git
import redis
import semantic_version
# logging settings
logging.basicConfig(
format="%(asctime)s %(levelname)-4s %(message)s",
level=logging.INFO,
datefmt="%Y-%m-%d %H:%M:%S",
)
def main():
parser = argparse.ArgumentParser(
description="tbd",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("--redis_mgt_host", type=str, default="localhost")
parser.add_argument("--redis_mgt_port", type=int, default=6379)
parser.add_argument("--redis_mgt_pass", type=str, default=None)
parser.add_argument("--redis_repo", type=str, default=None)
parser.add_argument("--trigger-version-tags", type=bool, default=True)
parser.add_argument("--trigger-unstable-commits", type=bool, default=False)
parser.add_argument("--dry-run", type=bool, default=False)
args = parser.parse_args()
redisMgtClient = None
stream = "speed-regression-commits"
redisDirPath = args.redis_repo
cleanUp = False
if redisDirPath is None:
cleanUp = True
redisDirPath = tempfile.mkdtemp()
logging.info("Retrieving redis repo from remote into {}.".format(redisDirPath))
cmd = "git clone https://github.com/redis/redis {}\n".format(redisDirPath)
process = subprocess.Popen('/bin/bash', stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = process.communicate(cmd.encode())
else:
logging.info(
"Using the following redis repo to retrieve versions info {}. No need to fetch remote data.".format(
redisDirPath))
repo = git.Repo(redisDirPath)
officialVersions = []
Commits = []
if args.trigger_version_tags is True:
logging.info(
"Using version tags to trigger speed tests.")
for tag in repo.tags:
if semantic_version.validate(tag.name) and "-" not in tag.name:
# if semantic_version.Version(tag.name).major >= 6:
officialVersions.append(tag)
logging.info("Will trigger {} distinct version tests by version: {}.".format(len(officialVersions), ",".join(
[x.name for x in officialVersions])))
if args.trigger_unstable_commits is True:
logging.info(
"Using version tags to trigger speed tests.")
for commit in repo.iter_commits():
Commits.append(commit)
logging.info(
"Will trigger {} distinct unstable branch commit tests.".format(len(Commits) - len(officialVersions)))
if args.dry_run is False:
redisMgtClient = redis.StrictRedis(host=args.redis_mgt_host, port=args.redis_mgt_port,
password=args.redis_mgt_pass,
decode_responses=True)
for rep in range(0, 5):
random.shuffle(officialVersions)
for tag in officialVersions:
redisMgtClient.xadd(stream, {'commit': tag.commit.hexsha, 'committed-date': tag.commit.committed_date,
'tag': tag.name,
'benchmark-tool': "redis-benchmark",
'setup': "oss-standalone"})
for rep in range(0, 5):
random.shuffle(officialVersions)
for commit in Commits:
redisMgtClient.xadd(stream,
{'commit': commit.hexsha, 'committed-date': commit.committed_date, 'tag': "",
'benchmark-tool': "redis-benchmark",
'setup': "oss-standalone"})
else:
logging.info("Skipping actual work trigger ( dry-run )")
if cleanUp is True:
logging.info("Removing temporary redis dir {}.".format(redisDirPath))
shutil.rmtree(redisDirPath)
| en | 0.358362 | # logging settings # if semantic_version.Version(tag.name).major >= 6: | 2.042601 | 2 |
setup.py | Pyco7/django-ajax-views | 6 | 6613030 | from setuptools import setup, find_packages
from ajaxviews import __version__
from codecs import open
from os import path
root_path = path.abspath(path.dirname(__file__))
with open(path.join(root_path, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='django-ajax-views',
version=__version__,
description='Django class-based views extension working together with JS library require-ajax-views.',
long_description=long_description,
url='https://github.com/Pyco7/django-ajax-views',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
# 'Programming Language :: Python :: 2',
# 'Programming Language :: Python :: 2.6',
# 'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
# 'Programming Language :: Python :: 3.3',
# 'Programming Language :: Python :: 3.4',
# 'Programming Language :: Python :: 3.5',
# 'Programming Language :: Python :: 3 :: Only',
'Programming Language :: JavaScript',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
],
keywords='django class-based views javascript coffeescript typescript ajax requirejs',
packages=find_packages(),
package_data={
'ajaxviews': [
'templates/ajaxviews/*.html',
'static/require-ajax-views/dist/ajaxviews.js',
'static/require-ajax-views/src/style.styl',
],
},
install_requires=[
'Django>=1.9',
'django-require',
'django-js-reverse',
'django-crispy-forms',
'python-dateutil',
],
extras_require={
# 'test': ['coverage'],
'optional': [
'django-guardian',
'django-extra-views',
'django-autocomplete-light==2.3.3',
],
'docs': [
'Sphinx',
'sphinx-rtd-theme',
],
},
)
| from setuptools import setup, find_packages
from ajaxviews import __version__
from codecs import open
from os import path
root_path = path.abspath(path.dirname(__file__))
with open(path.join(root_path, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='django-ajax-views',
version=__version__,
description='Django class-based views extension working together with JS library require-ajax-views.',
long_description=long_description,
url='https://github.com/Pyco7/django-ajax-views',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
# 'Programming Language :: Python :: 2',
# 'Programming Language :: Python :: 2.6',
# 'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
# 'Programming Language :: Python :: 3.3',
# 'Programming Language :: Python :: 3.4',
# 'Programming Language :: Python :: 3.5',
# 'Programming Language :: Python :: 3 :: Only',
'Programming Language :: JavaScript',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
],
keywords='django class-based views javascript coffeescript typescript ajax requirejs',
packages=find_packages(),
package_data={
'ajaxviews': [
'templates/ajaxviews/*.html',
'static/require-ajax-views/dist/ajaxviews.js',
'static/require-ajax-views/src/style.styl',
],
},
install_requires=[
'Django>=1.9',
'django-require',
'django-js-reverse',
'django-crispy-forms',
'python-dateutil',
],
extras_require={
# 'test': ['coverage'],
'optional': [
'django-guardian',
'django-extra-views',
'django-autocomplete-light==2.3.3',
],
'docs': [
'Sphinx',
'sphinx-rtd-theme',
],
},
)
| en | 0.400244 | # 'Programming Language :: Python :: 2', # 'Programming Language :: Python :: 2.6', # 'Programming Language :: Python :: 2.7', # 'Programming Language :: Python :: 3.3', # 'Programming Language :: Python :: 3.4', # 'Programming Language :: Python :: 3.5', # 'Programming Language :: Python :: 3 :: Only', # 'test': ['coverage'], | 1.399787 | 1 |
functional_tests/test_actionlist.py | XeryusTC/projman | 0 | 6613031 | # -*- coding:utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import unittest
from .base import FunctionalTestCase
from . import pages
from . import remote
import projects.factories
class ActionPageTests(FunctionalTestCase):
def test_can_add_items_to_action_list(self):
# Alice visits the website
self.create_and_login_user('alice', '<EMAIL>', 'alice')
# In the sidebar she finds an action list link and she clicks it
page = pages.projects.BaseProjectPage(self.browser)
page.action_link(page.sidebar).click()
# On the new page there is a text box where she is invited to enter
# a new action item
self.assertEqual(self.browser.title, 'Actions')
action_page = pages.projects.ActionlistPage(self.browser)
self.assertEqual(action_page.add_box.get_attribute('placeholder'),
'What do you need to do?')
# She enters something in the text box and hits enter
action_page.add_box.send_keys('Test the action list')
action_page.add_box.send_keys(Keys.RETURN)
# The page reloads and she sees that the item is in a list on the page
self.assertIn('Test the action list',
action_page.list_text(action_page.thelist))
# She decides to add a second item to the list
action_page.add_box.send_keys('R<PASSWORD> comet')
action_page.add_button.click()
# The page reloads again and now both items are on the page
self.assertIn('Test the action list',
action_page.list_text(action_page.thelist))
self.assertIn('Ride the comet',
action_page.list_text(action_page.thelist))
def test_action_list_items_are_not_visible_for_other_users(self):
# Alice visits the website and creates an item for the action list
self.create_and_login_user('alice', '<EMAIL>', 'alice')
self.create_action('alice', 'Pet the cat')
self.create_action('alice', 'Repaint the bed')
page = pages.projects.BaseProjectPage(self.browser)
page.action_link(page.sidebar).click()
action_page = pages.projects.ActionlistPage(self.browser)
# The items are both on the page
self.assertIn('Pet the cat',
action_page.list_text(action_page.thelist))
self.assertIn('Repaint the bed',
action_page.list_text(action_page.thelist))
# Bob is another user who goes to the action list page on the site
self.restart_browser()
page = pages.projects.BaseProjectPage(self.browser)
action_page = pages.projects.ActionlistPage(self.browser)
self.create_and_login_user('bob', '<EMAIL>', 'bob')
page.action_link(page.sidebar).click()
# He cannot see Alice's items
self.assertNotIn('Pet the cat',
action_page.list_text(action_page.thelist))
self.assertNotIn('Repaint the bed',
action_page.list_text(action_page.thelist))
# Bob enters some items of his own
action_page.add_box.send_keys('Eat some sushi')
action_page.add_box.send_keys(Keys.ENTER)
# There is still no sign of Alice's list, but Bob can see the item
# that he just added
self.assertNotIn('Pet the cat',
action_page.list_text(action_page.thelist))
self.assertNotIn('Repaint the bed',
action_page.list_text(action_page.thelist))
self.assertIn('Eat some sushi',
action_page.list_text(action_page.thelist))
def test_cannot_add_empty_items_to_action_list(self):
# Alice is a user who goes to the action list page
self.create_and_login_user('alice', '<EMAIL>', 'alice')
page = pages.projects.BaseProjectPage(self.browser)
page.action_link(page.sidebar).click()
# Alice tries to add an empty item
action_page = pages.projects.ActionlistPage(self.browser)
action_page.add_box.send_keys('\n')
# She sees an error on the page
self.assertIn('You cannot add empty items',
[error.text for error in action_page.error_lists])
def test_cannot_add_duplicate_items_to_action_list(self):
# Bob is a user who goes to the action list page
self.create_and_login_user('bob', '<EMAIL>', 'bob')
page = pages.projects.BaseProjectPage(self.browser)
page.action_link(page.sidebar).click()
# Bob adds an item
action_page = pages.projects.ActionlistPage(self.browser)
action_page.add_box.send_keys('Test duplication\n')
# He tries to add an item again but gets an error
action_page.add_box.send_keys('Test duplication\n')
self.assertIn("You already planned to do this",
[error.text for error in action_page.error_lists])
def test_can_complete_action_item(self):
# Alice is a user who logs in and goes to the action list page
self.create_and_login_user('alice', '<EMAIL>', 'alice')
self.create_action('alice', 'Check this action')
page = pages.projects.BaseProjectPage(self.browser)
page.action_link(page.sidebar).click()
# There is an action on the page (created earlier)
action_page = pages.projects.ActionlistPage(self.browser)
self.assertIn('Check this action',
action_page.list_text(action_page.thelist))
# She moves her mouse over the text and sees that it gets crossed out
item = action_page.get_list_rows(action_page.thelist)[0]
self.assertEqual(item['text'].value_of_css_property('text-decoration'),
'none')
chain = webdriver.ActionChains(self.browser)
chain.move_to_element(item['text'])
chain.perform()
self.assertEqual(item['item'].value_of_css_property('text-decoration'),
'line-through')
# She also notices that her curser indicates that she can click it
self.assertEqual(item['text'].value_of_css_property('cursor'), 'pointer')
# When she clicks it the page reloads and the action is "checked"
item['text'].click()
self.assertIn('Check this action',
action_page.list_text(action_page.checked_list))
def test_can_undo_completed_action_item(self):
# Alice is a user who logs in and goes to the action list page
self.create_and_login_user('alice', '<EMAIL>', 'alice')
self.create_action('alice', 'Uncomplete this action')
page = pages.projects.BaseProjectPage(self.browser)
page.action_link(page.sidebar).click()
# She adds an item to the action page and completes it
action_page = pages.projects.ActionlistPage(self.browser)
action_page.get_list_rows(action_page.thelist)[0]['text'].click()
# The item is in the completed list
self.assertIn('Uncomplete this action',
action_page.list_text(action_page.checked_list))
# She clicks the item in the complete list
action_page.get_list_rows(action_page.checked_list)[0]['text'].click()
# The item is back in the incomplete list
self.assertIn('Uncomplete this action',
action_page.list_text(action_page.thelist))
def test_can_delete_action_item(self):
# Alice is a user who logs in and goes to the action list page
self.create_and_login_user('alice', '<EMAIL>', 'alice')
self.create_action('alice', 'Create actions')
self.create_action('alice', 'Remove an action')
page = pages.projects.BaseProjectPage(self.browser)
page.action_link(page.sidebar).click()
# There are two items to the action_page
action_page = pages.projects.ActionlistPage(self.browser)
self.assertIn('Create actions',
action_page.list_text(action_page.thelist))
self.assertIn('Remove an action',
action_page.list_text(action_page.thelist))
# She wants to remove the last item that she has added, so she
# looks it up in the list and removes it
actions = action_page.get_list_rows(action_page.thelist)
for idx, elems in actions.items():
if elems['text'].text == 'Remove an action':
self.assertEqual('Delete',
elems['delete'].get_attribute('title'))
elems['delete'].click()
break
# She ends up on a new page that asks her if she wants to confirm
# to delete the item, she first checks whether the item is correct
self.assertEqual(self.browser.title, 'Delete action')
confirm_page = pages.projects.ActionDeletePage(self.browser)
self.assertIn('Remove an action', confirm_page.content.text)
# She clicks the confirm button
confirm_page.confirm.click()
# She is returned to the action list page, which doesn't have the
# item anymore, but the other one is still there
self.assertIn('Create actions',
action_page.list_text(action_page.thelist))
self.assertNotIn('Remove an action',
action_page.list_text(action_page.thelist))
def test_can_delete_completed_action_items(self):
# Alice is a user who logs in and goes to the action list page
self.create_and_login_user('alice', '<EMAIL>', 'alice')
self.create_action('alice', 'Complete action')
self.create_action('alice', 'Remove completed action')
page = pages.projects.BaseProjectPage(self.browser)
page.action_link(page.sidebar).click()
action_page = pages.projects.ActionlistPage(self.browser)
# Make sure the actions have been added
self.assertIn('Complete action',
action_page.list_text(action_page.thelist))
self.assertIn('Remove completed action',
action_page.list_text(action_page.thelist))
# Alice goes to complete the first action she's added
actions = action_page.get_list_rows(action_page.thelist)
for idx, elems in actions.items():
if elems['text'].text == 'Complete action':
elems['text'].click()
break
# The item is now in the completed list next to a delete button,
# she clicks it
actions = action_page.get_list_rows(action_page.checked_list)
for idx, elems in actions.items():
if elems['text'].text == 'Complete action':
self.assertEqual('Delete',
elems['delete'].get_attribute('title'))
elems['delete'].click()
break
# She ends up on a confirmation page which has the text of the
# item and a confirmation button on it, which she clicks
self.assertEqual(self.browser.title, 'Delete action')
confirm_page = pages.projects.ActionDeletePage(self.browser)
self.assertIn('Complete action', confirm_page.content.text)
confirm_page.confirm.click()
# She is returned to the action list page, which doesn't have the
# item in either list anymore
self.assertIn('Remove completed action',
action_page.list_text(action_page.thelist))
self.assertNotIn('Complete action',
action_page.list_text(action_page.thelist))
self.assertNotIn('complete action',
action_page.list_text(action_page.checked_list))
def test_can_change_inlist_items_into_action_item(self):
# Alice is a user who logs in and goes to the inlist page
self.create_and_login_user('alice', '<EMAIL>', 'alice')
page = pages.projects.BaseProjectPage(self.browser)
page.inlist_link(page.sidebar).click()
# She adds an item to the inlist page
inlist_page = pages.projects.InlistPage(self.browser)
inlist_page.add_box.send_keys('Create action\n')
self.assertIn('Create action',
[item.text for item in inlist_page.thelist])
# There is a button next to it that lets her convert it to an
# action, she clicks it
item = inlist_page.listrows[0]
self.assertEqual('Convert to action',
inlist_page.convert_action(item).get_attribute('title'))
inlist_page.convert_action(item).click()
# She ends up on a new page where she can create the action
self.assertEqual('Convert in item to action', self.browser.title)
convert_page = pages.projects.ConvertToActionPage(self.browser)
# The text box holds the text from the inlist item
self.assertEqual(convert_page.text_box.get_attribute('value'),
'Create action')
# She enters a new text
convert_page.text_box.clear()
convert_page.text_box.send_keys('Create an action')
# She clicks the convert button, which saves the action
convert_page.convert_button.click()
# She returns to the linst page
self.assertTrue(
self.browser.current_url.endswith('/projects/inlist/'))
# When she navigates to the action page she finds the item there
page.action_link(page.sidebar).click()
action_page = pages.projects.ActionlistPage(self.browser)
self.assertIn('Create an action',
action_page.list_text(action_page.thelist))
def test_action_list_and_inlist_are_separate_lists(self):
# Alice is a user who goes to the inlist page
self.create_and_login_user('alice', '<EMAIL>', 'alice')
page = pages.projects.BaseProjectPage(self.browser)
page.inlist_link(page.sidebar).click()
# She adds an item on the inlist page
inlist_page = pages.projects.InlistPage(self.browser)
inlist_page.add_box.send_keys('Inlist item\n')
self.assertIn('Inlist item',
[item.text for item in inlist_page.thelist])
# She then goes to the action list page
page.action_link(page.sidebar).click()
action_page = pages.projects.ActionlistPage(self.browser)
# The previously added item is not on this page
self.assertNotIn('Inlist item',
action_page.list_text(action_page.thelist))
# She adds an item on the action page
action_page.add_box.send_keys('Action list item\n')
self.assertIn('Action list item',
action_page.list_text(action_page.thelist))
# She navigates back to the inlist page and sees that the last
# added item is not on that list, but the first item is
page.inlist_link(page.sidebar).click()
self.assertNotIn('Action list item',
[item.text for item in inlist_page.thelist])
self.assertIn('Inlist item',
[item.text for item in inlist_page.thelist])
def test_can_logout_from_action_page(self):
# Alice is a user who goes to the actionlist page
self.create_and_login_user('alice', '<EMAIL>', 'alice')
page = pages.projects.BaseProjectPage(self.browser)
page.action_link(page.sidebar).click()
# Alice can click a log out button in the menu
page.menu.click()
page.logout.click()
# She lands on the logout confirmation page
confirm_page = pages.accounts.LogoutConfirmPage(self.browser)
confirm_page.confirm.click()
# She is now logged out and on the landing page again
self.assertTrue(self.browser.current_url.endswith('/en/'))
def test_converting_other_persons_inlist_item_to_action_gives_404(self):
# Alice is a user who logs in and goes to the inlist page
self.create_and_login_user('alice', '<EMAIL>', 'alice')
page = pages.projects.BaseProjectPage(self.browser)
page.inlist_link(page.sidebar).click()
# She adds an item
inlist_page = pages.projects.InlistPage(self.browser)
inlist_page.add_box.send_keys('Test forbidden status\n')
# She goes to the convert page
item = inlist_page.listrows[0]
inlist_page.convert_action(item).click()
## Copy the url so Trudy can use it
self.wait_for(lambda: self.assertIn('/convert/',
self.browser.current_url))
convert_url = self.browser.current_url
# Trudy is another user who tries to mess with Alice's items
self.restart_browser()
self.create_and_login_user('trudy', '<EMAIL>', 'trudy')
# Trudy directly enters the url
self.browser.get(convert_url)
# She is greeted with a 404 Not Found error
body_text = self.browser.find_element_by_tag_name('body').text
self.assertIn('404', self.browser.title)
self.assertIn('404', body_text)
self.assertIn('Not Found', body_text)
def test_deleting_other_persons_action_item_returns_404(self):
# Alice is a user who logs in and has an action on the action list
self.create_and_login_user('alice', '<EMAIL>', 'alice')
self.create_action('alice', 'Test forbidden status')
page = pages.projects.BaseProjectPage(self.browser)
page.action_link(page.sidebar).click()
action_page = pages.projects.ActionlistPage(self.browser)
# She goes to the delete page for the action
item = action_page.get_list_rows(action_page.thelist)[0]
item['delete'].click()
## Copy the url so Trudy can use it
self.wait_for(lambda: self.assertIn('/delete/',
self.browser.current_url))
delete_url = self.browser.current_url
# Trudy is another user who tries to delete Alice's action
self.restart_browser()
self.create_and_login_user('trudy', '<EMAIL>', 'trudy')
# Trudy directly enters the url
self.browser.get(delete_url)
# She sees a 404 Not Found error
body_text = self.browser.find_element_by_tag_name('body').text
self.assertIn('404', self.browser.title)
self.assertIn('404', body_text)
self.assertIn('Not Found', body_text)
def test_cannot_delete_action_project(self):
# Alice is a user who logs in and goes to the action project
self.create_and_login_user('alice', '<EMAIL>', 'alice')
page = pages.projects.BaseProjectPage(self.browser)
page.action_link(page.sidebar).click()
# She sees that there is no delete button on the page
action_page = pages.projects.ProjectPage(self.browser)
self.assertIsNone(action_page.delete)
# Going to the delete page directly shows a 403 Forbidden error
self.browser.get(self.browser.current_url + 'delete/')
self.assertIn('403', page.body.text)
self.assertIn('Forbidden', page.body.text)
def test_cannot_edit_action_project(self):
# Alice is a user who logs in and goes to the action project
self.create_and_login_user('alice', '<EMAIL>', 'alice')
page = pages.projects.BaseProjectPage(self.browser)
page.action_link(page.sidebar).click()
# She sees that there is no edit button on the page
action_page = pages.projects.ProjectPage(self.browser)
self.assertIsNone(action_page.edit)
# Going to the edit page directly shows a 403 Forbidden error
self.browser.get(self.browser.current_url + 'edit/')
self.assertIn('403', page.body.text)
self.assertIn('Forbidden', page.body.text)
def test_action_items_can_have_a_deadline(self):
# Alice is a user who has an item on her action list
user = self.create_and_login_user('alice', '<EMAIL>', 'alice')
if self.against_staging:
remote.create_action(self.server_host, 'alice', 'Edit action')
else:
projects.factories.ActionlistItemFactory(user=user,
text='Edit action')
page = pages.projects.BaseProjectPage(self.browser)
page.action_link(page.sidebar).click()
# She sees an edit button next to the item and clicks it
project_page = pages.projects.ProjectPage(self.browser)
item = project_page.get_list_rows(project_page.thelist)[0]
self.assertIsNone(item['deadline'])
item['edit'].click()
# She ends up on an edit action page
edit_page = pages.projects.EditActionPage(self.browser)
# The edit action page allows moving of the action
self.assertIsNotNone(edit_page.select)
# There is also a field to edit a date and time for a deadline
self.assertIn('Deadline', edit_page.content.text)
# She enters a date into the field and submits the form
edit_page.deadline_date.send_keys('1970-01-01')
edit_page.deadline_time.send_keys('00:00:00')
edit_page.confirm.click()
# Alice returns to the action list page, where the item has a
# deadline on it
item = project_page.get_list_rows(project_page.thelist)[0]
self.assertEqual('Jan. 1, 1970, midnight', item['deadline'].text)
# When she adds a second item it has no deadline on it
project_page.add_box.send_keys('Write <PASSWORD>\n')
self.assertIn('Write a novel',
project_page.list_text(project_page.thelist))
actions = project_page.get_list_rows(project_page.thelist)
for idx, item in actions.items():
if item['text'].text == 'Write a novel':
break
self.assertIsNone(item['deadline'])
# Alice goes to enter a deadline for that item as well
item['edit'].click()
edit_page.deadline_date.send_keys('2000-01-01')
edit_page.deadline_time.send_keys('00:00:00\n')
# When she returns to the action list she sees that both items
# have different dates
actions = project_page.get_list_rows(project_page.thelist)
for idx, item in actions.items():
if item['text'].text == 'Edit action':
self.assertEqual('Jan. 1, 1970, midnight',
item['deadline'].text)
elif item['text'].text == 'Write a novel':
self.assertEqual('Jan. 1, 2000, midnight',
item['deadline'].text)
def test_can_change_action_item_text(self):
# Alice is a user who has an item on her action list
user = self.create_and_login_user('alice', '<EMAIL>', 'alice')
if self.against_staging:
remote.create_action(self.server_host, 'alice', 'Play games')
else:
projects.factories.ActionlistItemFactory(user=user,
text='Play games')
project_page = pages.projects.BaseProjectPage(self.browser)
project_page.action_link(project_page.sidebar).click()
# She sees an edit button next to it, which she clicks
list_page = pages.projects.ProjectPage(self.browser)
item = list_page.get_list_rows(list_page.thelist)[0]
item['edit'].click()
# She sees that there is a text entry field with the action's name
# in it
edit_page = pages.projects.EditActionPage(self.browser)
self.assertEqual(edit_page.text_box.get_attribute('value'),
'Play games')
# Alice decides to change the text and saves her changes
edit_page.text_box.clear()
edit_page.text_box.send_keys('Play some games')
edit_page.confirm.click()
# She lands on the action list page and sees that her item has changed
self.assertIn('Play some games',
list_page.list_text(list_page.thelist))
self.assertNotIn('Play games',
list_page.list_text(list_page.thelist))
def test_cannot_change_action_item_text_when_it_is_duplicate(self):
# Alice is a user with two items on her action list
user = self.create_and_login_user('alice', '<EMAIL>', 'alice')
if self.against_staging:
remote.create_action(self.server_host, 'alice', 'Save the planet')
remote.create_action(self.server_host, 'alice',
'Defeat the aliens')
else:
projects.factories.ActionlistItemFactory(user=user,
text='Save the planet')
projects.factories.ActionlistItemFactory(user=user,
text='Defeat the aliens')
project_page = pages.projects.BaseProjectPage(self.browser)
project_page.action_link(project_page.sidebar).click()
# Alice realises that aliens don't exist, so she wants to change
# one of the actions
list_page = pages.projects.ProjectPage(self.browser)
for idx, item in list_page.get_list_rows(list_page.thelist).items():
if item['text'].text == 'Defeat the aliens':
item['edit'].click()
break
# Alice changes the item's text to be the same as the other's
edit_page = pages.projects.EditActionPage(self.browser)
self.assertEqual(edit_page.text_box.get_attribute('value'),
'Defeat the aliens')
edit_page.text_box.clear()
edit_page.text_box.send_keys('Save the planet\n')
# Instead of being send to the action list page she gets an error
self.assertIn("This is already planned for that project",
[error.text for error in edit_page.errors])
self.assertEqual(len(edit_page.errors), 1)
# When she returns to the action list without saving none of the
# items has changed
project_page.action_link(project_page.sidebar).click()
self.assertIn('Save the planet',
list_page.list_text(list_page.thelist))
self.assertIn('Defeat the aliens',
list_page.list_text(list_page.thelist))
self.assertEqual(len(list_page.thelist), 2)
def test_can_order_action_items_by_name(self):
# Alice is a user with two items on her action list
user = self.create_and_login_user('alice', '<EMAIL>', 'alice')
if self.against_staging:
remote.create_action(self.server_host, 'alice', 'A item')
remote.create_action(self.server_host, 'alice', 'B item')
else:
projects.factories.ActionlistItemFactory(user=user, text='A item')
projects.factories.ActionlistItemFactory(user=user, text='B item')
project_page = pages.projects.BaseProjectPage(self.browser)
project_page.action_link(project_page.sidebar).click()
# On the page she sees a selection box that allows her to change
# the order of the items
list_page = pages.projects.ProjectPage(self.browser)
self.assertIn('Text', [o.text for o in list_page.sort_method.options])
# When she selects alphabetical and clicks 'GO' she sees the page
# reload and that the items are sorted
list_page.sort_method.select_by_value('text')
list_page.apply_sort.click()
self.assertEqual(list_page.list_text(list_page.thelist),
['A item', 'B item'])
# When she selects the descending option and clicks 'GO' the order
# of the list is reversed
list_page.sort_method.select_by_value('text')
list_page.sort_order.select_by_value('-')
list_page.apply_sort.click()
self.assertEqual(list_page.list_text(list_page.thelist),
['B item', 'A item'])
# She can also revert back to unordered by selecting the top option
list_page.sort_method.select_by_index(0)
list_page.apply_sort.click()
self.assertEqual(len(list_page.error_lists), 0)
def test_can_order_action_items_by_complete_status(self):
# Alice is a user with two items on her action list
user = self.create_and_login_user('alice', '<EMAIL>', 'alice')
self.create_action('alice', 'Item 1')
self.create_action('alice', 'Item 2')
project_page = pages.projects.BaseProjectPage(self.browser)
project_page.action_link(project_page.sidebar).click()
# On the page she first switches the complete status of one action
action_page = pages.projects.ProjectPage(self.browser)
for key, a in action_page.get_list_rows(action_page.thelist).items():
if a['text'].text == 'Item 1':
a['item'].click()
break
# When she sorts by complete status she sees the uncompleted on top
self.assertIn('Completed', [o.text for o in
action_page.sort_method.options])
action_page.sort_method.select_by_value('complete')
action_page.sort_order.select_by_value('')
action_page.apply_sort.click()
self.assertEqual(action_page.list_text(action_page.thelist),
['Item 2', 'Item 1'])
# When she sorts descending she sees the completed on top
action_page.sort_method.select_by_value('complete')
action_page.sort_order.select_by_value('-')
action_page.apply_sort.click()
self.assertEqual(action_page.list_text(action_page.thelist),
['Item 1', 'Item 2'])
    def test_can_order_action_items_by_deadline(self):
        """Sorting by 'deadline' orders items by their deadline in both
        directions; the item without a deadline may sort first or last
        (both orderings are accepted below)."""
        # Alice is a user with items on her action list
        user = self.create_and_login_user('alice', '<EMAIL>', 'alice')
        self.create_action('alice', 'Item 1')
        self.create_action('alice', 'Item 2')
        self.create_action('alice', 'Item 3')
        project_page = pages.projects.BaseProjectPage(self.browser)
        project_page.action_link(project_page.sidebar).click()
        # On the page she changes the deadline of the first item
        action_page = pages.projects.ProjectPage(self.browser)
        for k, a in action_page.get_list_rows(action_page.thelist).items():
            if a['text'].text == 'Item 1':
                a['edit'].click()
                break
        edit_page = pages.projects.EditActionPage(self.browser)
        edit_page.deadline_date.send_keys('1970-01-01')
        edit_page.deadline_time.send_keys('00:00:00\n')
        # Next she edits the second action
        action_page = pages.projects.ProjectPage(self.browser)
        for k, a in action_page.get_list_rows(action_page.thelist).items():
            if a['text'].text == 'Item 2':
                a['edit'].click()
                break
        # NOTE(review): edit_page from above is reused here; presumably
        # the page object resolves its elements lazily — confirm.
        edit_page.deadline_date.send_keys('2000-01-01')
        edit_page.deadline_time.send_keys('00:00:00\n')
        # On the action list she can sort the items by deadline
        self.assertIn('Deadline', [o.text for o in
                                   action_page.sort_method.options])
        action_page.sort_method.select_by_value('deadline')
        action_page.sort_order.select_by_value('')
        action_page.apply_sort.click()
        actions = action_page.list_text(action_page.thelist)
        # 'Item 3' has no deadline, so it may appear at either end.
        self.assertTrue(actions == ['Item 3', 'Item 1', 'Item 2'] or \
                        actions == ['Item 1', 'Item 2', 'Item 3'], actions)
        # She can also reverse the sorting
        self.assertIn('Deadline', [o.text for o in
                                   action_page.sort_method.options])
        action_page.sort_method.select_by_value('deadline')
        action_page.sort_order.select_by_value('-')
        action_page.apply_sort.click()
        actions = action_page.list_text(action_page.thelist)
        self.assertTrue(actions == ['Item 3', 'Item 2', 'Item 1'] or \
                        actions == ['Item 2', 'Item 1', 'Item 3'], actions)
| # -*- coding:utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import unittest
from .base import FunctionalTestCase
from . import pages
from . import remote
import projects.factories
class ActionPageTests(FunctionalTestCase):
def test_can_add_items_to_action_list(self):
# Alice visits the website
self.create_and_login_user('alice', '<EMAIL>', 'alice')
# In the sidebar she finds an action list link and she clicks it
page = pages.projects.BaseProjectPage(self.browser)
page.action_link(page.sidebar).click()
# On the new page there is a text box where she is invited to enter
# a new action item
self.assertEqual(self.browser.title, 'Actions')
action_page = pages.projects.ActionlistPage(self.browser)
self.assertEqual(action_page.add_box.get_attribute('placeholder'),
'What do you need to do?')
# She enters something in the text box and hits enter
action_page.add_box.send_keys('Test the action list')
action_page.add_box.send_keys(Keys.RETURN)
# The page reloads and she sees that the item is in a list on the page
self.assertIn('Test the action list',
action_page.list_text(action_page.thelist))
# She decides to add a second item to the list
action_page.add_box.send_keys('R<PASSWORD> comet')
action_page.add_button.click()
# The page reloads again and now both items are on the page
self.assertIn('Test the action list',
action_page.list_text(action_page.thelist))
self.assertIn('Ride the comet',
action_page.list_text(action_page.thelist))
    def test_action_list_items_are_not_visible_for_other_users(self):
        """Action items are private: a second user in a fresh browser
        session cannot see another user's items, only their own."""
        # Alice visits the website and creates an item for the action list
        self.create_and_login_user('alice', '<EMAIL>', 'alice')
        self.create_action('alice', 'Pet the cat')
        self.create_action('alice', 'Repaint the bed')
        page = pages.projects.BaseProjectPage(self.browser)
        page.action_link(page.sidebar).click()
        action_page = pages.projects.ActionlistPage(self.browser)
        # The items are both on the page
        self.assertIn('Pet the cat',
                      action_page.list_text(action_page.thelist))
        self.assertIn('Repaint the bed',
                      action_page.list_text(action_page.thelist))
        # Bob is another user who goes to the action list page on the site
        # (restart_browser gives him a clean, logged-out session)
        self.restart_browser()
        page = pages.projects.BaseProjectPage(self.browser)
        action_page = pages.projects.ActionlistPage(self.browser)
        self.create_and_login_user('bob', '<EMAIL>', 'bob')
        page.action_link(page.sidebar).click()
        # He cannot see Alice's items
        self.assertNotIn('Pet the cat',
                         action_page.list_text(action_page.thelist))
        self.assertNotIn('Repaint the bed',
                         action_page.list_text(action_page.thelist))
        # Bob enters some items of his own
        action_page.add_box.send_keys('Eat some sushi')
        action_page.add_box.send_keys(Keys.ENTER)
        # There is still no sign of Alice's list, but Bob can see the item
        # that he just added
        self.assertNotIn('Pet the cat',
                         action_page.list_text(action_page.thelist))
        self.assertNotIn('Repaint the bed',
                         action_page.list_text(action_page.thelist))
        self.assertIn('Eat some sushi',
                      action_page.list_text(action_page.thelist))
def test_cannot_add_empty_items_to_action_list(self):
# Alice is a user who goes to the action list page
self.create_and_login_user('alice', '<EMAIL>', 'alice')
page = pages.projects.BaseProjectPage(self.browser)
page.action_link(page.sidebar).click()
# Alice tries to add an empty item
action_page = pages.projects.ActionlistPage(self.browser)
action_page.add_box.send_keys('\n')
# She sees an error on the page
self.assertIn('You cannot add empty items',
[error.text for error in action_page.error_lists])
def test_cannot_add_duplicate_items_to_action_list(self):
# Bob is a user who goes to the action list page
self.create_and_login_user('bob', '<EMAIL>', 'bob')
page = pages.projects.BaseProjectPage(self.browser)
page.action_link(page.sidebar).click()
# Bob adds an item
action_page = pages.projects.ActionlistPage(self.browser)
action_page.add_box.send_keys('Test duplication\n')
# He tries to add an item again but gets an error
action_page.add_box.send_keys('Test duplication\n')
self.assertIn("You already planned to do this",
[error.text for error in action_page.error_lists])
    def test_can_complete_action_item(self):
        """Hovering an action crosses it out; clicking it moves the item
        to the completed list."""
        # Alice is a user who logs in and goes to the action list page
        self.create_and_login_user('alice', '<EMAIL>', 'alice')
        self.create_action('alice', 'Check this action')
        page = pages.projects.BaseProjectPage(self.browser)
        page.action_link(page.sidebar).click()
        # There is an action on the page (created earlier)
        action_page = pages.projects.ActionlistPage(self.browser)
        self.assertIn('Check this action',
                      action_page.list_text(action_page.thelist))
        # She moves her mouse over the text and sees that it gets crossed out
        item = action_page.get_list_rows(action_page.thelist)[0]
        self.assertEqual(item['text'].value_of_css_property('text-decoration'),
                         'none')
        chain = webdriver.ActionChains(self.browser)
        chain.move_to_element(item['text'])
        chain.perform()
        self.assertEqual(item['item'].value_of_css_property('text-decoration'),
                         'line-through')
        # She also notices that her cursor indicates that she can click it
        self.assertEqual(item['text'].value_of_css_property('cursor'), 'pointer')
        # When she clicks it the page reloads and the action is "checked"
        item['text'].click()
        self.assertIn('Check this action',
                      action_page.list_text(action_page.checked_list))
def test_can_undo_completed_action_item(self):
# Alice is a user who logs in and goes to the action list page
self.create_and_login_user('alice', '<EMAIL>', 'alice')
self.create_action('alice', 'Uncomplete this action')
page = pages.projects.BaseProjectPage(self.browser)
page.action_link(page.sidebar).click()
# She adds an item to the action page and completes it
action_page = pages.projects.ActionlistPage(self.browser)
action_page.get_list_rows(action_page.thelist)[0]['text'].click()
# The item is in the completed list
self.assertIn('Uncomplete this action',
action_page.list_text(action_page.checked_list))
# She clicks the item in the complete list
action_page.get_list_rows(action_page.checked_list)[0]['text'].click()
# The item is back in the incomplete list
self.assertIn('Uncomplete this action',
action_page.list_text(action_page.thelist))
    def test_can_delete_action_item(self):
        """The delete button leads to a confirmation page; confirming
        removes only the chosen item from the action list."""
        # Alice is a user who logs in and goes to the action list page
        self.create_and_login_user('alice', '<EMAIL>', 'alice')
        self.create_action('alice', 'Create actions')
        self.create_action('alice', 'Remove an action')
        page = pages.projects.BaseProjectPage(self.browser)
        page.action_link(page.sidebar).click()
        # There are two items to the action_page
        action_page = pages.projects.ActionlistPage(self.browser)
        self.assertIn('Create actions',
                      action_page.list_text(action_page.thelist))
        self.assertIn('Remove an action',
                      action_page.list_text(action_page.thelist))
        # She wants to remove the last item that she has added, so she
        # looks it up in the list and removes it
        actions = action_page.get_list_rows(action_page.thelist)
        for idx, elems in actions.items():
            if elems['text'].text == 'Remove an action':
                self.assertEqual('Delete',
                                 elems['delete'].get_attribute('title'))
                elems['delete'].click()
                break
        # She ends up on a new page that asks her if she wants to confirm
        # to delete the item, she first checks whether the item is correct
        self.assertEqual(self.browser.title, 'Delete action')
        confirm_page = pages.projects.ActionDeletePage(self.browser)
        self.assertIn('Remove an action', confirm_page.content.text)
        # She clicks the confirm button
        confirm_page.confirm.click()
        # She is returned to the action list page, which doesn't have the
        # item anymore, but the other one is still there
        self.assertIn('Create actions',
                      action_page.list_text(action_page.thelist))
        self.assertNotIn('Remove an action',
                         action_page.list_text(action_page.thelist))
def test_can_delete_completed_action_items(self):
# Alice is a user who logs in and goes to the action list page
self.create_and_login_user('alice', '<EMAIL>', 'alice')
self.create_action('alice', 'Complete action')
self.create_action('alice', 'Remove completed action')
page = pages.projects.BaseProjectPage(self.browser)
page.action_link(page.sidebar).click()
action_page = pages.projects.ActionlistPage(self.browser)
# Make sure the actions have been added
self.assertIn('Complete action',
action_page.list_text(action_page.thelist))
self.assertIn('Remove completed action',
action_page.list_text(action_page.thelist))
# Alice goes to complete the first action she's added
actions = action_page.get_list_rows(action_page.thelist)
for idx, elems in actions.items():
if elems['text'].text == 'Complete action':
elems['text'].click()
break
# The item is now in the completed list next to a delete button,
# she clicks it
actions = action_page.get_list_rows(action_page.checked_list)
for idx, elems in actions.items():
if elems['text'].text == 'Complete action':
self.assertEqual('Delete',
elems['delete'].get_attribute('title'))
elems['delete'].click()
break
# She ends up on a confirmation page which has the text of the
# item and a confirmation button on it, which she clicks
self.assertEqual(self.browser.title, 'Delete action')
confirm_page = pages.projects.ActionDeletePage(self.browser)
self.assertIn('Complete action', confirm_page.content.text)
confirm_page.confirm.click()
# She is returned to the action list page, which doesn't have the
# item in either list anymore
self.assertIn('Remove completed action',
action_page.list_text(action_page.thelist))
self.assertNotIn('Complete action',
action_page.list_text(action_page.thelist))
self.assertNotIn('complete action',
action_page.list_text(action_page.checked_list))
    def test_can_change_inlist_items_into_action_item(self):
        """An inlist item can be converted into an action item via the
        convert page, optionally editing its text on the way."""
        # Alice is a user who logs in and goes to the inlist page
        self.create_and_login_user('alice', '<EMAIL>', 'alice')
        page = pages.projects.BaseProjectPage(self.browser)
        page.inlist_link(page.sidebar).click()
        # She adds an item to the inlist page
        inlist_page = pages.projects.InlistPage(self.browser)
        inlist_page.add_box.send_keys('Create action\n')
        self.assertIn('Create action',
                      [item.text for item in inlist_page.thelist])
        # There is a button next to it that lets her convert it to an
        # action, she clicks it
        item = inlist_page.listrows[0]
        self.assertEqual('Convert to action',
                         inlist_page.convert_action(item).get_attribute('title'))
        inlist_page.convert_action(item).click()
        # She ends up on a new page where she can create the action
        self.assertEqual('Convert in item to action', self.browser.title)
        convert_page = pages.projects.ConvertToActionPage(self.browser)
        # The text box holds the text from the inlist item
        self.assertEqual(convert_page.text_box.get_attribute('value'),
                         'Create action')
        # She enters a new text
        convert_page.text_box.clear()
        convert_page.text_box.send_keys('Create an action')
        # She clicks the convert button, which saves the action
        convert_page.convert_button.click()
        # She returns to the inlist page
        self.assertTrue(
            self.browser.current_url.endswith('/projects/inlist/'))
        # When she navigates to the action page she finds the item there
        page.action_link(page.sidebar).click()
        action_page = pages.projects.ActionlistPage(self.browser)
        self.assertIn('Create an action',
                      action_page.list_text(action_page.thelist))
    def test_action_list_and_inlist_are_separate_lists(self):
        """Items added to the inlist do not appear on the action list and
        vice versa."""
        # Alice is a user who goes to the inlist page
        self.create_and_login_user('alice', '<EMAIL>', 'alice')
        page = pages.projects.BaseProjectPage(self.browser)
        page.inlist_link(page.sidebar).click()
        # She adds an item on the inlist page
        inlist_page = pages.projects.InlistPage(self.browser)
        inlist_page.add_box.send_keys('Inlist item\n')
        self.assertIn('Inlist item',
                      [item.text for item in inlist_page.thelist])
        # She then goes to the action list page
        page.action_link(page.sidebar).click()
        action_page = pages.projects.ActionlistPage(self.browser)
        # The previously added item is not on this page
        self.assertNotIn('Inlist item',
                         action_page.list_text(action_page.thelist))
        # She adds an item on the action page
        action_page.add_box.send_keys('Action list item\n')
        self.assertIn('Action list item',
                      action_page.list_text(action_page.thelist))
        # She navigates back to the inlist page and sees that the last
        # added item is not on that list, but the first item is
        page.inlist_link(page.sidebar).click()
        self.assertNotIn('Action list item',
                         [item.text for item in inlist_page.thelist])
        self.assertIn('Inlist item',
                      [item.text for item in inlist_page.thelist])
def test_can_logout_from_action_page(self):
# Alice is a user who goes to the actionlist page
self.create_and_login_user('alice', '<EMAIL>', 'alice')
page = pages.projects.BaseProjectPage(self.browser)
page.action_link(page.sidebar).click()
# Alice can click a log out button in the menu
page.menu.click()
page.logout.click()
# She lands on the logout confirmation page
confirm_page = pages.accounts.LogoutConfirmPage(self.browser)
confirm_page.confirm.click()
# She is now logged out and on the landing page again
self.assertTrue(self.browser.current_url.endswith('/en/'))
    def test_converting_other_persons_inlist_item_to_action_gives_404(self):
        """Visiting another user's convert-to-action URL returns 404."""
        # Alice is a user who logs in and goes to the inlist page
        self.create_and_login_user('alice', '<EMAIL>', 'alice')
        page = pages.projects.BaseProjectPage(self.browser)
        page.inlist_link(page.sidebar).click()
        # She adds an item
        inlist_page = pages.projects.InlistPage(self.browser)
        inlist_page.add_box.send_keys('Test forbidden status\n')
        # She goes to the convert page
        item = inlist_page.listrows[0]
        inlist_page.convert_action(item).click()
        ## Copy the url so Trudy can use it
        self.wait_for(lambda: self.assertIn('/convert/',
                                            self.browser.current_url))
        convert_url = self.browser.current_url
        # Trudy is another user who tries to mess with Alice's items
        self.restart_browser()
        self.create_and_login_user('trudy', '<EMAIL>', 'trudy')
        # Trudy directly enters the url
        self.browser.get(convert_url)
        # She is greeted with a 404 Not Found error
        body_text = self.browser.find_element_by_tag_name('body').text
        self.assertIn('404', self.browser.title)
        self.assertIn('404', body_text)
        self.assertIn('Not Found', body_text)
    def test_deleting_other_persons_action_item_returns_404(self):
        """Visiting another user's delete-action URL returns 404."""
        # Alice is a user who logs in and has an action on the action list
        self.create_and_login_user('alice', '<EMAIL>', 'alice')
        self.create_action('alice', 'Test forbidden status')
        page = pages.projects.BaseProjectPage(self.browser)
        page.action_link(page.sidebar).click()
        action_page = pages.projects.ActionlistPage(self.browser)
        # She goes to the delete page for the action
        item = action_page.get_list_rows(action_page.thelist)[0]
        item['delete'].click()
        ## Copy the url so Trudy can use it
        self.wait_for(lambda: self.assertIn('/delete/',
                                            self.browser.current_url))
        delete_url = self.browser.current_url
        # Trudy is another user who tries to delete Alice's action
        self.restart_browser()
        self.create_and_login_user('trudy', '<EMAIL>', 'trudy')
        # Trudy directly enters the url
        self.browser.get(delete_url)
        # She sees a 404 Not Found error
        body_text = self.browser.find_element_by_tag_name('body').text
        self.assertIn('404', self.browser.title)
        self.assertIn('404', body_text)
        self.assertIn('Not Found', body_text)
def test_cannot_delete_action_project(self):
# Alice is a user who logs in and goes to the action project
self.create_and_login_user('alice', '<EMAIL>', 'alice')
page = pages.projects.BaseProjectPage(self.browser)
page.action_link(page.sidebar).click()
# She sees that there is no delete button on the page
action_page = pages.projects.ProjectPage(self.browser)
self.assertIsNone(action_page.delete)
# Going to the delete page directly shows a 403 Forbidden error
self.browser.get(self.browser.current_url + 'delete/')
self.assertIn('403', page.body.text)
self.assertIn('Forbidden', page.body.text)
def test_cannot_edit_action_project(self):
# Alice is a user who logs in and goes to the action project
self.create_and_login_user('alice', '<EMAIL>', 'alice')
page = pages.projects.BaseProjectPage(self.browser)
page.action_link(page.sidebar).click()
# She sees that there is no edit button on the page
action_page = pages.projects.ProjectPage(self.browser)
self.assertIsNone(action_page.edit)
# Going to the edit page directly shows a 403 Forbidden error
self.browser.get(self.browser.current_url + 'edit/')
self.assertIn('403', page.body.text)
self.assertIn('Forbidden', page.body.text)
def test_action_items_can_have_a_deadline(self):
# Alice is a user who has an item on her action list
user = self.create_and_login_user('alice', '<EMAIL>', 'alice')
if self.against_staging:
remote.create_action(self.server_host, 'alice', 'Edit action')
else:
projects.factories.ActionlistItemFactory(user=user,
text='Edit action')
page = pages.projects.BaseProjectPage(self.browser)
page.action_link(page.sidebar).click()
# She sees an edit button next to the item and clicks it
project_page = pages.projects.ProjectPage(self.browser)
item = project_page.get_list_rows(project_page.thelist)[0]
self.assertIsNone(item['deadline'])
item['edit'].click()
# She ends up on an edit action page
edit_page = pages.projects.EditActionPage(self.browser)
# The edit action page allows moving of the action
self.assertIsNotNone(edit_page.select)
# There is also a field to edit a date and time for a deadline
self.assertIn('Deadline', edit_page.content.text)
# She enters a date into the field and submits the form
edit_page.deadline_date.send_keys('1970-01-01')
edit_page.deadline_time.send_keys('00:00:00')
edit_page.confirm.click()
# Alice returns to the action list page, where the item has a
# deadline on it
item = project_page.get_list_rows(project_page.thelist)[0]
self.assertEqual('Jan. 1, 1970, midnight', item['deadline'].text)
# When she adds a second item it has no deadline on it
project_page.add_box.send_keys('Write <PASSWORD>\n')
self.assertIn('Write a novel',
project_page.list_text(project_page.thelist))
actions = project_page.get_list_rows(project_page.thelist)
for idx, item in actions.items():
if item['text'].text == 'Write a novel':
break
self.assertIsNone(item['deadline'])
# Alice goes to enter a deadline for that item as well
item['edit'].click()
edit_page.deadline_date.send_keys('2000-01-01')
edit_page.deadline_time.send_keys('00:00:00\n')
# When she returns to the action list she sees that both items
# have different dates
actions = project_page.get_list_rows(project_page.thelist)
for idx, item in actions.items():
if item['text'].text == 'Edit action':
self.assertEqual('Jan. 1, 1970, midnight',
item['deadline'].text)
elif item['text'].text == 'Write a novel':
self.assertEqual('Jan. 1, 2000, midnight',
item['deadline'].text)
def test_can_change_action_item_text(self):
# Alice is a user who has an item on her action list
user = self.create_and_login_user('alice', '<EMAIL>', 'alice')
if self.against_staging:
remote.create_action(self.server_host, 'alice', 'Play games')
else:
projects.factories.ActionlistItemFactory(user=user,
text='Play games')
project_page = pages.projects.BaseProjectPage(self.browser)
project_page.action_link(project_page.sidebar).click()
# She sees an edit button next to it, which she clicks
list_page = pages.projects.ProjectPage(self.browser)
item = list_page.get_list_rows(list_page.thelist)[0]
item['edit'].click()
# She sees that there is a text entry field with the action's name
# in it
edit_page = pages.projects.EditActionPage(self.browser)
self.assertEqual(edit_page.text_box.get_attribute('value'),
'Play games')
# Alice decides to change the text and saves her changes
edit_page.text_box.clear()
edit_page.text_box.send_keys('Play some games')
edit_page.confirm.click()
# She lands on the action list page and sees that her item has changed
self.assertIn('Play some games',
list_page.list_text(list_page.thelist))
self.assertNotIn('Play games',
list_page.list_text(list_page.thelist))
    def test_cannot_change_action_item_text_when_it_is_duplicate(self):
        """Renaming an action to the text of an existing action is
        rejected with an error, and neither item changes."""
        # Alice is a user with two items on her action list
        user = self.create_and_login_user('alice', '<EMAIL>', 'alice')
        if self.against_staging:
            remote.create_action(self.server_host, 'alice', 'Save the planet')
            remote.create_action(self.server_host, 'alice',
                                 'Defeat the aliens')
        else:
            projects.factories.ActionlistItemFactory(user=user,
                                                     text='Save the planet')
            projects.factories.ActionlistItemFactory(user=user,
                                                     text='Defeat the aliens')
        project_page = pages.projects.BaseProjectPage(self.browser)
        project_page.action_link(project_page.sidebar).click()
        # Alice realises that aliens don't exist, so she wants to change
        # one of the actions
        list_page = pages.projects.ProjectPage(self.browser)
        for idx, item in list_page.get_list_rows(list_page.thelist).items():
            if item['text'].text == 'Defeat the aliens':
                item['edit'].click()
                break
        # Alice changes the item's text to be the same as the other's
        edit_page = pages.projects.EditActionPage(self.browser)
        self.assertEqual(edit_page.text_box.get_attribute('value'),
                         'Defeat the aliens')
        edit_page.text_box.clear()
        edit_page.text_box.send_keys('Save the planet\n')
        # Instead of being sent to the action list page she gets an error
        self.assertIn("This is already planned for that project",
                      [error.text for error in edit_page.errors])
        self.assertEqual(len(edit_page.errors), 1)
        # When she returns to the action list without saving none of the
        # items has changed
        project_page.action_link(project_page.sidebar).click()
        self.assertIn('Save the planet',
                      list_page.list_text(list_page.thelist))
        self.assertIn('Defeat the aliens',
                      list_page.list_text(list_page.thelist))
        self.assertEqual(len(list_page.thelist), 2)
    def test_can_order_action_items_by_name(self):
        """Sorting by 'text' orders items alphabetically, the '-' order
        reverses it, and the first (blank) option restores no ordering."""
        # Alice is a user with two items on her action list
        user = self.create_and_login_user('alice', '<EMAIL>', 'alice')
        if self.against_staging:
            remote.create_action(self.server_host, 'alice', 'A item')
            remote.create_action(self.server_host, 'alice', 'B item')
        else:
            projects.factories.ActionlistItemFactory(user=user, text='A item')
            projects.factories.ActionlistItemFactory(user=user, text='B item')
        project_page = pages.projects.BaseProjectPage(self.browser)
        project_page.action_link(project_page.sidebar).click()
        # On the page she sees a selection box that allows her to change
        # the order of the items
        list_page = pages.projects.ProjectPage(self.browser)
        self.assertIn('Text', [o.text for o in list_page.sort_method.options])
        # When she selects alphabetical and clicks 'GO' she sees the page
        # reload and that the items are sorted
        list_page.sort_method.select_by_value('text')
        list_page.apply_sort.click()
        self.assertEqual(list_page.list_text(list_page.thelist),
                         ['A item', 'B item'])
        # When she selects the descending option and clicks 'GO' the order
        # of the list is reversed
        list_page.sort_method.select_by_value('text')
        list_page.sort_order.select_by_value('-')
        list_page.apply_sort.click()
        self.assertEqual(list_page.list_text(list_page.thelist),
                         ['B item', 'A item'])
        # She can also revert back to unordered by selecting the top option
        list_page.sort_method.select_by_index(0)
        list_page.apply_sort.click()
        # No error means the blank sort option was accepted.
        self.assertEqual(len(list_page.error_lists), 0)
    def test_can_order_action_items_by_complete_status(self):
        """Sorting by 'complete' lists incomplete items before completed
        ones ascending, and completed ones first when descending."""
        # Alice is a user with two items on her action list
        user = self.create_and_login_user('alice', '<EMAIL>', 'alice')
        self.create_action('alice', 'Item 1')
        self.create_action('alice', 'Item 2')
        project_page = pages.projects.BaseProjectPage(self.browser)
        project_page.action_link(project_page.sidebar).click()
        # On the page she first switches the complete status of one action
        action_page = pages.projects.ProjectPage(self.browser)
        for key, a in action_page.get_list_rows(action_page.thelist).items():
            if a['text'].text == 'Item 1':
                a['item'].click()
                break
        # When she sorts by complete status she sees the uncompleted on top
        self.assertIn('Completed', [o.text for o in
                                    action_page.sort_method.options])
        action_page.sort_method.select_by_value('complete')
        action_page.sort_order.select_by_value('')
        action_page.apply_sort.click()
        self.assertEqual(action_page.list_text(action_page.thelist),
                         ['Item 2', 'Item 1'])
        # When she sorts descending she sees the completed on top
        action_page.sort_method.select_by_value('complete')
        action_page.sort_order.select_by_value('-')
        action_page.apply_sort.click()
        self.assertEqual(action_page.list_text(action_page.thelist),
                         ['Item 1', 'Item 2'])
def test_can_order_action_items_by_deadline(self):
# Alice is a user with items on her action list
user = self.create_and_login_user('alice', '<EMAIL>', 'alice')
self.create_action('alice', 'Item 1')
self.create_action('alice', 'Item 2')
self.create_action('alice', 'Item 3')
project_page = pages.projects.BaseProjectPage(self.browser)
project_page.action_link(project_page.sidebar).click()
# On the page she changes the deadline of the first item
action_page = pages.projects.ProjectPage(self.browser)
for k, a in action_page.get_list_rows(action_page.thelist).items():
if a['text'].text == 'Item 1':
a['edit'].click()
break
edit_page = pages.projects.EditActionPage(self.browser)
edit_page.deadline_date.send_keys('1970-01-01')
edit_page.deadline_time.send_keys('00:00:00\n')
# Next she edits the second action
action_page = pages.projects.ProjectPage(self.browser)
for k, a in action_page.get_list_rows(action_page.thelist).items():
if a['text'].text == 'Item 2':
a['edit'].click()
break
edit_page.deadline_date.send_keys('2000-01-01')
edit_page.deadline_time.send_keys('00:00:00\n')
# On the action list she can sort the items by deadline
self.assertIn('Deadline', [o.text for o in
action_page.sort_method.options])
action_page.sort_method.select_by_value('deadline')
action_page.sort_order.select_by_value('')
action_page.apply_sort.click()
actions = action_page.list_text(action_page.thelist)
self.assertTrue(actions == ['Item 3', 'Item 1', 'Item 2'] or \
actions == ['Item 1', 'Item 2', 'Item 3'], actions)
# She can also reverse the sorting
self.assertIn('Deadline', [o.text for o in
action_page.sort_method.options])
action_page.sort_method.select_by_value('deadline')
action_page.sort_order.select_by_value('-')
action_page.apply_sort.click()
actions = action_page.list_text(action_page.thelist)
self.assertTrue(actions == ['Item 3', 'Item 2', 'Item 1'] or \
actions == ['Item 2', 'Item 1', 'Item 3'], actions)
| en | 0.941826 | # -*- coding:utf-8 -*- # Alice visits the website # In the sidebar she finds an action list link and she clicks it # On the new page there is a text box where she is invited to enter # a new action item # She enters something in the text box and hits enter # The page reloads and she sees that the item is in a list on the page # She decides to add a second item to the list # The page reloads again and now both items are on the page # Alice visits the website and creates an item for the action list # The items are both on the page # Bob is another user who goes to the action list page on the site # He cannot see Alice's items # Bob enters some items of his own # There is still no sign of Alice's list, but Bob can see the item # that he just added # Alice is a user who goes to the action list page # Alice tries to add an empty item # She sees an error on the page # Bob is a user who goes to the action list page # Bob adds an item # He tries to add an item again but gets an error # Alice is a user who logs in and goes to the action list page # There is an action on the page (created earlier) # She moves her mouse over the text and sees that it gets crossed out # She also notices that her curser indicates that she can click it # When she clicks it the page reloads and the action is "checked" # Alice is a user who logs in and goes to the action list page # She adds an item to the action page and completes it # The item is in the completed list # She clicks the item in the complete list # The item is back in the incomplete list # Alice is a user who logs in and goes to the action list page # There are two items to the action_page # She wants to remove the last item that she has added, so she # looks it up in the list and removes it # She ends up on a new page that asks her if she wants to confirm # to delete the item, she first checks whether the item is correct # She clicks the confirm button # She is returned to the action list page, which doesn't have 
the # item anymore, but the other one is still there # Alice is a user who logs in and goes to the action list page # Make sure the actions have been added # Alice goes to complete the first action she's added # The item is now in the completed list next to a delete button, # she clicks it # She ends up on a confirmation page which has the text of the # item and a confirmation button on it, which she clicks # She is returned to the action list page, which doesn't have the # item in either list anymore # Alice is a user who logs in and goes to the inlist page # She adds an item to the inlist page # There is a button next to it that lets her convert it to an # action, she clicks it # She ends up on a new page where she can create the action # The text box holds the text from the inlist item # She enters a new text # She clicks the convert button, which saves the action # She returns to the linst page # When she navigates to the action page she finds the item there # Alice is a user who goes to the inlist page # She adds an item on the inlist page # She then goes to the action list page # The previously added item is not on this page # She adds an item on the action page # She navigates back to the inlist page and sees that the last # added item is not on that list, but the first item is # Alice is a user who goes to the actionlist page # Alice can click a log out button in the menu # She lands on the logout confirmation page # She is now logged out and on the landing page again # Alice is a user who logs in and goes to the inlist page # She adds an item # She goes to the convert page ## Copy the url so Trudy can use it # Trudy is another user who tries to mess with Alice's items # Trudy directly enters the url # She is greeted with a 404 Not Found error # Alice is a user who logs in and has an action on the action list # She goes to the delete page for the action ## Copy the url so Trudy can use it # Trudy is another user who tries to delete Alice's action # Trudy 
directly enters the url # She sees a 404 Not Found error # Alice is a user who logs in and goes to the action project # She sees that there is no delete button on the page # Going to the delete page directly shows a 403 Forbidden error # Alice is a user who logs in and goes to the action project # She sees that there is no edit button on the page # Going to the edit page directly shows a 403 Forbidden error # Alice is a user who has an item on her action list # She sees an edit button next to the item and clicks it # She ends up on an edit action page # The edit action page allows moving of the action # There is also a field to edit a date and time for a deadline # She enters a date into the field and submits the form # Alice returns to the action list page, where the item has a # deadline on it # When she adds a second item it has no deadline on it # Alice goes to enter a deadline for that item as well # When she returns to the action list she sees that both items # have different dates # Alice is a user who has an item on her action list # She sees an edit button next to it, which she clicks # She sees that there is a text entry field with the action's name # in it # Alice decides to change the text and saves her changes # She lands on the action list page and sees that her item has changed # Alice is a user with two items on her action list # Alice realises that aliens don't exist, so she wants to change # one of the actions # Alice changes the item's text to be the same as the other's # Instead of being send to the action list page she gets an error # When she returns to the action list without saving none of the # items has changed # Alice is a user with two items on her action list # On the page she sees a selection box that allows her to change # the order of the items # When she selects alphabetical and clicks 'GO' she sees the page # reload and that the items are sorted # When she selects the descending option and clicks 'GO' the order # of the list is 
reversed # She can also revert back to unordered by selecting the top option # Alice is a user with two items on her action list # On the page she first switches the complete status of one action # When she sorts by complete status she sees the uncompleted on top # When she sorts descending she sees the completed on top # Alice is a user with items on her action list # On the page she changes the deadline of the first item # Next she edits the second action # On the action list she can sort the items by deadline # She can also reverse the sorting | 2.63243 | 3 |
tuenti/tuenti-challenge-10/3.py | Ashindustry007/competitive-programming | 506 | 6613032 | #!/usr/bin/env python3
with open('pg17013.txt') as f:
contents = f.read().lower()
alphabet = 'abcdefghijklmnñopqrstuvwxyzáéíóúü'
contents = ''.join(x if x in alphabet else ' ' for x in contents)
words = {}
for w in contents.split():
if len(w) < 3: continue
words[w] = words.get(w, 0) + 1
words = sorted([(-v, k) for k, v in words.items()])
indices = {}
for i, w in enumerate(words):
indices[w[1]] = (-w[0], i)
for t in range(int(input())):
k = input()
if ord(k[0]) >= 0x30 and ord(k[0]) <= 0x39:
k = int(k)-1
k, w = words[k]
print(f'Case #{t+1}: {w} {-k}')
else:
w, i = indices[k]
print(f'Case #{t+1}: {w} #{i+1}')
| #!/usr/bin/env python3
with open('pg17013.txt') as f:
    contents = f.read().lower()
# Spanish alphabet (including ñ and accented vowels); anything else is a
# word separator.
alphabet = 'abcdefghijklmnñopqrstuvwxyzáéíóúü'
contents = ''.join(x if x in alphabet else ' ' for x in contents)
# Count occurrences of every word of at least 3 letters.
words = {}
for w in contents.split():
    if len(w) < 3: continue
    words[w] = words.get(w, 0) + 1
# Sort by descending count, ties broken alphabetically (negated count key).
words = sorted([(-v, k) for k, v in words.items()])
# Map word -> (count, 0-based rank in the sorted list).
indices = {}
for i, w in enumerate(words):
    indices[w[1]] = (-w[0], i)
for t in range(int(input())):
    k = input()
    # A query starting with an ASCII digit ('0'..'9') is a 1-based rank.
    if ord(k[0]) >= 0x30 and ord(k[0]) <= 0x39:
        k = int(k)-1
        # k is now the negated count; w is the word at that rank.
        k, w = words[k]
        print(f'Case #{t+1}: {w} {-k}')
    else:
        # Word query: note w holds the COUNT here, not the word, so the
        # answer printed is "<count> #<rank>".
        w, i = indices[k]
        print(f'Case #{t+1}: {w} #{i+1}')
| ru | 0.253707 | #!/usr/bin/env python3 #{t+1}: {w} {-k}') #{t+1}: {w} #{i+1}') | 3.381228 | 3 |
mantistablex/forms.py | TvvH05ozL6/table-summarisation | 1 | 6613033 | <filename>mantistablex/forms.py
from django import forms
from .models import GoldStandardsEnum
class TableFromJSONForm(forms.Form):
    """Upload form: a JSON file registered under a human-readable table name."""

    # Name used to identify the uploaded table.
    table_name = forms.CharField(
        widget=forms.TextInput(),
        label="Table name",
        max_length=200,
        required=True,
        label_suffix=""
    )
    # The table data; 'accept' restricts the browser's file picker to .json
    # (client-side hint only -- the extension is not validated server-side).
    json_file = forms.FileField(
        widget=forms.FileInput(attrs={'accept': '.json'}),
        label="Insert a JSON file",
        required=True,
        label_suffix=""
    )
class AnnotationFromJSONForm(forms.Form):
    """Upload form: an annotation file plus the gold standard to compare against."""

    # NOTE(review): despite the field name, the picker accepts .csv and the
    # label says CSV -- confirm whether this field should be renamed.
    json_file = forms.FileField(
        widget=forms.FileInput(attrs={'accept': '.csv'}),
        label="Insert a CSV file",
        required=True,
        label_suffix=""
    )
    # Gold-standard dataset the annotations refer to; choices are built from
    # the GoldStandardsEnum members.
    gs_type = forms.ChoiceField(
        widget=forms.Select(attrs={'class': 'form-control selectpicker'}),
        label="Gold Standard",
        initial=GoldStandardsEnum.NONE.value,
        choices=[(tag.name, tag.value) for tag in GoldStandardsEnum],
        required=True,
        label_suffix=""
    )
| <filename>mantistablex/forms.py
from django import forms
from .models import GoldStandardsEnum
class TableFromJSONForm(forms.Form):
    """Upload form: a JSON file registered under a human-readable table name."""

    # Name used to identify the uploaded table.
    table_name = forms.CharField(
        widget=forms.TextInput(),
        label="Table name",
        max_length=200,
        required=True,
        label_suffix=""
    )
    # The table data; 'accept' restricts the browser's file picker to .json
    # (client-side hint only -- the extension is not validated server-side).
    json_file = forms.FileField(
        widget=forms.FileInput(attrs={'accept': '.json'}),
        label="Insert a JSON file",
        required=True,
        label_suffix=""
    )
class AnnotationFromJSONForm(forms.Form):
    """Upload form: an annotation file plus the gold standard to compare against."""

    # NOTE(review): despite the field name, the picker accepts .csv and the
    # label says CSV -- confirm whether this field should be renamed.
    json_file = forms.FileField(
        widget=forms.FileInput(attrs={'accept': '.csv'}),
        label="Insert a CSV file",
        required=True,
        label_suffix=""
    )
    # Gold-standard dataset the annotations refer to; choices are built from
    # the GoldStandardsEnum members.
    gs_type = forms.ChoiceField(
        widget=forms.Select(attrs={'class': 'form-control selectpicker'}),
        label="Gold Standard",
        initial=GoldStandardsEnum.NONE.value,
        choices=[(tag.name, tag.value) for tag in GoldStandardsEnum],
        required=True,
        label_suffix=""
    )
| none | 1 | 2.572763 | 3 | |
Test/En/run_10_6_5_PMI_Trigram_distribution_question.py | mack-the-psych/plimac3 | 0 | 6613034 | import pandas as pd
import numpy as np
import filecmp
data_dir = r'../../Data/En/'
orig_dir = r'./orig_data/'
from ac_bi_trigram_pmi_distribution import *
Ngram_count_start_from_question = 23
Decimal_places = 4
Unknown_word_len_min = 1
test_file = r'PMI-Distribution-Trigram-Question.csv'
df_ac_trigram_q = pd.read_csv(data_dir + r'Trigram-Question.csv')
df_ac_trigram_q = df_ac_trigram_q.set_index('AC_Doc_ID')
df_ac_pmi_bigram = pd.read_csv(data_dir + r'PMI-Sum-T-Trigram-Passage.csv')
df_ac_trigram_q_buf_pmi = ac_bi_trigram_pmi_distribution(df_ac_trigram_q, Ngram_count_start_from_question - 1,
df_ac_pmi_bigram, 'Trigram', Decimal_places)
df_ac_trigram_q_buf_pmi.to_csv(data_dir + test_file)
from file_cmp_diff_ratio import *
file_cmp_diff_ratio(data_dir + test_file, orig_dir + test_file)
# Driver script: compute the PMI distribution for question trigrams and
# compare the generated CSV against a stored reference copy.
import pandas as pd
import numpy as np
import filecmp
data_dir = r'../../Data/En/'
orig_dir = r'./orig_data/'
from ac_bi_trigram_pmi_distribution import *
# Column offset where the n-gram counts start in the question table.
Ngram_count_start_from_question = 23
Decimal_places = 4
Unknown_word_len_min = 1
test_file = r'PMI-Distribution-Trigram-Question.csv'
df_ac_trigram_q = pd.read_csv(data_dir + r'Trigram-Question.csv')
df_ac_trigram_q = df_ac_trigram_q.set_index('AC_Doc_ID')
df_ac_pmi_bigram = pd.read_csv(data_dir + r'PMI-Sum-T-Trigram-Passage.csv')
# Compute the per-question trigram PMI distribution and write it out.
df_ac_trigram_q_buf_pmi = ac_bi_trigram_pmi_distribution(df_ac_trigram_q, Ngram_count_start_from_question - 1,
                          df_ac_pmi_bigram, 'Trigram', Decimal_places)
df_ac_trigram_q_buf_pmi.to_csv(data_dir + test_file)
# Report how closely the freshly generated file matches the reference.
from file_cmp_diff_ratio import *
file_cmp_diff_ratio(data_dir + test_file, orig_dir + test_file)
| none | 1 | 2.582957 | 3 | |
data_admin_examples/example1/models.py | love1900905/frepple-data-admin | 7 | 6613035 | from django.db import models
from django.utils.translation import gettext_lazy as _
from data_admin.common.models import (
HierarchyModel,
AuditModel,
)
class Location(AuditModel, HierarchyModel):
    """A location record; hierarchy support comes from HierarchyModel."""

    # Database fields
    description = models.CharField(
        _("description"), max_length=500, null=True, blank=True
    )
    category = models.CharField(
        _("category"), max_length=300, null=True, blank=True, db_index=True
    )
    subcategory = models.CharField(
        _("subcategory"), max_length=300, null=True, blank=True, db_index=True
    )

    def __str__(self):
        return self.name

    class Meta(AuditModel.Meta):
        db_table = "location"
        verbose_name = _("location")
        verbose_name_plural = _("locations")
        ordering = ["name"]
class Customer(AuditModel, HierarchyModel):
    """A customer record; hierarchy support comes from HierarchyModel."""

    # Database fields
    description = models.CharField(
        _("description"), max_length=500, null=True, blank=True
    )
    category = models.CharField(
        _("category"), max_length=300, null=True, blank=True, db_index=True
    )
    subcategory = models.CharField(
        _("subcategory"), max_length=300, null=True, blank=True, db_index=True
    )

    def __str__(self):
        return self.name

    class Meta(AuditModel.Meta):
        db_table = "customer"
        verbose_name = _("customer")
        verbose_name_plural = _("customers")
        ordering = ["name"]
class Item(AuditModel, HierarchyModel):
    """An item with cost and physical attributes (weight, volume)."""

    # Choices for the `type` field.
    types = (
        ("make to stock", _("make to stock")),
        ("make to order", _("make to order")),
    )

    # Database fields
    description = models.CharField(
        _("description"), max_length=500, null=True, blank=True
    )
    category = models.CharField(
        _("category"), max_length=300, null=True, blank=True, db_index=True
    )
    subcategory = models.CharField(
        _("subcategory"), max_length=300, null=True, blank=True, db_index=True
    )
    cost = models.DecimalField(
        _("cost"),
        null=True,
        blank=True,
        max_digits=20,
        decimal_places=8,
        help_text=_("Cost of the item"),
    )
    type = models.CharField(
        _("type"), max_length=20, null=True, blank=True, choices=types
    )
    weight = models.DecimalField(
        _("weight"),
        null=True,
        blank=True,
        max_digits=20,
        decimal_places=8,
        help_text=_("Weight of the item"),
    )
    volume = models.DecimalField(
        _("volume"),
        null=True,
        blank=True,
        max_digits=20,
        decimal_places=8,
        help_text=_("Volume of the item"),
    )

    def __str__(self):
        return self.name

    class Meta(AuditModel.Meta):
        db_table = "item"
        verbose_name = _("item")
        verbose_name_plural = _("items")
        ordering = ["name"]
class Demand(AuditModel, HierarchyModel):
    """A sales order: a customer's demand for an item at a location."""

    # Status
    demandstatus = (
        ("inquiry", _("inquiry")),
        ("quote", _("quote")),
        ("open", _("open")),
        ("closed", _("closed")),
        ("canceled", _("canceled")),
    )

    # Database fields
    description = models.CharField(
        _("description"), max_length=500, null=True, blank=True
    )
    category = models.CharField(
        _("category"), max_length=300, null=True, blank=True, db_index=True
    )
    subcategory = models.CharField(
        _("subcategory"), max_length=300, null=True, blank=True, db_index=True
    )
    customer = models.ForeignKey(
        Customer, verbose_name=_("customer"), db_index=True, on_delete=models.CASCADE
    )
    item = models.ForeignKey(
        Item, verbose_name=_("item"), db_index=True, on_delete=models.CASCADE
    )
    location = models.ForeignKey(
        Location, verbose_name=_("location"), db_index=True, on_delete=models.CASCADE
    )
    due = models.DateTimeField(
        _("due"), help_text=_("Due date of the sales order"), db_index=True
    )
    status = models.CharField(
        _("status"),
        max_length=10,
        null=True,
        blank=True,
        choices=demandstatus,
        default="open",
        help_text=_('Status of the demand. Only "open" demands are planned'),
    )
    quantity = models.DecimalField(
        _("quantity"), max_digits=20, decimal_places=8, default=1
    )

    # Convenience methods
    def __str__(self):
        return self.name

    class Meta(AuditModel.Meta):
        db_table = "demand"
        verbose_name = _("sales order")
        verbose_name_plural = _("sales orders")
        ordering = ["name"]
| from django.db import models
from django.utils.translation import gettext_lazy as _
from data_admin.common.models import (
HierarchyModel,
AuditModel,
)
class Location(AuditModel, HierarchyModel):
    """A location record; hierarchy support comes from HierarchyModel."""

    # Database fields
    description = models.CharField(
        _("description"), max_length=500, null=True, blank=True
    )
    category = models.CharField(
        _("category"), max_length=300, null=True, blank=True, db_index=True
    )
    subcategory = models.CharField(
        _("subcategory"), max_length=300, null=True, blank=True, db_index=True
    )

    def __str__(self):
        return self.name

    class Meta(AuditModel.Meta):
        db_table = "location"
        verbose_name = _("location")
        verbose_name_plural = _("locations")
        ordering = ["name"]
class Customer(AuditModel, HierarchyModel):
    """A customer record; hierarchy support comes from HierarchyModel."""

    # Database fields
    description = models.CharField(
        _("description"), max_length=500, null=True, blank=True
    )
    category = models.CharField(
        _("category"), max_length=300, null=True, blank=True, db_index=True
    )
    subcategory = models.CharField(
        _("subcategory"), max_length=300, null=True, blank=True, db_index=True
    )

    def __str__(self):
        return self.name

    class Meta(AuditModel.Meta):
        db_table = "customer"
        verbose_name = _("customer")
        verbose_name_plural = _("customers")
        ordering = ["name"]
class Item(AuditModel, HierarchyModel):
    """An item with cost and physical attributes (weight, volume)."""

    # Choices for the `type` field.
    types = (
        ("make to stock", _("make to stock")),
        ("make to order", _("make to order")),
    )

    # Database fields
    description = models.CharField(
        _("description"), max_length=500, null=True, blank=True
    )
    category = models.CharField(
        _("category"), max_length=300, null=True, blank=True, db_index=True
    )
    subcategory = models.CharField(
        _("subcategory"), max_length=300, null=True, blank=True, db_index=True
    )
    cost = models.DecimalField(
        _("cost"),
        null=True,
        blank=True,
        max_digits=20,
        decimal_places=8,
        help_text=_("Cost of the item"),
    )
    type = models.CharField(
        _("type"), max_length=20, null=True, blank=True, choices=types
    )
    weight = models.DecimalField(
        _("weight"),
        null=True,
        blank=True,
        max_digits=20,
        decimal_places=8,
        help_text=_("Weight of the item"),
    )
    volume = models.DecimalField(
        _("volume"),
        null=True,
        blank=True,
        max_digits=20,
        decimal_places=8,
        help_text=_("Volume of the item"),
    )

    def __str__(self):
        return self.name

    class Meta(AuditModel.Meta):
        db_table = "item"
        verbose_name = _("item")
        verbose_name_plural = _("items")
        ordering = ["name"]
class Demand(AuditModel, HierarchyModel):
    """A sales order: a customer's demand for an item at a location."""

    # Status
    demandstatus = (
        ("inquiry", _("inquiry")),
        ("quote", _("quote")),
        ("open", _("open")),
        ("closed", _("closed")),
        ("canceled", _("canceled")),
    )

    # Database fields
    description = models.CharField(
        _("description"), max_length=500, null=True, blank=True
    )
    category = models.CharField(
        _("category"), max_length=300, null=True, blank=True, db_index=True
    )
    subcategory = models.CharField(
        _("subcategory"), max_length=300, null=True, blank=True, db_index=True
    )
    customer = models.ForeignKey(
        Customer, verbose_name=_("customer"), db_index=True, on_delete=models.CASCADE
    )
    item = models.ForeignKey(
        Item, verbose_name=_("item"), db_index=True, on_delete=models.CASCADE
    )
    location = models.ForeignKey(
        Location, verbose_name=_("location"), db_index=True, on_delete=models.CASCADE
    )
    due = models.DateTimeField(
        _("due"), help_text=_("Due date of the sales order"), db_index=True
    )
    status = models.CharField(
        _("status"),
        max_length=10,
        null=True,
        blank=True,
        choices=demandstatus,
        default="open",
        help_text=_('Status of the demand. Only "open" demands are planned'),
    )
    quantity = models.DecimalField(
        _("quantity"), max_digits=20, decimal_places=8, default=1
    )

    # Convenience methods
    def __str__(self):
        return self.name

    class Meta(AuditModel.Meta):
        db_table = "demand"
        verbose_name = _("sales order")
        verbose_name_plural = _("sales orders")
        ordering = ["name"]
| en | 0.257853 | # Database fields # Database fields # Database fields # Status # Database fields # Convenience methods | 2.084177 | 2 |
models/eval_imagewise_classifier.py | jlee24/live_mapillary | 15 | 6613036 | import argparse
import logging
import os
import h5py
import torch
import torch.nn as nn
import pandas as pd
import numpy as np
from collections import Counter
from skimage import io, transform
from sklearn.metrics import confusion_matrix, precision_recall_fscore_support
from timeit import default_timer as timer
from torch.utils.data import Dataset, DataLoader
from torchvision import models
from torchvision import transforms, utils
from tqdm import tqdm
# Command-line configuration, parsed once at import time (this module is
# meant to be run as a script).
parser = argparse.ArgumentParser()
parser.add_argument('--log_file', type=str, default='results_pov_label_img.log', help='Log file')
parser.add_argument('--model_weights', type=str, default='models/pov_classify', help='Where pretrained model is located')
parser.add_argument('--resnet_ver', type=str, default='resnet34', help='Which ResNet architecture was used')
parser.add_argument('--label', type=str, default='pov_label', help='Label')
parser.add_argument('--batch_size', type=int, default=64)
args = parser.parse_args()
logging.basicConfig(filename=args.log_file,level=logging.DEBUG)
class ClusterImgDataset(Dataset):
    """Dataset over the images of one cluster.

    Reads image file paths from the 'img_path_224x224' column of the given
    dataframe and yields each image as a CHW float-compatible uint8 tensor.
    """

    def __init__(self, df, device):
        self.img_paths = df['img_path_224x224'].to_numpy()
        # NOTE(review): `device` is stored but never used by this class;
        # batches are moved to the device by the caller.
        self.device = device

    def __len__(self):
        return self.img_paths.shape[0]

    def __getitem__(self, idx):
        image = io.imread(self.img_paths[idx])
        image_tensor = torch.from_numpy(image)
        # Convert HWC (skimage layout) to CHW (torch layout).
        image_tensor = image_tensor.permute(2,0,1)
        return image_tensor
def create_model():
    """Build the ResNet classifier and load the fine-tuned weights.

    Returns:
        A torchvision ResNet (18 or 34, selected by ``args.resnet_ver``)
        whose final fully-connected layer is replaced with a 2-class head,
        with weights loaded from ``args.model_weights``.

    Raises:
        ValueError: if ``args.resnet_ver`` names an unsupported architecture.
    """
    pretrained_weights = torch.load(args.model_weights)
    if args.resnet_ver == 'resnet18':
        model = models.resnet18(pretrained=False)
    elif args.resnet_ver == 'resnet34':
        model = models.resnet34(pretrained=False)
    else:
        # Previously an unknown value left `model` unbound and crashed with
        # a NameError below; fail early with a clear message instead.
        raise ValueError(f'Unsupported resnet_ver: {args.resnet_ver!r}')
    model.fc = nn.Linear(512, 2)
    model.load_state_dict(pretrained_weights)
    return model
def get_majority_vote(cluster_dataset, model, device):
    """Classify every image in a cluster and return the majority class.

    Args:
        cluster_dataset: Dataset yielding one image tensor per item.
        model: the classifier; eval() is (re-)applied defensively.
        device: torch device the batches are moved to.

    Returns:
        The most frequent predicted class over all images, as a float
        (predictions accumulate in a float numpy array). Ties are broken
        by Counter.most_common's internal ordering.
    """
    model.eval()
    pred = np.array([])
    generator = DataLoader(cluster_dataset, batch_size=args.batch_size, num_workers=1)
    # Inference only: disable autograd bookkeeping to save memory and time.
    with torch.no_grad():
        for batch in generator:
            batch = batch.to(device, dtype=torch.float32)
            output = model(batch)
            predicted = output.argmax(dim=1, keepdim=True)
            pred = np.append(pred, predicted.cpu().numpy())
    votes = Counter(pred)
    return votes.most_common(1)[0][0]
def main():
    """Evaluate the cluster-level majority-vote classifier.

    Reads the per-image metadata from data.csv and the validation cluster
    ids from val_clusters_ia.txt, computes one majority-vote prediction per
    cluster, logs confusion matrix and micro-averaged metrics, and writes
    the per-cluster predictions to '<label>_preds.csv'.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    df = pd.read_csv('data.csv')
    # Use a context manager so the file is closed even if reading fails.
    with open('val_clusters_ia.txt', 'r') as val_f:
        # Strip the trailing newline from each cluster id.
        val_clusters = [x[:-1] for x in val_f.readlines()]
    print("Creating model...")
    model = create_model().to(device)
    model.eval()
    val_c = []
    y_true = np.array([])
    y_pred = np.array([])
    for cluster in tqdm(val_clusters):
        cluster_df = df.loc[df['unique_cluster'] == cluster]
        if cluster_df.shape[0] == 0:
            # Cluster listed in the split file but absent from data.csv.
            continue
        val_c.append(cluster)
        # All rows of a cluster share one label; take it from the first row.
        target = cluster_df[args.label].values[0]
        y_true = np.append(y_true, target)
        dataset = ClusterImgDataset(cluster_df, device)
        y_pred = np.append(y_pred, get_majority_vote(dataset, model, device))
        del cluster_df, dataset
    logging.debug(y_true.shape)
    logging.debug(y_pred.shape)
    print(confusion_matrix(y_true, y_pred))
    logging.debug(confusion_matrix(y_true, y_pred))
    prec, recall, fscore, support = precision_recall_fscore_support(y_true, y_pred, average='micro')
    print("Precision: {}\nRecall: {}\nF-Score: {}\nSupport: {}".format(prec, recall, fscore, support))
    logging.debug("Precision: {}\nRecall: {}\nF-Score: {}\nSupport: {}".format(prec, recall, fscore, support))
    # Save predictions
    # NOTE(review): the prediction column name is args.label + 'pred' with
    # no separator (e.g. 'pov_labelpred') -- kept for output compatibility.
    df = pd.DataFrame({'unique_cluster': np.array(val_c),
                       args.label: y_true,
                       args.label + 'pred': y_pred})
    df.to_csv(args.label + '_preds.csv', index=False)
if __name__ == "__main__":
main()
| import argparse
import logging
import os
import h5py
import torch
import torch.nn as nn
import pandas as pd
import numpy as np
from collections import Counter
from skimage import io, transform
from sklearn.metrics import confusion_matrix, precision_recall_fscore_support
from timeit import default_timer as timer
from torch.utils.data import Dataset, DataLoader
from torchvision import models
from torchvision import transforms, utils
from tqdm import tqdm
# Command-line configuration, parsed once at import time (this module is
# meant to be run as a script).
parser = argparse.ArgumentParser()
parser.add_argument('--log_file', type=str, default='results_pov_label_img.log', help='Log file')
parser.add_argument('--model_weights', type=str, default='models/pov_classify', help='Where pretrained model is located')
parser.add_argument('--resnet_ver', type=str, default='resnet34', help='Which ResNet architecture was used')
parser.add_argument('--label', type=str, default='pov_label', help='Label')
parser.add_argument('--batch_size', type=int, default=64)
args = parser.parse_args()
logging.basicConfig(filename=args.log_file,level=logging.DEBUG)
class ClusterImgDataset(Dataset):
    """Dataset over the images of one cluster.

    Reads image file paths from the 'img_path_224x224' column of the given
    dataframe and yields each image as a CHW float-compatible uint8 tensor.
    """

    def __init__(self, df, device):
        self.img_paths = df['img_path_224x224'].to_numpy()
        # NOTE(review): `device` is stored but never used by this class;
        # batches are moved to the device by the caller.
        self.device = device

    def __len__(self):
        return self.img_paths.shape[0]

    def __getitem__(self, idx):
        image = io.imread(self.img_paths[idx])
        image_tensor = torch.from_numpy(image)
        # Convert HWC (skimage layout) to CHW (torch layout).
        image_tensor = image_tensor.permute(2,0,1)
        return image_tensor
def create_model():
    """Build the ResNet classifier and load the fine-tuned weights.

    Returns a torchvision ResNet (18 or 34, per ``args.resnet_ver``) whose
    final fully-connected layer is replaced with a 2-class head.

    NOTE(review): if ``args.resnet_ver`` is neither 'resnet18' nor
    'resnet34', `model` is never bound and the code below raises NameError.
    """
    pretrained_weights = torch.load(args.model_weights)
    if args.resnet_ver == 'resnet18':
        model = models.resnet18(pretrained=False)
    elif args.resnet_ver == 'resnet34':
        model = models.resnet34(pretrained=False)
    model.fc = nn.Linear(512, 2)
    model.load_state_dict(pretrained_weights)
    return model
def get_majority_vote(cluster_dataset, model, device):
    """Classify every image in a cluster and return the majority class.

    Predictions accumulate in a float numpy array, so the returned class is
    a float. Ties are broken by Counter.most_common's internal ordering.
    """
    model.eval()
    pred = np.array([])
    generator = DataLoader(cluster_dataset, batch_size=args.batch_size, num_workers=1)
    for batch in generator:
        batch = batch.to(device, dtype=torch.float32)
        output = model(batch)
        # Class index with the highest score per image.
        predicted = output.argmax(dim=1, keepdim=True)
        pred = np.append(pred, predicted.cpu().numpy())
        del batch
    del generator
    votes = Counter(pred)
    majority = votes.most_common(1)[0][0]
    del pred
    return majority
def main():
    """Evaluate the cluster-level majority-vote classifier.

    Reads per-image metadata from data.csv and validation cluster ids from
    val_clusters_ia.txt, computes one majority-vote prediction per cluster,
    logs metrics, and writes predictions to '<label>_preds.csv'.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    df = pd.read_csv('data.csv')
    val_f = open('val_clusters_ia.txt', 'r')
    # Strip the trailing newline from each cluster id.
    val_clusters = [x[:-1] for x in val_f.readlines()]
    val_f.close()
    print("Creating model...")
    model = create_model().to(device)
    model.eval()
    val_c = []
    y_true = np.array([])
    y_pred = np.array([])
    for cluster in tqdm(val_clusters):
        cluster_df = df.loc[df['unique_cluster'] == cluster]
        if cluster_df.shape[0] == 0:
            # Cluster listed in the split file but absent from data.csv.
            continue
        val_c.append(cluster)
        # NOTE(review): `target` is assigned but unused; the same value is
        # re-read on the next line.
        target = cluster_df[args.label].values[0]
        y_true = np.append(y_true, cluster_df[args.label].values[0])
        dataset = ClusterImgDataset(cluster_df, device)
        y_pred = np.append(y_pred, get_majority_vote(dataset, model, device))
        del cluster_df, dataset
    logging.debug(y_true.shape)
    logging.debug(y_pred.shape)
    print(confusion_matrix(y_true, y_pred))
    logging.debug(confusion_matrix(y_true, y_pred))
    prec, recall, fscore, support = precision_recall_fscore_support(y_true, y_pred, average='micro')
    print("Precision: {}\nRecall: {}\nF-Score: {}\nSupport: {}".format(prec, recall, fscore, support))
    logging.debug("Precision: {}\nRecall: {}\nF-Score: {}\nSupport: {}".format(prec, recall, fscore, support))
    # Save predictions
    # NOTE(review): the prediction column name is args.label + 'pred' with
    # no separator (e.g. 'pov_labelpred').
    df = pd.DataFrame({'unique_cluster': np.array(val_c),
                       args.label: y_true,
                       args.label + 'pred': y_pred})
    df.to_csv(args.label + '_preds.csv', index=False)
if __name__ == "__main__":
main()
| en | 0.435207 | # Save predictions | 2.057547 | 2 |
happi/qt/__init__.py | cristinasewell/happi | 6 | 6613037 | from .model import HappiDeviceListView
__all__ = ["HappiDeviceListView"]
| from .model import HappiDeviceListView
__all__ = ["HappiDeviceListView"]
| none | 1 | 1.060234 | 1 | |
data_log/migrations/0005_auto_20190322_1821.py | Itori/swarfarm | 66 | 6613038 | # Generated by Django 2.1.7 on 2019-03-23 01:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('data_log', '0004_riftdungeonlog_clear_time'),
]
operations = [
migrations.RemoveField(
model_name='riftraiditemdrop',
name='item',
),
migrations.RemoveField(
model_name='riftraiditemdrop',
name='log',
),
migrations.RemoveField(
model_name='riftraidmonsterdrop',
name='item',
),
migrations.RemoveField(
model_name='riftraidmonsterdrop',
name='log',
),
migrations.RemoveField(
model_name='riftraidrunecraftdrop',
name='item',
),
migrations.RemoveField(
model_name='riftraidrunecraftdrop',
name='log',
),
migrations.RemoveField(
model_name='worldbosslog',
name='summoner',
),
migrations.RemoveField(
model_name='worldbosslogitemdrop',
name='item',
),
migrations.RemoveField(
model_name='worldbosslogitemdrop',
name='log',
),
migrations.RemoveField(
model_name='worldbosslogmonsterdrop',
name='log',
),
migrations.RemoveField(
model_name='worldbosslogmonsterdrop',
name='monster',
),
migrations.RemoveField(
model_name='worldbosslogrunedrop',
name='log',
),
migrations.AlterField(
model_name='dungeonitemdrop',
name='item',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.GameItem'),
),
migrations.AlterField(
model_name='dungeonitemdrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='data_log.DungeonLog'),
),
migrations.AlterField(
model_name='dungeonlog',
name='level',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.Level'),
),
migrations.AlterField(
model_name='dungeonmonsterdrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='monsters', to='data_log.DungeonLog'),
),
migrations.AlterField(
model_name='dungeonmonsterdrop',
name='monster',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.Monster'),
),
migrations.AlterField(
model_name='dungeonmonsterpiecedrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='monster_pieces', to='data_log.DungeonLog'),
),
migrations.AlterField(
model_name='dungeonmonsterpiecedrop',
name='monster',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.Monster'),
),
migrations.AlterField(
model_name='dungeonrunedrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='runes', to='data_log.DungeonLog'),
),
migrations.AlterField(
model_name='dungeonsecretdungeondrop',
name='level',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.Level'),
),
migrations.AlterField(
model_name='dungeonsecretdungeondrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='secret_dungeons', to='data_log.DungeonLog'),
),
migrations.AlterField(
model_name='magicboxcraftitemdrop',
name='item',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.GameItem'),
),
migrations.AlterField(
model_name='magicboxcraftitemdrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='data_log.MagicBoxCraft'),
),
migrations.AlterField(
model_name='magicboxcraftrunecraftdrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='rune_crafts', to='data_log.MagicBoxCraft'),
),
migrations.AlterField(
model_name='magicboxcraftrunedrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='runes', to='data_log.MagicBoxCraft'),
),
migrations.AlterField(
model_name='riftdungeonitemdrop',
name='item',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.GameItem'),
),
migrations.AlterField(
model_name='riftdungeonitemdrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='data_log.RiftDungeonLog'),
),
migrations.AlterField(
model_name='riftdungeonlog',
name='level',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.Level'),
),
migrations.AlterField(
model_name='riftdungeonmonsterdrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='monsters', to='data_log.RiftDungeonLog'),
),
migrations.AlterField(
model_name='riftdungeonmonsterdrop',
name='monster',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.Monster'),
),
migrations.AlterField(
model_name='riftdungeonrunecraftdrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='rune_crafts', to='data_log.RiftDungeonLog'),
),
migrations.AlterField(
model_name='riftdungeonrunedrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='runes', to='data_log.RiftDungeonLog'),
),
migrations.AlterField(
model_name='riftraidlog',
name='level',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.Level'),
),
migrations.AlterField(
model_name='shoprefreshitemdrop',
name='item',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.GameItem'),
),
migrations.AlterField(
model_name='shoprefreshitemdrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='data_log.ShopRefreshLog'),
),
migrations.AlterField(
model_name='shoprefreshmonsterdrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='monsters', to='data_log.ShopRefreshLog'),
),
migrations.AlterField(
model_name='shoprefreshmonsterdrop',
name='monster',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.Monster'),
),
migrations.AlterField(
model_name='shoprefreshrunedrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='runes', to='data_log.ShopRefreshLog'),
),
migrations.AlterField(
model_name='summonlog',
name='item',
field=models.ForeignKey(help_text='Item or currency used to summon', on_delete=django.db.models.deletion.PROTECT, to='bestiary.GameItem'),
),
migrations.AlterField(
model_name='summonlog',
name='monster',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.Monster'),
),
migrations.AlterField(
model_name='wishlogitemdrop',
name='item',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.GameItem'),
),
migrations.AlterField(
model_name='wishlogitemdrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='data_log.WishLog'),
),
migrations.AlterField(
model_name='wishlogmonsterdrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='monsters', to='data_log.WishLog'),
),
migrations.AlterField(
model_name='wishlogmonsterdrop',
name='monster',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.Monster'),
),
migrations.AlterField(
model_name='wishlogrunedrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='runes', to='data_log.WishLog'),
),
migrations.DeleteModel(
name='RiftRaidItemDrop',
),
migrations.DeleteModel(
name='RiftRaidMonsterDrop',
),
migrations.DeleteModel(
name='RiftRaidRuneCraftDrop',
),
migrations.DeleteModel(
name='WorldBossLog',
),
migrations.DeleteModel(
name='WorldBossLogItemDrop',
),
migrations.DeleteModel(
name='WorldBossLogMonsterDrop',
),
migrations.DeleteModel(
name='WorldBossLogRuneDrop',
),
]
| # Generated by Django 2.1.7 on 2019-03-23 01:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('data_log', '0004_riftdungeonlog_clear_time'),
]
operations = [
migrations.RemoveField(
model_name='riftraiditemdrop',
name='item',
),
migrations.RemoveField(
model_name='riftraiditemdrop',
name='log',
),
migrations.RemoveField(
model_name='riftraidmonsterdrop',
name='item',
),
migrations.RemoveField(
model_name='riftraidmonsterdrop',
name='log',
),
migrations.RemoveField(
model_name='riftraidrunecraftdrop',
name='item',
),
migrations.RemoveField(
model_name='riftraidrunecraftdrop',
name='log',
),
migrations.RemoveField(
model_name='worldbosslog',
name='summoner',
),
migrations.RemoveField(
model_name='worldbosslogitemdrop',
name='item',
),
migrations.RemoveField(
model_name='worldbosslogitemdrop',
name='log',
),
migrations.RemoveField(
model_name='worldbosslogmonsterdrop',
name='log',
),
migrations.RemoveField(
model_name='worldbosslogmonsterdrop',
name='monster',
),
migrations.RemoveField(
model_name='worldbosslogrunedrop',
name='log',
),
migrations.AlterField(
model_name='dungeonitemdrop',
name='item',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.GameItem'),
),
migrations.AlterField(
model_name='dungeonitemdrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='data_log.DungeonLog'),
),
migrations.AlterField(
model_name='dungeonlog',
name='level',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.Level'),
),
migrations.AlterField(
model_name='dungeonmonsterdrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='monsters', to='data_log.DungeonLog'),
),
migrations.AlterField(
model_name='dungeonmonsterdrop',
name='monster',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.Monster'),
),
migrations.AlterField(
model_name='dungeonmonsterpiecedrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='monster_pieces', to='data_log.DungeonLog'),
),
migrations.AlterField(
model_name='dungeonmonsterpiecedrop',
name='monster',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.Monster'),
),
migrations.AlterField(
model_name='dungeonrunedrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='runes', to='data_log.DungeonLog'),
),
migrations.AlterField(
model_name='dungeonsecretdungeondrop',
name='level',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.Level'),
),
migrations.AlterField(
model_name='dungeonsecretdungeondrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='secret_dungeons', to='data_log.DungeonLog'),
),
migrations.AlterField(
model_name='magicboxcraftitemdrop',
name='item',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.GameItem'),
),
migrations.AlterField(
model_name='magicboxcraftitemdrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='data_log.MagicBoxCraft'),
),
migrations.AlterField(
model_name='magicboxcraftrunecraftdrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='rune_crafts', to='data_log.MagicBoxCraft'),
),
migrations.AlterField(
model_name='magicboxcraftrunedrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='runes', to='data_log.MagicBoxCraft'),
),
migrations.AlterField(
model_name='riftdungeonitemdrop',
name='item',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.GameItem'),
),
migrations.AlterField(
model_name='riftdungeonitemdrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='data_log.RiftDungeonLog'),
),
migrations.AlterField(
model_name='riftdungeonlog',
name='level',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.Level'),
),
migrations.AlterField(
model_name='riftdungeonmonsterdrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='monsters', to='data_log.RiftDungeonLog'),
),
migrations.AlterField(
model_name='riftdungeonmonsterdrop',
name='monster',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.Monster'),
),
migrations.AlterField(
model_name='riftdungeonrunecraftdrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='rune_crafts', to='data_log.RiftDungeonLog'),
),
migrations.AlterField(
model_name='riftdungeonrunedrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='runes', to='data_log.RiftDungeonLog'),
),
migrations.AlterField(
model_name='riftraidlog',
name='level',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.Level'),
),
migrations.AlterField(
model_name='shoprefreshitemdrop',
name='item',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.GameItem'),
),
migrations.AlterField(
model_name='shoprefreshitemdrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='data_log.ShopRefreshLog'),
),
migrations.AlterField(
model_name='shoprefreshmonsterdrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='monsters', to='data_log.ShopRefreshLog'),
),
migrations.AlterField(
model_name='shoprefreshmonsterdrop',
name='monster',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.Monster'),
),
migrations.AlterField(
model_name='shoprefreshrunedrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='runes', to='data_log.ShopRefreshLog'),
),
migrations.AlterField(
model_name='summonlog',
name='item',
field=models.ForeignKey(help_text='Item or currency used to summon', on_delete=django.db.models.deletion.PROTECT, to='bestiary.GameItem'),
),
migrations.AlterField(
model_name='summonlog',
name='monster',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.Monster'),
),
migrations.AlterField(
model_name='wishlogitemdrop',
name='item',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.GameItem'),
),
migrations.AlterField(
model_name='wishlogitemdrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='data_log.WishLog'),
),
migrations.AlterField(
model_name='wishlogmonsterdrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='monsters', to='data_log.WishLog'),
),
migrations.AlterField(
model_name='wishlogmonsterdrop',
name='monster',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='bestiary.Monster'),
),
migrations.AlterField(
model_name='wishlogrunedrop',
name='log',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='runes', to='data_log.WishLog'),
),
migrations.DeleteModel(
name='RiftRaidItemDrop',
),
migrations.DeleteModel(
name='RiftRaidMonsterDrop',
),
migrations.DeleteModel(
name='RiftRaidRuneCraftDrop',
),
migrations.DeleteModel(
name='WorldBossLog',
),
migrations.DeleteModel(
name='WorldBossLogItemDrop',
),
migrations.DeleteModel(
name='WorldBossLogMonsterDrop',
),
migrations.DeleteModel(
name='WorldBossLogRuneDrop',
),
]
| en | 0.82377 | # Generated by Django 2.1.7 on 2019-03-23 01:21 | 1.664676 | 2 |
Company_Con_Final/NIKKEI_Final_Exhibition/E.py | yosho-18/AtCoder | 0 | 6613039 | <gh_stars>0
n = int(input())
tmp = ""
for i in range(1, n + 1):
if i % 2 == 0:
tmp += "a"
if i % 3 == 0:
tmp += "b"
if i % 4 == 0:
tmp += "c"
if i % 5 == 0:
tmp += "d"
if i % 6 == 0:
tmp += "e"
print(tmp if tmp != "" else i)
tmp = "" | n = int(input())
tmp = ""
for i in range(1, n + 1):
if i % 2 == 0:
tmp += "a"
if i % 3 == 0:
tmp += "b"
if i % 4 == 0:
tmp += "c"
if i % 5 == 0:
tmp += "d"
if i % 6 == 0:
tmp += "e"
print(tmp if tmp != "" else i)
tmp = "" | none | 1 | 3.533882 | 4 | |
lib/constants.py | khromiumos/chromiumos-chromite | 0 | 6613040 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module contains constants used by cbuildbot and related code."""
from __future__ import print_function
import itertools
import os
def _FindSourceRoot():
"""Try and find the root check out of the chromiumos tree"""
source_root = path = os.path.realpath(os.path.join(
os.path.abspath(__file__), '..', '..', '..'))
while True:
if os.path.isdir(os.path.join(path, '.repo')):
return path
elif path == '/':
break
path = os.path.dirname(path)
return source_root
SOURCE_ROOT = _FindSourceRoot()
CHROOT_SOURCE_ROOT = '/mnt/host/source'
CHROOT_CACHE_ROOT = '/var/cache/chromeos-cache'
DEPOT_TOOLS_SUBPATH = 'src/chromium/depot_tools'
CROSUTILS_DIR = os.path.join(SOURCE_ROOT, 'src/scripts')
CHROMITE_DIR = os.path.realpath(os.path.join(
os.path.abspath(__file__), '..', '..'))
BOOTSTRAP_DIR = os.path.join(CHROMITE_DIR, 'bootstrap')
DEPOT_TOOLS_DIR = os.path.join(SOURCE_ROOT, DEPOT_TOOLS_SUBPATH)
CHROMITE_BIN_SUBDIR = 'chromite/bin'
CHROMITE_BIN_DIR = os.path.join(CHROMITE_DIR, 'bin')
CHROMITE_SCRIPTS_DIR = os.path.join(CHROMITE_DIR, 'scripts')
PATH_TO_CBUILDBOT = os.path.join(CHROMITE_BIN_SUBDIR, 'cbuildbot')
DEFAULT_CHROOT_DIR = 'chroot'
DEFAULT_CHROOT_PATH = os.path.join(SOURCE_ROOT, DEFAULT_CHROOT_DIR)
TERMINA_TOOLS_DIR = os.path.join(
SOURCE_ROOT, 'src/platform/container-guest-tools/termina')
STATEFUL_DIR = '/mnt/stateful_partition'
# These constants are defined and used in the die_hook that logs failed
# packages: 'cros_log_failed_packages' in profiles/base/profile.bashrc in
# chromiumos-overlay. The status file is generated in CROS_METRICS_DIR, and
# only if that environment variable is defined.
CROS_METRICS_DIR_ENVVAR = 'CROS_METRICS_DIR'
DIE_HOOK_STATUS_FILE_NAME = 'FAILED_PACKAGES'
CHROMEOS_CONFIG_FILE = os.path.join(CHROMITE_DIR, 'config', 'config_dump.json')
WATERFALL_CONFIG_FILE = os.path.join(
CHROMITE_DIR, 'config', 'waterfall_layout_dump.txt')
LUCI_SCHEDULER_CONFIG_FILE = os.path.join(
CHROMITE_DIR, 'config', 'luci-scheduler.cfg')
GE_BUILD_CONFIG_FILE = os.path.join(
CHROMITE_DIR, 'config', 'ge_build_config.json')
# The following define the location for storing toolchain packages and
# SDK overlay tarballs created during SDK builder runs. The paths are relative
# to the build root's chroot, which guarantees that they are reachable from it
# and get cleaned up when it is removed.
SDK_TOOLCHAINS_OUTPUT = 'tmp/toolchain-pkgs'
SDK_OVERLAYS_OUTPUT = 'tmp/sdk-overlays'
AUTOTEST_BUILD_PATH = 'usr/local/build/autotest'
UNITTEST_PKG_PATH = 'test-packages'
# Only used for testing pinned images on test images.
GUEST_IMAGES_PINS_PATH = 'usr/local/opt/google/containers/pins'
PIN_KEY_FILENAME = 'filename'
PIN_KEY_GSURI = 'gsuri'
# Path to the lsb-release file on the device.
LSB_RELEASE_PATH = '/etc/lsb-release'
HOME_DIRECTORY = os.path.expanduser('~')
# If cbuiltbot is running on a bot, then the cidb access credentials will be
# available here. This directory will not exist otherwise.
CIDB_PROD_BOT_CREDS = os.path.join(HOME_DIRECTORY, '.cidb_creds',
'prod_cidb_bot')
CIDB_DEBUG_BOT_CREDS = os.path.join(HOME_DIRECTORY, '.cidb_creds',
'debug_cidb_bot')
# Crash Server upload API key.
CRASH_API_KEY = os.path.join('/', 'creds', 'api_keys',
'api_key-chromeos-crash-uploader')
# Buildbucket build status
BUILDBUCKET_BUILDER_STATUS_SCHEDULED = 'SCHEDULED'
BUILDBUCKET_BUILDER_STATUS_STARTED = 'STARTED'
BUILDBUCKET_BUILDER_STATUS_COMPLETED = 'COMPLETED'
BUILDBUCKET_BUILDER_STATUSES = (BUILDBUCKET_BUILDER_STATUS_SCHEDULED,
BUILDBUCKET_BUILDER_STATUS_STARTED,
BUILDBUCKET_BUILDER_STATUS_COMPLETED)
BUILDBUCKET_BUILDER_RESULT_SUCCESS = 'SUCCESS'
BUILDBUCKET_BUILDER_RESULT_FAILURE = 'FAILURE'
BUILDBUCKET_BUILDER_RESULT_CANCELED = 'CANCELED'
# Builder status strings
BUILDER_STATUS_FAILED = 'fail'
BUILDER_STATUS_PASSED = 'pass'
BUILDER_STATUS_INFLIGHT = 'inflight'
BUILDER_STATUS_MISSING = 'missing'
BUILDER_STATUS_ABORTED = 'aborted'
# The following statuses are currently only used for build stages.
BUILDER_STATUS_PLANNED = 'planned'
BUILDER_STATUS_WAITING = 'waiting'
BUILDER_STATUS_SKIPPED = 'skipped'
BUILDER_STATUS_FORGIVEN = 'forgiven'
BUILDER_COMPLETED_STATUSES = (BUILDER_STATUS_PASSED,
BUILDER_STATUS_FAILED,
BUILDER_STATUS_ABORTED,
BUILDER_STATUS_SKIPPED,
BUILDER_STATUS_FORGIVEN)
BUILDER_ALL_STATUSES = (BUILDER_STATUS_FAILED,
BUILDER_STATUS_PASSED,
BUILDER_STATUS_INFLIGHT,
BUILDER_STATUS_MISSING,
BUILDER_STATUS_ABORTED,
BUILDER_STATUS_WAITING,
BUILDER_STATUS_PLANNED,
BUILDER_STATUS_SKIPPED,
BUILDER_STATUS_FORGIVEN)
BUILDER_NON_FAILURE_STATUSES = (BUILDER_STATUS_PLANNED,
BUILDER_STATUS_PASSED,
BUILDER_STATUS_SKIPPED,
# Quick fix for Buildbucket race problems.
BUILDER_STATUS_INFLIGHT,
BUILDER_STATUS_FORGIVEN)
# Partition labels
CROS_PART_STATEFUL = 'STATE'
# Signer status strings
SIGNER_STATUS_PASSED = 'passed'
SIGNER_STATUS_FAILED = 'failed'
# Change sources
CHANGE_SOURCE_INTERNAL = 'internal'
CHANGE_SOURCE_EXTERNAL = 'external'
# Exception categories, as recorded in cidb
EXCEPTION_CATEGORY_UNKNOWN = 'unknown'
EXCEPTION_CATEGORY_BUILD = 'build'
EXCEPTION_CATEGORY_TEST = 'test'
EXCEPTION_CATEGORY_INFRA = 'infra'
EXCEPTION_CATEGORY_LAB = 'lab'
EXCEPTION_CATEGORY_ALL_CATEGORIES = (
EXCEPTION_CATEGORY_UNKNOWN,
EXCEPTION_CATEGORY_BUILD,
EXCEPTION_CATEGORY_TEST,
EXCEPTION_CATEGORY_INFRA,
EXCEPTION_CATEGORY_LAB,
)
# Monarch metric names
MON_LAST_SLAVE = 'chromeos/cbuildbot/last_completed_slave'
MON_BUILD_COMP_COUNT = 'chromeos/cbuildbot/build/completed_count'
MON_BUILD_DURATION = 'chromeos/cbuildbot/build/durations'
MON_STAGE_COMP_COUNT = 'chromeos/cbuildbot/stage/completed_count'
MON_STAGE_DURATION = 'chromeos/cbuildbot/stage/durations'
MON_STAGE_INSTANCE_DURATION = 'chromeos/cbuildbot/stage/instance_durations'
MON_STAGE_FAILURE_COUNT = 'chromeos/cbuildbot/stage/failure_count'
MON_FAILED_STAGE = 'chromeos/chromite/cbuildbot_launch/failed_stage'
MON_CHROOT_USED = 'chromeos/cbuildbot/chroot_at_version'
MON_REPO_SYNC_COUNT = 'chromeos/cbuildbot/repo/sync_count'
MON_REPO_SYNC_RETRY_COUNT = 'chromeos/cbuildbot/repo/sync_retry_count'
MON_REPO_SELFUPDATE_FAILURE_COUNT = ('chromeos/cbuildbot/repo/'
'selfupdate_failure_count')
MON_REPO_INIT_RETRY_COUNT = 'chromeos/cbuildbot/repo/init_retry_count'
MON_REPO_MANIFEST_FAILURE_COUNT = ('chromeos/cbuildbot/repo/'
'manifest_failure_count')
MON_BB_RETRY_BUILD_COUNT = ('chromeos/cbuildbot/buildbucket/'
'retry_build_count')
MON_BB_CANCEL_BATCH_BUILDS_COUNT = ('chromeos/cbuildbot/buildbucket/'
'cancel_batch_builds_count')
MON_EXPORT_TO_GCLOUD = 'chromeos/cbuildbot/export_to_gcloud'
# Stage Categorization for failed stages metric.
UNCATEGORIZED_STAGE = 'Uncategorized'
CI_INFRA_STAGE = 'CI-Infra'
TEST_INFRA_STAGE = 'Test-Infra'
PRODUCT_OS_STAGE = 'Product-OS'
PRODUCT_ANDROID_STAGE = 'Product-Android'
PRODUCT_CHROME_STAGE = 'Product-Chrome'
PRODUCT_TOOLCHAIN_STAGE = 'Product-Toolchain'
# Re-execution API constants.
# Used by --resume and --bootstrap to decipher which options they
# can pass to the target cbuildbot (since it may not have that
# option).
# Format is Major.Minor. Minor is used for tracking new options added
# that aren't critical to the older version if it's not ran.
# Major is used for tracking heavy API breakage- for example, no longer
# supporting the --resume option.
REEXEC_API_MAJOR = 0
REEXEC_API_MINOR = 12
REEXEC_API_VERSION = '%i.%i' % (REEXEC_API_MAJOR, REEXEC_API_MINOR)
# Support --master-build-id
REEXEC_API_MASTER_BUILD_ID = 3
# Support --git-cache-dir
REEXEC_API_GIT_CACHE_DIR = 4
# Support --goma_dir and --goma_client_json
REEXEC_API_GOMA = 5
# Support --ts-mon-task-num
REEXEC_API_TSMON_TASK_NUM = 6
# Support --sanity-check-build
REEXEC_API_SANITY_CHECK_BUILD = 7
# Support --previous-build-state
REEXEC_API_PREVIOUS_BUILD_STATE = 8
# Support --workspace
REEXEC_API_WORKSPACE = 9
# Support --master-buildbucket-id
REEXEC_API_MASTER_BUILDBUCKET_ID = 10
# Support --chromeos_goma_dir
REEXEC_API_CHROMEOS_GOMA_DIR = 11
# Support --chrome-preload-dir
REEXEC_API_CHROME_PRELOAD_DIR = 12
# We rely on the (waterfall, builder name, build number) to uniquely identify
# a build. However, future migrations or state wipes of the buildbot master may
# cause it to reset its build number counter. When that happens, this value
# should be incremented, ensuring that (waterfall, builder name, build number,
# buildbot generation) is a unique identifier of builds.
BUILDBOT_GENERATION = 1
ISOLATESERVER = 'https://isolateserver.appspot.com'
GOOGLE_EMAIL = '@google.com'
CHROMIUM_EMAIL = '@chromium.org'
CORP_DOMAIN = 'corp.google.com'
GOLO_DOMAIN = 'golo.chromium.org'
CHROME_DOMAIN = 'chrome.' + CORP_DOMAIN
CHROMEOS_BOT_INTERNAL = 'chromeos-bot.internal'
GOB_HOST = '%s.googlesource.com'
EXTERNAL_GOB_INSTANCE = 'chromium'
EXTERNAL_GERRIT_INSTANCE = 'chromium-review'
EXTERNAL_GOB_HOST = GOB_HOST % EXTERNAL_GOB_INSTANCE
EXTERNAL_GERRIT_HOST = GOB_HOST % EXTERNAL_GERRIT_INSTANCE
EXTERNAL_GOB_URL = 'https://%s' % EXTERNAL_GOB_HOST
EXTERNAL_GERRIT_URL = 'https://%s' % EXTERNAL_GERRIT_HOST
INTERNAL_GOB_INSTANCE = 'chrome-internal'
INTERNAL_GERRIT_INSTANCE = 'chrome-internal-review'
INTERNAL_GOB_HOST = GOB_HOST % INTERNAL_GOB_INSTANCE
INTERNAL_GERRIT_HOST = GOB_HOST % INTERNAL_GERRIT_INSTANCE
INTERNAL_GOB_URL = 'https://%s' % INTERNAL_GOB_HOST
INTERNAL_GERRIT_URL = 'https://%s' % INTERNAL_GERRIT_HOST
# Tests without 'cheets_CTS_', 'cheets_GTS.' prefix will not considered
# as CTS/GTS test in chromite.lib.cts_helper
DEFAULT_CTS_TEST_XML_MAP = {
'cheets_CTS_': 'test_result.xml',
'cheets_GTS.': 'test_result.xml',
'cheets_GTS_': 'test_result.xml',
}
# Google Storage bucket URI to store results in.
DEFAULT_CTS_RESULTS_GSURI = 'gs://chromeos-cts-results/'
DEFAULT_CTS_APFE_GSURI = 'gs://chromeos-cts-apfe/'
ANDROID_CONTAINER_PACKAGE_KEYWORD = 'android-container'
ANDROID_VM_PACKAGE_KEYWORD = 'android-vm'
ANDROID_BUCKET_URL = 'gs://android-build-chromeos/builds'
ANDROID_PI_BUILD_BRANCH = 'git_pi-arc'
ANDROID_VMRVC_BUILD_BRANCH = 'git_rvc-arc'
ANDROID_VMMST_BUILD_BRANCH = 'git_master-arc-dev'
ANDROID_PI_BUILD_TARGETS = {
# Roll XkbToKcmConverter with system image. It's a host executable and
# doesn't depend on the target as long as it's pi-arc branch. The converter
# is ARC specific and not a part of Android SDK. Having a custom target like
# SDK_TOOLS might be better in the long term, but let's use one from ARM or
# X86 target as there's no other similar executables right now.
# We put it in two buckets because we have separate ACLs for arm and x86.
# http://b/128405786
'APPS': ('linux-apps', 'org.chromium.arc.cachebuilder.jar'),
'ARM': ('linux-cheets_arm-user', r'(\.zip|/XkbToKcmConverter)$'),
'ARM64': ('linux-cheets_arm64-user', r'(\.zip|/XkbToKcmConverter)$'),
'X86': ('linux-cheets_x86-user', r'(\.zip|/XkbToKcmConverter)$'),
'X86_64': ('linux-cheets_x86_64-user', r'\.zip$'),
'ARM_USERDEBUG': ('linux-cheets_arm-userdebug', r'\.zip$'),
'ARM64_USERDEBUG': ('linux-cheets_arm64-userdebug', r'\.zip$'),
'X86_USERDEBUG': ('linux-cheets_x86-userdebug', r'\.zip$'),
'X86_64_USERDEBUG': ('linux-cheets_x86_64-userdebug', r'\.zip$'),
'SDK_GOOGLE_X86_USERDEBUG': ('linux-sdk_cheets_x86-userdebug', r'\.zip$'),
'SDK_GOOGLE_X86_64_USERDEBUG': ('linux-sdk_cheets_x86_64-userdebug',
r'\.zip$'),
}
ANDROID_VMMST_BUILD_TARGETS = {
# For XkbToKcmConverter, see the comment in ANDROID_PI_BUILD_TARGETS.
'X86_64_USERDEBUG': ('linux-bertha_x86_64-userdebug',
r'(\.zip|/XkbToKcmConverter)$'),
}
ANDROID_VMRVC_BUILD_TARGETS = {
# For XkbToKcmConverter, see the comment in ANDROID_PI_BUILD_TARGETS.
'APPS': ('linux-apps', 'org.chromium.arc.cachebuilder.jar'),
'ARM64': ('linux-bertha_arm64-user', r'(\.zip|/XkbToKcmConverter)$'),
'X86_64': ('linux-bertha_x86_64-user', r'(\.zip|/XkbToKcmConverter)$'),
'ARM64_USERDEBUG': ('linux-bertha_arm64-userdebug',
r'(\.zip|/XkbToKcmConverter)$'),
'X86_64_USERDEBUG': ('linux-bertha_x86_64-userdebug',
r'(\.zip|/XkbToKcmConverter)$'),
}
# These refer to *_TARGET variables in Android ebuild files, used when
# parsing ebuilds to determine the corresponding Android branch.
# NOTE: We may use `|` operator to union dict keys after we completely go
# Python 3.
ANDROID_ALL_BUILD_TARGETS = frozenset(
x + '_TARGET' for x in itertools.chain(
ANDROID_PI_BUILD_TARGETS,
ANDROID_VMMST_BUILD_TARGETS,
ANDROID_VMRVC_BUILD_TARGETS,
)
)
ARC_BUCKET_URL = 'gs://chromeos-arc-images/builds'
ARC_BUCKET_ACLS = {
'APPS': 'googlestorage_acl_public.txt',
'ARM': 'googlestorage_acl_arm.txt',
'ARM64': 'googlestorage_acl_arm.txt',
'X86': 'googlestorage_acl_x86.txt',
'X86_64': 'googlestorage_acl_x86.txt',
'ARM_USERDEBUG': 'googlestorage_acl_arm.txt',
'ARM64_USERDEBUG': 'googlestorage_acl_arm.txt',
'X86_USERDEBUG': 'googlestorage_acl_x86.txt',
'X86_64_USERDEBUG': 'googlestorage_acl_x86.txt',
'SDK_GOOGLE_X86_USERDEBUG': 'googlestorage_acl_x86.txt',
'SDK_GOOGLE_X86_64_USERDEBUG': 'googlestorage_acl_x86.txt',
}
ANDROID_SYMBOLS_URL_TEMPLATE = (
ARC_BUCKET_URL +
'/%(branch)s-linux-%(target)s_%(arch)s-%(variant)s/%(version)s'
'/%(target)s_%(arch)s%(suffix)s-symbols-%(version)s.zip')
ANDROID_SYMBOLS_FILE = 'android-symbols.zip'
# x86-user, x86-userdebug and x86-eng builders create build artifacts with the
# same name, e.g. cheets_x86-target_files-${VERSION}.zip. Chrome OS builders
# that need to select x86-user or x86-userdebug artifacts at emerge time need
# the artifacts to have different filenames to avoid checksum failures. These
# targets will have their artifacts renamed when the PFQ copies them from the
# the Android bucket to the ARC++ bucket (b/33072485).
ARC_BUILDS_NEED_ARTIFACTS_RENAMED = {
'ARM_USERDEBUG',
'ARM64_USERDEBUG',
'X86_USERDEBUG',
'X86_64_USERDEBUG',
'SDK_GOOGLE_X86_USERDEBUG',
'SDK_GOOGLE_X86_64_USERDEBUG',
}
# All builds will have the same name without target prefix.
# Emerge checksum failures will be workarounded by ebuild rename symbol (->).
ARC_ARTIFACTS_RENAME_NOT_NEEDED = [
'push_to_device.zip',
'sepolicy.zip',
'XkbToKcmConverter',
]
GOB_COOKIE_PATH = os.path.expanduser('~/.git-credential-cache/cookie')
GITCOOKIES_PATH = os.path.expanduser('~/.gitcookies')
# Timestamps in the JSON from GoB's web interface is of the form 'Tue
# Dec 02 17:48:06 2014' and is assumed to be in UTC.
GOB_COMMIT_TIME_FORMAT = '%a %b %d %H:%M:%S %Y'
CHROMITE_PROJECT = 'chromiumos/chromite'
CHROMITE_URL = '%s/%s' % (EXTERNAL_GOB_URL, CHROMITE_PROJECT)
CHROMIUM_SRC_PROJECT = 'chromium/src'
CHROMIUM_GOB_URL = '%s/%s.git' % (EXTERNAL_GOB_URL, CHROMIUM_SRC_PROJECT)
CHROME_INTERNAL_PROJECT = 'chrome/src-internal'
CHROME_INTERNAL_GOB_URL = '%s/%s.git' % (
INTERNAL_GOB_URL, CHROME_INTERNAL_PROJECT)
DEFAULT_MANIFEST = 'default.xml'
OFFICIAL_MANIFEST = 'official.xml'
LKGM_MANIFEST = 'LKGM/lkgm.xml'
SHARED_CACHE_ENVVAR = 'CROS_CACHEDIR'
PARALLEL_EMERGE_STATUS_FILE_ENVVAR = 'PARALLEL_EMERGE_STATUS_FILE'
# These projects can be responsible for infra failures.
INFRA_PROJECTS = (CHROMITE_PROJECT,)
# The manifest contains extra attributes in the 'project' nodes to determine our
# branching strategy for the project.
# create: Create a new branch on the project repo for the new CrOS branch.
# This is the default.
# pin: On the CrOS branch, pin the project to the current revision.
# tot: On the CrOS branch, the project still tracks ToT.
MANIFEST_ATTR_BRANCHING = 'branch-mode'
MANIFEST_ATTR_BRANCHING_CREATE = 'create'
MANIFEST_ATTR_BRANCHING_PIN = 'pin'
MANIFEST_ATTR_BRANCHING_TOT = 'tot'
MANIFEST_ATTR_BRANCHING_ALL = (
MANIFEST_ATTR_BRANCHING_CREATE,
MANIFEST_ATTR_BRANCHING_PIN,
MANIFEST_ATTR_BRANCHING_TOT,
)
STREAK_COUNTERS = 'streak_counters'
PATCH_BRANCH = 'patch_branch'
STABLE_EBUILD_BRANCH = 'stabilizing_branch'
MERGE_BRANCH = 'merge_branch'
# These branches are deleted at the beginning of every buildbot run.
CREATED_BRANCHES = [
PATCH_BRANCH,
STABLE_EBUILD_BRANCH,
MERGE_BRANCH
]
# Default OS target packages.
TARGET_OS_PKG = 'virtual/target-os'
TARGET_OS_DEV_PKG = 'virtual/target-os-dev'
TARGET_OS_TEST_PKG = 'virtual/target-os-test'
TARGET_OS_FACTORY_PKG = 'virtual/target-os-factory'
# Constants for uprevving Chrome
CHROMEOS_BASE = 'chromeos-base'
# Portage category and package name for Chrome.
CHROME_CN = CHROMEOS_BASE
CHROME_PN = 'chromeos-chrome'
CHROME_CP = '%s/%s' % (CHROME_CN, CHROME_PN)
# Other packages to uprev while uprevving Chrome.
OTHER_CHROME_PACKAGES = ['chromeos-base/chromium-source',
'chromeos-base/chrome-icu']
# Chrome use flags
USE_CHROME_INTERNAL = 'chrome_internal'
USE_AFDO_USE = 'afdo_use'
# Builds and validates _alpha ebuilds. These builds sync to the latest
# revision of the Chromium src tree and build with that checkout.
CHROME_REV_TOT = 'tot'
# Builds and validates chrome at a given revision through cbuildbot
# --chrome_version
CHROME_REV_SPEC = 'spec'
# Builds and validates the latest Chromium release as defined by
# ~/trunk/releases in the Chrome src tree. These ebuilds are suffixed with rc.
CHROME_REV_LATEST = 'latest_release'
# Builds and validates the latest Chromium release for a specific Chromium
# branch that we want to watch. These ebuilds are suffixed with rc.
CHROME_REV_STICKY = 'stable_release'
# Builds and validates Chromium for a pre-populated directory.
# Also uses _alpha, since portage doesn't have anything lower.
CHROME_REV_LOCAL = 'local'
VALID_CHROME_REVISIONS = [CHROME_REV_TOT, CHROME_REV_LATEST,
CHROME_REV_STICKY, CHROME_REV_LOCAL, CHROME_REV_SPEC]
# Constants for uprevving Android.
# Portage package name for Android container.
ANDROID_PACKAGE_NAME = 'android-container'
# Builds and validates the latest Android release.
ANDROID_REV_LATEST = 'latest_release'
VALID_ANDROID_REVISIONS = [ANDROID_REV_LATEST]
# Build types supported.
# TODO(sosa): Deprecate PFQ type.
# Incremental builds that are built using binary packages when available.
# These builds have less validation than other build types.
INCREMENTAL_TYPE = 'binary'
# These builds serve as PFQ builders. This is being deprecated.
PFQ_TYPE = 'pfq'
# Android PFQ type. Builds and validates new versions of Android.
ANDROID_PFQ_TYPE = 'android'
# Builds from source and non-incremental. These builds fully wipe their
# chroot before the start of every build and do not use a BINHOST.
FULL_TYPE = 'full'
# Full but with versioned logic.
CANARY_TYPE = 'canary'
# Generate payloads for an already built build/version.
PAYLOADS_TYPE = 'payloads'
# Similar behavior to canary, but used to validate toolchain changes.
TOOLCHAIN_TYPE = 'toolchain'
# Generic type of tryjob only build configs.
TRYJOB_TYPE = 'tryjob'
# Special build type for Chroot builders. These builds focus on building
# toolchains and validate that they work.
CHROOT_BUILDER_TYPE = 'chroot'
CHROOT_BUILDER_BOARD = 'amd64-host'
# Used for builds that don't require a type.
GENERIC_TYPE = 'generic'
VALID_BUILD_TYPES = (
INCREMENTAL_TYPE,
FULL_TYPE,
CANARY_TYPE,
CHROOT_BUILDER_TYPE,
CHROOT_BUILDER_BOARD,
ANDROID_PFQ_TYPE,
PFQ_TYPE,
PAYLOADS_TYPE,
TOOLCHAIN_TYPE,
TRYJOB_TYPE,
GENERIC_TYPE,
)
HWTEST_TRYBOT_NUM = 3
HWTEST_QUOTA_POOL = 'quota'
HWTEST_QUOTA_ACCOUNT_BVT = 'legacypool-bvt'
HWTEST_QUOTA_ACCOUNT_BVT_SYNC = 'bvt-sync'
HWTEST_QUOTA_ACCOUNT_PFQ = 'pfq'
HWTEST_QUOTA_ACCOUNT_SUITES = 'legacypool-suites'
HWTEST_QUOTA_ACCOUNT_TOOLCHAIN = 'toolchain'
# How many total test retries should be done for a suite.
HWTEST_MAX_RETRIES = 5
# Defines for the various hardware test suites:
# BVT: Basic blocking suite to be run against any build that
# requires a HWTest phase.
# COMMIT: Suite of basic tests required for commits to the source
# tree. Runs as a blocking suite on the CQ and PFQ; runs as
# a non-blocking suite on canaries.
# CANARY: Non-blocking suite run only against the canaries.
# AFDO: Non-blocking suite run only AFDO builders.
# MOBLAB: Blocking Suite run only on *_moblab builders.
# INSTALLER: Blocking suite run against all canaries; tests basic installer
# functionality.
HWTEST_ARC_COMMIT_SUITE = 'bvt-arc'
HWTEST_BVT_SUITE = 'bvt-inline'
HWTEST_COMMIT_SUITE = 'bvt-cq'
HWTEST_CANARY_SUITE = 'bvt-perbuild'
HWTEST_INSTALLER_SUITE = 'bvt-installer'
# Runs all non-informational Tast tests (exercising any of OS, Chrome, and ARC).
HWTEST_TAST_CQ_SUITE = 'bvt-tast-cq'
# Runs non-informational Tast tests exercising either Chrome or ARC.
HWTEST_TAST_CHROME_PFQ_SUITE = 'bvt-tast-chrome-pfq'
# Runs non-informational Tast tests exercising ARC.
HWTEST_TAST_ANDROID_PFQ_SUITE = 'bvt-tast-android-pfq'
# Runs all Tast informational tests.
HWTEST_TAST_INFORMATIONAL_SUITE = 'bvt-tast-informational'
HWTEST_AFDO_SUITE = 'AFDO_record'
HWTEST_JETSTREAM_COMMIT_SUITE = 'jetstream_cq'
HWTEST_MOBLAB_SUITE = 'moblab'
HWTEST_MOBLAB_QUICK_SUITE = 'moblab_quick'
HWTEST_SANITY_SUITE = 'sanity'
HWTEST_TOOLCHAIN_SUITE = 'toolchain-tests'
# Non-blocking informational hardware tests for Chrome, run throughout the
# day on tip-of-trunk Chrome rather than on the daily Chrome branch.
HWTEST_CHROME_INFORMATIONAL = 'chrome-informational'
# Additional timeout to wait for autotest to abort a suite if the test takes
# too long to run. This is meant to be overly conservative as a timeout may
# indicate that autotest is at capacity.
HWTEST_TIMEOUT_EXTENSION = 10 * 60
HWTEST_WEEKLY_PRIORITY = 'Weekly'
HWTEST_CTS_PRIORITY = 'CTS'
HWTEST_GTS_PRIORITY = HWTEST_CTS_PRIORITY
HWTEST_DAILY_PRIORITY = 'Daily'
HWTEST_DEFAULT_PRIORITY = 'DEFAULT'
HWTEST_CQ_PRIORITY = 'CQ'
HWTEST_BUILD_PRIORITY = 'Build'
HWTEST_PFQ_PRIORITY = 'PFQ'
HWTEST_POST_BUILD_PRIORITY = 'PostBuild'
# Ordered by priority (first item being lowest).
HWTEST_VALID_PRIORITIES = [HWTEST_WEEKLY_PRIORITY,
HWTEST_CTS_PRIORITY,
HWTEST_DAILY_PRIORITY,
HWTEST_POST_BUILD_PRIORITY,
HWTEST_DEFAULT_PRIORITY,
HWTEST_BUILD_PRIORITY,
HWTEST_PFQ_PRIORITY,
HWTEST_CQ_PRIORITY]
# Creates a mapping of priorities to make easy comparisons.
# Use the same priorities mapping as autotest/client/common_lib/priorities.py
HWTEST_PRIORITIES_MAP = {
HWTEST_WEEKLY_PRIORITY: 10,
HWTEST_CTS_PRIORITY: 11,
HWTEST_DAILY_PRIORITY: 20,
HWTEST_POST_BUILD_PRIORITY: 30,
HWTEST_DEFAULT_PRIORITY: 40,
HWTEST_BUILD_PRIORITY: 50,
HWTEST_PFQ_PRIORITY: 60,
HWTEST_CQ_PRIORITY: 70}
# Creates a mapping of priorities for skylab hwtest tasks. In swarming,
# lower number means high priorities. Priority lower than 48 will be special
# tasks. The upper bound of priority is 255.
# Use the same priorities mapping as autotest/venv/skylab_suite/swarming_lib.py
SKYLAB_HWTEST_PRIORITIES_MAP = {
HWTEST_WEEKLY_PRIORITY: 230,
HWTEST_CTS_PRIORITY: 215,
HWTEST_DAILY_PRIORITY: 200,
HWTEST_POST_BUILD_PRIORITY: 170,
HWTEST_DEFAULT_PRIORITY: 140,
HWTEST_BUILD_PRIORITY: 110,
HWTEST_PFQ_PRIORITY: 80,
HWTEST_CQ_PRIORITY: 50,
}
# The environment for executing tests.
ENV_SKYLAB = 'skylab'
ENV_AUTOTEST = 'autotest'
# The cipd package for skylab tool
CIPD_SKYLAB_PACKAGE = 'chromiumos/infra/skylab/linux-amd64'
# crbug.com/1108489: The skylab tool CIPD package is pinned to a specific
# version to avoid uncontrolled tool release and so that the tool is effectively
# branched with cbuildbot.
CIPD_SKYLAB_INSTANCE_ID = 'LU2Xmdk1oXyZPuiEfzDQhUWFMXY3jYQNPOzHRkRkZBEC'
# HWTest result statuses
HWTEST_STATUS_PASS = 'pass'
HWTEST_STATUS_FAIL = 'fail'
HWTEST_STATUS_ABORT = 'abort'
HWTEST_STATUS_OTHER = 'other'
HWTEST_STATUES_NOT_PASSED = frozenset([HWTEST_STATUS_FAIL,
HWTEST_STATUS_ABORT,
HWTEST_STATUS_OTHER])
# Define HWTEST subsystem logic constants.
SUBSYSTEMS = 'subsystems'
SUBSYSTEM_UNUSED = 'subsystem_unused'
# Build messages
MESSAGE_TYPE_IGNORED_REASON = 'ignored_reason'
MESSAGE_TYPE_ANNOTATIONS_FINALIZED = 'annotations_finalized'
# MESSSGE_TYPE_IGNORED_REASON messages store the affected build as
# the CIDB column message_value.
MESSAGE_SUBTYPE_SELF_DESTRUCTION = 'self_destruction'
# Define HWTEST job_keyvals
JOB_KEYVAL_DATASTORE_PARENT_KEY = 'datastore_parent_key'
JOB_KEYVAL_CIDB_BUILD_ID = 'cidb_build_id'
JOB_KEYVAL_CIDB_BUILD_STAGE_ID = 'cidb_build_stage_id'
JOB_KEYVAL_BUILD_CONFIG = 'build_config'
JOB_KEYVAL_MASTER_BUILD_CONFIG = 'master_build_config'
JOB_KEYVAL_BRANCH = 'branch'
# How many total test retries should be done for a suite.
VM_TEST_MAX_RETRIES = 5
# Defines VM Test types.
SIMPLE_AU_TEST_TYPE = 'pfq_suite'
VM_SUITE_TEST_TYPE = 'vm_suite'
GCE_SUITE_TEST_TYPE = 'gce_suite'
CROS_VM_TEST_TYPE = 'cros_vm_test'
DEV_MODE_TEST_TYPE = 'dev_mode_test'
VALID_VM_TEST_TYPES = [
SIMPLE_AU_TEST_TYPE,
VM_SUITE_TEST_TYPE,
GCE_SUITE_TEST_TYPE,
CROS_VM_TEST_TYPE,
DEV_MODE_TEST_TYPE
]
VALID_GCE_TEST_SUITES = ['gce-smoke', 'gce-sanity']
# MoblabVM tests are suites of tests used to validate a moblab image via
# VMTests.
MOBLAB_VM_SMOKE_TEST_TYPE = 'moblab_smoke_test'
CHROMIUMOS_OVERLAY_DIR = 'src/third_party/chromiumos-overlay'
PORTAGE_STABLE_OVERLAY_DIR = 'src/third_party/portage-stable'
ECLASS_OVERLAY_DIR = 'src/third_party/eclass-overlay'
CHROMEOS_PARTNER_OVERLAY_DIR = 'src/private-overlays/chromeos-partner-overlay/'
PUBLIC_BINHOST_CONF_DIR = os.path.join(CHROMIUMOS_OVERLAY_DIR,
'chromeos/binhost')
PRIVATE_BINHOST_CONF_DIR = os.path.join(CHROMEOS_PARTNER_OVERLAY_DIR,
'chromeos/binhost')
VERSION_FILE = os.path.join(CHROMIUMOS_OVERLAY_DIR,
'chromeos/config/chromeos_version.sh')
SDK_VERSION_FILE = os.path.join(PUBLIC_BINHOST_CONF_DIR,
'host/sdk_version.conf')
SDK_GS_BUCKET = 'chromiumos-sdk'
PUBLIC = 'public'
PRIVATE = 'private'
BOTH_OVERLAYS = 'both'
PUBLIC_OVERLAYS = PUBLIC
PRIVATE_OVERLAYS = PRIVATE
VALID_OVERLAYS = [BOTH_OVERLAYS, PUBLIC_OVERLAYS, PRIVATE_OVERLAYS, None]
# Common default logging settings for use with the logging module.
LOGGER_FMT = '%(asctime)s: %(levelname)s: %(message)s'
LOGGER_DATE_FMT = '%H:%M:%S'
# Used by remote patch serialization/deserialization.
INTERNAL_PATCH_TAG = 'i'
EXTERNAL_PATCH_TAG = 'e'
PATCH_TAGS = (INTERNAL_PATCH_TAG, EXTERNAL_PATCH_TAG)
GERRIT_ON_BORG_LABELS = {
'Code-Review': 'CRVW',
'Commit-Queue': 'COMR',
'Verified': 'VRIF',
}
# Environment variables that should be exposed to all children processes
# invoked via cros_build_lib.run.
ENV_PASSTHRU = ('CROS_SUDO_KEEP_ALIVE', SHARED_CACHE_ENVVAR,
PARALLEL_EMERGE_STATUS_FILE_ENVVAR)
# List of variables to proxy into the chroot from the host, and to
# have sudo export if existent. Anytime this list is modified, a new
# chroot_version_hooks.d upgrade script that symlinks to 153_rewrite_sudoers.d
# should be created.
CHROOT_ENVIRONMENT_WHITELIST = (
'CHROMEOS_OFFICIAL',
'CHROMEOS_VERSION_AUSERVER',
'CHROMEOS_VERSION_DEVSERVER',
'CHROMEOS_VERSION_TRACK',
'GCC_GITHASH',
'GIT_AUTHOR_EMAIL',
'GIT_AUTHOR_NAME',
'GIT_COMMITTER_EMAIL',
'GIT_COMMITTER_NAME',
'GIT_PROXY_COMMAND',
'GIT_SSH',
'RSYNC_PROXY',
'SSH_AGENT_PID',
'SSH_AUTH_SOCK',
'TMUX',
'USE',
'all_proxy',
'ftp_proxy',
'http_proxy',
'https_proxy',
'no_proxy',
)
# Paths for Chrome LKGM which are relative to the Chromium base url.
CHROME_LKGM_FILE = 'CHROMEOS_LKGM'
PATH_TO_CHROME_LKGM = 'chromeos/%s' % CHROME_LKGM_FILE
# Path for the Chrome LKGM's closest OWNERS file.
PATH_TO_CHROME_CHROMEOS_OWNERS = 'chromeos/OWNERS'
# Cache constants.
COMMON_CACHE = 'common'
# Artifact constants.
def _SlashToUnderscore(string):
return string.replace('/', '_')
# GCE tar ball constants.
def ImageBinToGceTar(image_bin):
  """Derive the GCE tarball name for a '.bin' image filename.

  For example, 'chromiumos_base_image.bin' maps to
  'chromiumos_base_image_gce.tar.gz'. |image_bin| must end with '.bin'.
  """
  assert image_bin.endswith('.bin'), ('Filename %s does not end with ".bin"' %
                                      image_bin)
  base, _ = os.path.splitext(image_bin)
  return '%s_gce.tar.gz' % base
RELEASE_BUCKET = 'gs://chromeos-releases'
TRASH_BUCKET = 'gs://chromeos-throw-away-bucket'
CHROME_SYSROOT_TAR = 'sysroot_%s.tar.xz' % _SlashToUnderscore(CHROME_CP)
CHROME_ENV_TAR = 'environment_%s.tar.xz' % _SlashToUnderscore(CHROME_CP)
CHROME_ENV_FILE = 'environment'
BASE_IMAGE_NAME = 'chromiumos_base_image'
BASE_IMAGE_TAR = '%s.tar.xz' % BASE_IMAGE_NAME
BASE_IMAGE_BIN = '%s.bin' % BASE_IMAGE_NAME
BASE_IMAGE_GCE_TAR = ImageBinToGceTar(BASE_IMAGE_BIN)
IMAGE_SCRIPTS_NAME = 'image_scripts'
IMAGE_SCRIPTS_TAR = '%s.tar.xz' % IMAGE_SCRIPTS_NAME
TARGET_SYSROOT_TAR = 'sysroot_%s.tar.xz' % _SlashToUnderscore(TARGET_OS_PKG)
VM_IMAGE_NAME = 'chromiumos_qemu_image'
VM_IMAGE_BIN = '%s.bin' % VM_IMAGE_NAME
VM_IMAGE_TAR = '%s.tar.xz' % VM_IMAGE_NAME
VM_DISK_PREFIX = 'chromiumos_qemu_disk.bin'
VM_MEM_PREFIX = 'chromiumos_qemu_mem.bin'
VM_NUM_RETRIES = 0
# Disabling Tast VM retries because of https://crbug.com/1098346.
TAST_VM_NUM_RETRIES = 0
TAST_VM_TEST_RESULTS = 'tast_vm_test_results_%(attempt)s'
BASE_GUEST_VM_DIR = 'guest-vm-base'
TEST_GUEST_VM_DIR = 'guest-vm-test'
BASE_GUEST_VM_TAR = '%s.tar.xz' % BASE_GUEST_VM_DIR
TEST_GUEST_VM_TAR = '%s.tar.xz' % TEST_GUEST_VM_DIR
TEST_IMAGE_NAME = 'chromiumos_test_image'
TEST_IMAGE_TAR = '%s.tar.xz' % TEST_IMAGE_NAME
TEST_IMAGE_BIN = '%s.bin' % TEST_IMAGE_NAME
TEST_IMAGE_GCE_TAR = ImageBinToGceTar(TEST_IMAGE_BIN)
TEST_KEY_PRIVATE = 'id_rsa'
TEST_KEY_PUBLIC = 'id_rsa.pub'
DEV_IMAGE_NAME = 'chromiumos_image'
DEV_IMAGE_BIN = '%s.bin' % DEV_IMAGE_NAME
RECOVERY_IMAGE_NAME = 'recovery_image'
RECOVERY_IMAGE_BIN = '%s.bin' % RECOVERY_IMAGE_NAME
RECOVERY_IMAGE_TAR = '%s.tar.xz' % RECOVERY_IMAGE_NAME
# Image type constants.
IMAGE_TYPE_BASE = 'base'
IMAGE_TYPE_DEV = 'dev'
IMAGE_TYPE_TEST = 'test'
IMAGE_TYPE_RECOVERY = 'recovery'
IMAGE_TYPE_FACTORY = 'factory'
IMAGE_TYPE_FIRMWARE = 'firmware'
# USB PD accessory microcontroller firmware (e.g. power brick, display dongle).
IMAGE_TYPE_ACCESSORY_USBPD = 'accessory_usbpd'
# Standalone accessory microcontroller firmware (e.g. wireless keyboard).
IMAGE_TYPE_ACCESSORY_RWSIG = 'accessory_rwsig'
# Cr50 Firmware.
IMAGE_TYPE_CR50_FIRMWARE = 'cr50_firmware'
# Map from image type constant to the filename of that image's binary.
IMAGE_TYPE_TO_NAME = {
    IMAGE_TYPE_BASE: BASE_IMAGE_BIN,
    IMAGE_TYPE_DEV: DEV_IMAGE_BIN,
    IMAGE_TYPE_RECOVERY: RECOVERY_IMAGE_BIN,
    IMAGE_TYPE_TEST: TEST_IMAGE_BIN,
}
# Reverse mapping: image binary filename back to its image type constant.
# Dict comprehension instead of dict(<generator>) for idiomatic clarity (C402).
IMAGE_NAME_TO_TYPE = {name: itype for itype, name in IMAGE_TYPE_TO_NAME.items()}
METADATA_JSON = 'metadata.json'
PARTIAL_METADATA_JSON = 'partial-metadata.json'
METADATA_TAGS = 'tags'
DELTA_SYSROOT_TAR = 'delta_sysroot.tar.xz'
DELTA_SYSROOT_BATCH = 'batch'
FIRMWARE_ARCHIVE_NAME = 'firmware_from_source.tar.bz2'
FPMCU_UNITTESTS_ARCHIVE_NAME = 'fpmcu_unittests.tar.bz2'
# Global configuration constants.
CHROMITE_CONFIG_DIR = os.path.expanduser('~/.chromite')
CHROME_SDK_BASHRC = os.path.join(CHROMITE_CONFIG_DIR, 'chrome_sdk.bashrc')
SYNC_RETRIES = 4
SLEEP_TIMEOUT = 30
# Lab status url.
LAB_STATUS_URL = 'http://chromiumos-lab.appspot.com/current?format=json'
GOLO_SMTP_SERVER = 'mail.golo.chromium.org'
CHROME_GARDENER = 'chrome'
# Email alias to add as reviewer in Gerrit, which GWSQ will then automatically
# assign to the current gardener.
CHROME_GARDENER_REVIEW_EMAIL = '<EMAIL>'
# Useful config targets.
CANARY_MASTER = 'master-release'
PFQ_MASTER = 'master-chromium-pfq'
VMMST_ANDROID_PFQ_MASTER = 'master-vmmst-android-pfq'
PI_ANDROID_PFQ_MASTER = 'master-pi-android-pfq'
VMRVC_ANDROID_PFQ_MASTER = 'master-vmrvc-android-pfq'
TOOLCHAIN_MASTTER = 'master-toolchain'
# Email validation regex. Not quite fully compliant with RFC 2822, but good
# approximation.
EMAIL_REGEX = r'[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,4}'
# Blacklist of files not allowed to be uploaded into the Partner Project Google
# Storage Buckets:
# debug.tgz contains debug symbols.
# manifest.xml exposes all of our repo names.
# vm_test_results can contain symbolicated crash dumps.
EXTRA_BUCKETS_FILES_BLACKLIST = [
'debug.tgz',
'manifest.xml',
'vm_test_results_*'
]
# AFDO common constants.
# How long does the AFDO_record autotest have to generate the AFDO perf data.
AFDO_GENERATE_TIMEOUT = 120 * 60
# Gmail Credentials.
GMAIL_TOKEN_CACHE_FILE = os.path.expanduser('~/.gmail_credentials')
GMAIL_TOKEN_JSON_FILE = '/creds/refresh_tokens/chromeos_gmail_alerts'
# Maximum number of boards per release group builder. This should be
# chosen/adjusted based on expected release build times such that successive
# builds don't overlap and create a backlog.
MAX_RELEASE_GROUP_BOARDS = 4
CHROMEOS_SERVICE_ACCOUNT = os.path.join('/', 'creds', 'service_accounts',
'service-account-chromeos.json')
# Buildbucket buckets
CHROMEOS_RELEASE_BUILDBUCKET_BUCKET = 'master.chromeos_release'
CHROMEOS_BUILDBUCKET_BUCKET = 'master.chromeos'
INTERNAL_SWARMING_BUILDBUCKET_BUCKET = 'luci.chromeos.general'
ACTIVE_BUCKETS = [
CHROMEOS_RELEASE_BUILDBUCKET_BUCKET,
CHROMEOS_BUILDBUCKET_BUCKET,
INTERNAL_SWARMING_BUILDBUCKET_BUCKET,
]
# Build retry limit on buildbucket
#
# 2020-05-13 by engeg@: This is rarely effective, causes confusion,
# higher bot utilization, and if the initial try was past uploading artifacts
# then the retry is destined to fail with a difficult to parse error.
# 2020-05-19 by seanabraham@: Leave this at zero. These retries can break
# Chrome-wide profiling. http://b/156994019
BUILDBUCKET_BUILD_RETRY_LIMIT = 0 # Do not change. Read the above.
# TODO(nxia): consolidate all run.metadata key constants,
# add a unit test to avoid duplicated keys in run_metadata
# Builder_run metadata keys
METADATA_SCHEDULED_IMPORTANT_SLAVES = 'scheduled_important_slaves'
METADATA_SCHEDULED_EXPERIMENTAL_SLAVES = 'scheduled_experimental_slaves'
METADATA_UNSCHEDULED_SLAVES = 'unscheduled_slaves'
# List of builders marked as experimental through the tree status, not all the
# experimental builders for a run.
METADATA_EXPERIMENTAL_BUILDERS = 'experimental_builders'
# Metadata key to indicate whether a build is self-destructed.
SELF_DESTRUCTED_BUILD = 'self_destructed_build'
# Metadata key to indicate whether a build is self-destructed with success.
SELF_DESTRUCTED_WITH_SUCCESS_BUILD = 'self_destructed_with_success_build'
# Chroot snapshot names
CHROOT_SNAPSHOT_CLEAN = 'clean-chroot'
# Partition labels.
PART_STATE = 'STATE'
PART_ROOT_A = 'ROOT-A'
PART_ROOT_B = 'ROOT-B'
PART_KERN_A = 'KERN-A'
PART_KERN_B = 'KERN-B'
# Quick provision payloads. These file names should never be changed, otherwise
# very bad things can happen :). The reason is we have already uploaded these
# files with these names for all boards. So if the name changes, all scripts
# that have been using this need to handle both cases to be backward compatible.
QUICK_PROVISION_PAYLOAD_KERNEL = 'full_dev_part_KERN.bin.gz'
QUICK_PROVISION_PAYLOAD_ROOTFS = 'full_dev_part_ROOT.bin.gz'
# Mock build and stage IDs.
MOCK_STAGE_ID = 313377
MOCK_BUILD_ID = 31337
# Topology dictionary copied from CIDB.
TOPOLOGY_DICT = {
'/buildbucket/host':
'cr-buildbucket.appspot.com',
'/chrome_swarming_proxy/host':
'chromeos-swarming.appspot.com',
'/datastore/creds_file': ('/creds/service_accounts/service-account-chromeos'
'-datastore-writer-prod.json'),
'/sheriffomatic/host':
'sheriff-o-matic.appspot.com',
'/statsd/es_host':
'172.16.58.3',
'/statsd/host':
'172.16.58.3',
}
# Percentage of child builders that need to complete to update LKGM
LKGM_THRESHOLD = 80
# -*- coding: utf-8 -*-
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module contains constants used by cbuildbot and related code."""
from __future__ import print_function
import itertools
import os
def _FindSourceRoot():
"""Try and find the root check out of the chromiumos tree"""
source_root = path = os.path.realpath(os.path.join(
os.path.abspath(__file__), '..', '..', '..'))
while True:
if os.path.isdir(os.path.join(path, '.repo')):
return path
elif path == '/':
break
path = os.path.dirname(path)
return source_root
SOURCE_ROOT = _FindSourceRoot()
CHROOT_SOURCE_ROOT = '/mnt/host/source'
CHROOT_CACHE_ROOT = '/var/cache/chromeos-cache'
DEPOT_TOOLS_SUBPATH = 'src/chromium/depot_tools'
CROSUTILS_DIR = os.path.join(SOURCE_ROOT, 'src/scripts')
CHROMITE_DIR = os.path.realpath(os.path.join(
os.path.abspath(__file__), '..', '..'))
BOOTSTRAP_DIR = os.path.join(CHROMITE_DIR, 'bootstrap')
DEPOT_TOOLS_DIR = os.path.join(SOURCE_ROOT, DEPOT_TOOLS_SUBPATH)
CHROMITE_BIN_SUBDIR = 'chromite/bin'
CHROMITE_BIN_DIR = os.path.join(CHROMITE_DIR, 'bin')
CHROMITE_SCRIPTS_DIR = os.path.join(CHROMITE_DIR, 'scripts')
PATH_TO_CBUILDBOT = os.path.join(CHROMITE_BIN_SUBDIR, 'cbuildbot')
DEFAULT_CHROOT_DIR = 'chroot'
DEFAULT_CHROOT_PATH = os.path.join(SOURCE_ROOT, DEFAULT_CHROOT_DIR)
TERMINA_TOOLS_DIR = os.path.join(
SOURCE_ROOT, 'src/platform/container-guest-tools/termina')
STATEFUL_DIR = '/mnt/stateful_partition'
# These constants are defined and used in the die_hook that logs failed
# packages: 'cros_log_failed_packages' in profiles/base/profile.bashrc in
# chromiumos-overlay. The status file is generated in CROS_METRICS_DIR, and
# only if that environment variable is defined.
CROS_METRICS_DIR_ENVVAR = 'CROS_METRICS_DIR'
DIE_HOOK_STATUS_FILE_NAME = 'FAILED_PACKAGES'
CHROMEOS_CONFIG_FILE = os.path.join(CHROMITE_DIR, 'config', 'config_dump.json')
WATERFALL_CONFIG_FILE = os.path.join(
CHROMITE_DIR, 'config', 'waterfall_layout_dump.txt')
LUCI_SCHEDULER_CONFIG_FILE = os.path.join(
CHROMITE_DIR, 'config', 'luci-scheduler.cfg')
GE_BUILD_CONFIG_FILE = os.path.join(
CHROMITE_DIR, 'config', 'ge_build_config.json')
# The following define the location for storing toolchain packages and
# SDK overlay tarballs created during SDK builder runs. The paths are relative
# to the build root's chroot, which guarantees that they are reachable from it
# and get cleaned up when it is removed.
SDK_TOOLCHAINS_OUTPUT = 'tmp/toolchain-pkgs'
SDK_OVERLAYS_OUTPUT = 'tmp/sdk-overlays'
AUTOTEST_BUILD_PATH = 'usr/local/build/autotest'
UNITTEST_PKG_PATH = 'test-packages'
# Only used for testing pinned images on test images.
GUEST_IMAGES_PINS_PATH = 'usr/local/opt/google/containers/pins'
PIN_KEY_FILENAME = 'filename'
PIN_KEY_GSURI = 'gsuri'
# Path to the lsb-release file on the device.
LSB_RELEASE_PATH = '/etc/lsb-release'
HOME_DIRECTORY = os.path.expanduser('~')
# If cbuildbot is running on a bot, then the cidb access credentials will be
# available here. This directory will not exist otherwise.
CIDB_PROD_BOT_CREDS = os.path.join(HOME_DIRECTORY, '.cidb_creds',
'prod_cidb_bot')
CIDB_DEBUG_BOT_CREDS = os.path.join(HOME_DIRECTORY, '.cidb_creds',
'debug_cidb_bot')
# Crash Server upload API key.
CRASH_API_KEY = os.path.join('/', 'creds', 'api_keys',
'api_key-chromeos-crash-uploader')
# Buildbucket build status
BUILDBUCKET_BUILDER_STATUS_SCHEDULED = 'SCHEDULED'
BUILDBUCKET_BUILDER_STATUS_STARTED = 'STARTED'
BUILDBUCKET_BUILDER_STATUS_COMPLETED = 'COMPLETED'
BUILDBUCKET_BUILDER_STATUSES = (BUILDBUCKET_BUILDER_STATUS_SCHEDULED,
BUILDBUCKET_BUILDER_STATUS_STARTED,
BUILDBUCKET_BUILDER_STATUS_COMPLETED)
BUILDBUCKET_BUILDER_RESULT_SUCCESS = 'SUCCESS'
BUILDBUCKET_BUILDER_RESULT_FAILURE = 'FAILURE'
BUILDBUCKET_BUILDER_RESULT_CANCELED = 'CANCELED'
# Builder status strings
BUILDER_STATUS_FAILED = 'fail'
BUILDER_STATUS_PASSED = 'pass'
BUILDER_STATUS_INFLIGHT = 'inflight'
BUILDER_STATUS_MISSING = 'missing'
BUILDER_STATUS_ABORTED = 'aborted'
# The following statuses are currently only used for build stages.
BUILDER_STATUS_PLANNED = 'planned'
BUILDER_STATUS_WAITING = 'waiting'
BUILDER_STATUS_SKIPPED = 'skipped'
BUILDER_STATUS_FORGIVEN = 'forgiven'
BUILDER_COMPLETED_STATUSES = (BUILDER_STATUS_PASSED,
BUILDER_STATUS_FAILED,
BUILDER_STATUS_ABORTED,
BUILDER_STATUS_SKIPPED,
BUILDER_STATUS_FORGIVEN)
BUILDER_ALL_STATUSES = (BUILDER_STATUS_FAILED,
BUILDER_STATUS_PASSED,
BUILDER_STATUS_INFLIGHT,
BUILDER_STATUS_MISSING,
BUILDER_STATUS_ABORTED,
BUILDER_STATUS_WAITING,
BUILDER_STATUS_PLANNED,
BUILDER_STATUS_SKIPPED,
BUILDER_STATUS_FORGIVEN)
BUILDER_NON_FAILURE_STATUSES = (BUILDER_STATUS_PLANNED,
BUILDER_STATUS_PASSED,
BUILDER_STATUS_SKIPPED,
# Quick fix for Buildbucket race problems.
BUILDER_STATUS_INFLIGHT,
BUILDER_STATUS_FORGIVEN)
# Partition labels
CROS_PART_STATEFUL = 'STATE'
# Signer status strings
SIGNER_STATUS_PASSED = 'passed'
SIGNER_STATUS_FAILED = 'failed'
# Change sources
CHANGE_SOURCE_INTERNAL = 'internal'
CHANGE_SOURCE_EXTERNAL = 'external'
# Exception categories, as recorded in cidb
EXCEPTION_CATEGORY_UNKNOWN = 'unknown'
EXCEPTION_CATEGORY_BUILD = 'build'
EXCEPTION_CATEGORY_TEST = 'test'
EXCEPTION_CATEGORY_INFRA = 'infra'
EXCEPTION_CATEGORY_LAB = 'lab'
EXCEPTION_CATEGORY_ALL_CATEGORIES = (
EXCEPTION_CATEGORY_UNKNOWN,
EXCEPTION_CATEGORY_BUILD,
EXCEPTION_CATEGORY_TEST,
EXCEPTION_CATEGORY_INFRA,
EXCEPTION_CATEGORY_LAB,
)
# Monarch metric names
MON_LAST_SLAVE = 'chromeos/cbuildbot/last_completed_slave'
MON_BUILD_COMP_COUNT = 'chromeos/cbuildbot/build/completed_count'
MON_BUILD_DURATION = 'chromeos/cbuildbot/build/durations'
MON_STAGE_COMP_COUNT = 'chromeos/cbuildbot/stage/completed_count'
MON_STAGE_DURATION = 'chromeos/cbuildbot/stage/durations'
MON_STAGE_INSTANCE_DURATION = 'chromeos/cbuildbot/stage/instance_durations'
MON_STAGE_FAILURE_COUNT = 'chromeos/cbuildbot/stage/failure_count'
MON_FAILED_STAGE = 'chromeos/chromite/cbuildbot_launch/failed_stage'
MON_CHROOT_USED = 'chromeos/cbuildbot/chroot_at_version'
MON_REPO_SYNC_COUNT = 'chromeos/cbuildbot/repo/sync_count'
MON_REPO_SYNC_RETRY_COUNT = 'chromeos/cbuildbot/repo/sync_retry_count'
MON_REPO_SELFUPDATE_FAILURE_COUNT = ('chromeos/cbuildbot/repo/'
'selfupdate_failure_count')
MON_REPO_INIT_RETRY_COUNT = 'chromeos/cbuildbot/repo/init_retry_count'
MON_REPO_MANIFEST_FAILURE_COUNT = ('chromeos/cbuildbot/repo/'
'manifest_failure_count')
MON_BB_RETRY_BUILD_COUNT = ('chromeos/cbuildbot/buildbucket/'
'retry_build_count')
MON_BB_CANCEL_BATCH_BUILDS_COUNT = ('chromeos/cbuildbot/buildbucket/'
'cancel_batch_builds_count')
MON_EXPORT_TO_GCLOUD = 'chromeos/cbuildbot/export_to_gcloud'
# Stage Categorization for failed stages metric.
UNCATEGORIZED_STAGE = 'Uncategorized'
CI_INFRA_STAGE = 'CI-Infra'
TEST_INFRA_STAGE = 'Test-Infra'
PRODUCT_OS_STAGE = 'Product-OS'
PRODUCT_ANDROID_STAGE = 'Product-Android'
PRODUCT_CHROME_STAGE = 'Product-Chrome'
PRODUCT_TOOLCHAIN_STAGE = 'Product-Toolchain'
# Re-execution API constants.
# Used by --resume and --bootstrap to decipher which options they
# can pass to the target cbuildbot (since it may not have that
# option).
# Format is Major.Minor. Minor is used for tracking new options added
# that aren't critical to the older version if it's not ran.
# Major is used for tracking heavy API breakage- for example, no longer
# supporting the --resume option.
REEXEC_API_MAJOR = 0
REEXEC_API_MINOR = 12
REEXEC_API_VERSION = '%i.%i' % (REEXEC_API_MAJOR, REEXEC_API_MINOR)
# Support --master-build-id
REEXEC_API_MASTER_BUILD_ID = 3
# Support --git-cache-dir
REEXEC_API_GIT_CACHE_DIR = 4
# Support --goma_dir and --goma_client_json
REEXEC_API_GOMA = 5
# Support --ts-mon-task-num
REEXEC_API_TSMON_TASK_NUM = 6
# Support --sanity-check-build
REEXEC_API_SANITY_CHECK_BUILD = 7
# Support --previous-build-state
REEXEC_API_PREVIOUS_BUILD_STATE = 8
# Support --workspace
REEXEC_API_WORKSPACE = 9
# Support --master-buildbucket-id
REEXEC_API_MASTER_BUILDBUCKET_ID = 10
# Support --chromeos_goma_dir
REEXEC_API_CHROMEOS_GOMA_DIR = 11
# Support --chrome-preload-dir
REEXEC_API_CHROME_PRELOAD_DIR = 12
# We rely on the (waterfall, builder name, build number) to uniquely identify
# a build. However, future migrations or state wipes of the buildbot master may
# cause it to reset its build number counter. When that happens, this value
# should be incremented, ensuring that (waterfall, builder name, build number,
# buildbot generation) is a unique identifier of builds.
BUILDBOT_GENERATION = 1
ISOLATESERVER = 'https://isolateserver.appspot.com'
GOOGLE_EMAIL = '@google.com'
CHROMIUM_EMAIL = '@chromium.org'
CORP_DOMAIN = 'corp.google.com'
GOLO_DOMAIN = 'golo.chromium.org'
CHROME_DOMAIN = 'chrome.' + CORP_DOMAIN
CHROMEOS_BOT_INTERNAL = 'chromeos-bot.internal'
GOB_HOST = '%s.googlesource.com'
EXTERNAL_GOB_INSTANCE = 'chromium'
EXTERNAL_GERRIT_INSTANCE = 'chromium-review'
EXTERNAL_GOB_HOST = GOB_HOST % EXTERNAL_GOB_INSTANCE
EXTERNAL_GERRIT_HOST = GOB_HOST % EXTERNAL_GERRIT_INSTANCE
EXTERNAL_GOB_URL = 'https://%s' % EXTERNAL_GOB_HOST
EXTERNAL_GERRIT_URL = 'https://%s' % EXTERNAL_GERRIT_HOST
INTERNAL_GOB_INSTANCE = 'chrome-internal'
INTERNAL_GERRIT_INSTANCE = 'chrome-internal-review'
INTERNAL_GOB_HOST = GOB_HOST % INTERNAL_GOB_INSTANCE
INTERNAL_GERRIT_HOST = GOB_HOST % INTERNAL_GERRIT_INSTANCE
INTERNAL_GOB_URL = 'https://%s' % INTERNAL_GOB_HOST
INTERNAL_GERRIT_URL = 'https://%s' % INTERNAL_GERRIT_HOST
# Tests without a 'cheets_CTS_' or 'cheets_GTS.' prefix will not be considered
# CTS/GTS tests in chromite.lib.cts_helper
DEFAULT_CTS_TEST_XML_MAP = {
'cheets_CTS_': 'test_result.xml',
'cheets_GTS.': 'test_result.xml',
'cheets_GTS_': 'test_result.xml',
}
# Google Storage bucket URI to store results in.
DEFAULT_CTS_RESULTS_GSURI = 'gs://chromeos-cts-results/'
DEFAULT_CTS_APFE_GSURI = 'gs://chromeos-cts-apfe/'
ANDROID_CONTAINER_PACKAGE_KEYWORD = 'android-container'
ANDROID_VM_PACKAGE_KEYWORD = 'android-vm'
ANDROID_BUCKET_URL = 'gs://android-build-chromeos/builds'
ANDROID_PI_BUILD_BRANCH = 'git_pi-arc'
ANDROID_VMRVC_BUILD_BRANCH = 'git_rvc-arc'
ANDROID_VMMST_BUILD_BRANCH = 'git_master-arc-dev'
ANDROID_PI_BUILD_TARGETS = {
# Roll XkbToKcmConverter with system image. It's a host executable and
# doesn't depend on the target as long as it's pi-arc branch. The converter
# is ARC specific and not a part of Android SDK. Having a custom target like
# SDK_TOOLS might be better in the long term, but let's use one from ARM or
# X86 target as there's no other similar executables right now.
# We put it in two buckets because we have separate ACLs for arm and x86.
# http://b/128405786
'APPS': ('linux-apps', 'org.chromium.arc.cachebuilder.jar'),
'ARM': ('linux-cheets_arm-user', r'(\.zip|/XkbToKcmConverter)$'),
'ARM64': ('linux-cheets_arm64-user', r'(\.zip|/XkbToKcmConverter)$'),
'X86': ('linux-cheets_x86-user', r'(\.zip|/XkbToKcmConverter)$'),
'X86_64': ('linux-cheets_x86_64-user', r'\.zip$'),
'ARM_USERDEBUG': ('linux-cheets_arm-userdebug', r'\.zip$'),
'ARM64_USERDEBUG': ('linux-cheets_arm64-userdebug', r'\.zip$'),
'X86_USERDEBUG': ('linux-cheets_x86-userdebug', r'\.zip$'),
'X86_64_USERDEBUG': ('linux-cheets_x86_64-userdebug', r'\.zip$'),
'SDK_GOOGLE_X86_USERDEBUG': ('linux-sdk_cheets_x86-userdebug', r'\.zip$'),
'SDK_GOOGLE_X86_64_USERDEBUG': ('linux-sdk_cheets_x86_64-userdebug',
r'\.zip$'),
}
ANDROID_VMMST_BUILD_TARGETS = {
# For XkbToKcmConverter, see the comment in ANDROID_PI_BUILD_TARGETS.
'X86_64_USERDEBUG': ('linux-bertha_x86_64-userdebug',
r'(\.zip|/XkbToKcmConverter)$'),
}
ANDROID_VMRVC_BUILD_TARGETS = {
# For XkbToKcmConverter, see the comment in ANDROID_PI_BUILD_TARGETS.
'APPS': ('linux-apps', 'org.chromium.arc.cachebuilder.jar'),
'ARM64': ('linux-bertha_arm64-user', r'(\.zip|/XkbToKcmConverter)$'),
'X86_64': ('linux-bertha_x86_64-user', r'(\.zip|/XkbToKcmConverter)$'),
'ARM64_USERDEBUG': ('linux-bertha_arm64-userdebug',
r'(\.zip|/XkbToKcmConverter)$'),
'X86_64_USERDEBUG': ('linux-bertha_x86_64-userdebug',
r'(\.zip|/XkbToKcmConverter)$'),
}
# These refer to *_TARGET variables in Android ebuild files, used when
# parsing ebuilds to determine the corresponding Android branch.
# NOTE: We may use `|` operator to union dict keys after we completely go
# Python 3.
ANDROID_ALL_BUILD_TARGETS = frozenset(
x + '_TARGET' for x in itertools.chain(
ANDROID_PI_BUILD_TARGETS,
ANDROID_VMMST_BUILD_TARGETS,
ANDROID_VMRVC_BUILD_TARGETS,
)
)
ARC_BUCKET_URL = 'gs://chromeos-arc-images/builds'
ARC_BUCKET_ACLS = {
'APPS': 'googlestorage_acl_public.txt',
'ARM': 'googlestorage_acl_arm.txt',
'ARM64': 'googlestorage_acl_arm.txt',
'X86': 'googlestorage_acl_x86.txt',
'X86_64': 'googlestorage_acl_x86.txt',
'ARM_USERDEBUG': 'googlestorage_acl_arm.txt',
'ARM64_USERDEBUG': 'googlestorage_acl_arm.txt',
'X86_USERDEBUG': 'googlestorage_acl_x86.txt',
'X86_64_USERDEBUG': 'googlestorage_acl_x86.txt',
'SDK_GOOGLE_X86_USERDEBUG': 'googlestorage_acl_x86.txt',
'SDK_GOOGLE_X86_64_USERDEBUG': 'googlestorage_acl_x86.txt',
}
ANDROID_SYMBOLS_URL_TEMPLATE = (
ARC_BUCKET_URL +
'/%(branch)s-linux-%(target)s_%(arch)s-%(variant)s/%(version)s'
'/%(target)s_%(arch)s%(suffix)s-symbols-%(version)s.zip')
ANDROID_SYMBOLS_FILE = 'android-symbols.zip'
# x86-user, x86-userdebug and x86-eng builders create build artifacts with the
# same name, e.g. cheets_x86-target_files-${VERSION}.zip. Chrome OS builders
# that need to select x86-user or x86-userdebug artifacts at emerge time need
# the artifacts to have different filenames to avoid checksum failures. These
# targets will have their artifacts renamed when the PFQ copies them from the
# the Android bucket to the ARC++ bucket (b/33072485).
ARC_BUILDS_NEED_ARTIFACTS_RENAMED = {
'ARM_USERDEBUG',
'ARM64_USERDEBUG',
'X86_USERDEBUG',
'X86_64_USERDEBUG',
'SDK_GOOGLE_X86_USERDEBUG',
'SDK_GOOGLE_X86_64_USERDEBUG',
}
# All builds will have the same name without target prefix.
# Emerge checksum failures will be worked around by the ebuild rename symbol (->).
ARC_ARTIFACTS_RENAME_NOT_NEEDED = [
'push_to_device.zip',
'sepolicy.zip',
'XkbToKcmConverter',
]
GOB_COOKIE_PATH = os.path.expanduser('~/.git-credential-cache/cookie')
GITCOOKIES_PATH = os.path.expanduser('~/.gitcookies')
# Timestamps in the JSON from GoB's web interface is of the form 'Tue
# Dec 02 17:48:06 2014' and is assumed to be in UTC.
GOB_COMMIT_TIME_FORMAT = '%a %b %d %H:%M:%S %Y'
CHROMITE_PROJECT = 'chromiumos/chromite'
CHROMITE_URL = '%s/%s' % (EXTERNAL_GOB_URL, CHROMITE_PROJECT)
CHROMIUM_SRC_PROJECT = 'chromium/src'
CHROMIUM_GOB_URL = '%s/%s.git' % (EXTERNAL_GOB_URL, CHROMIUM_SRC_PROJECT)
CHROME_INTERNAL_PROJECT = 'chrome/src-internal'
CHROME_INTERNAL_GOB_URL = '%s/%s.git' % (
INTERNAL_GOB_URL, CHROME_INTERNAL_PROJECT)
DEFAULT_MANIFEST = 'default.xml'
OFFICIAL_MANIFEST = 'official.xml'
LKGM_MANIFEST = 'LKGM/lkgm.xml'
SHARED_CACHE_ENVVAR = 'CROS_CACHEDIR'
PARALLEL_EMERGE_STATUS_FILE_ENVVAR = 'PARALLEL_EMERGE_STATUS_FILE'
# These projects can be responsible for infra failures.
INFRA_PROJECTS = (CHROMITE_PROJECT,)
# The manifest contains extra attributes in the 'project' nodes to determine our
# branching strategy for the project.
# create: Create a new branch on the project repo for the new CrOS branch.
# This is the default.
# pin: On the CrOS branch, pin the project to the current revision.
# tot: On the CrOS branch, the project still tracks ToT.
MANIFEST_ATTR_BRANCHING = 'branch-mode'
MANIFEST_ATTR_BRANCHING_CREATE = 'create'
MANIFEST_ATTR_BRANCHING_PIN = 'pin'
MANIFEST_ATTR_BRANCHING_TOT = 'tot'
MANIFEST_ATTR_BRANCHING_ALL = (
MANIFEST_ATTR_BRANCHING_CREATE,
MANIFEST_ATTR_BRANCHING_PIN,
MANIFEST_ATTR_BRANCHING_TOT,
)
STREAK_COUNTERS = 'streak_counters'
PATCH_BRANCH = 'patch_branch'
STABLE_EBUILD_BRANCH = 'stabilizing_branch'
MERGE_BRANCH = 'merge_branch'
# These branches are deleted at the beginning of every buildbot run.
CREATED_BRANCHES = [
PATCH_BRANCH,
STABLE_EBUILD_BRANCH,
MERGE_BRANCH
]
# Default OS target packages.
TARGET_OS_PKG = 'virtual/target-os'
TARGET_OS_DEV_PKG = 'virtual/target-os-dev'
TARGET_OS_TEST_PKG = 'virtual/target-os-test'
TARGET_OS_FACTORY_PKG = 'virtual/target-os-factory'
# Constants for uprevving Chrome
CHROMEOS_BASE = 'chromeos-base'
# Portage category and package name for Chrome.
CHROME_CN = CHROMEOS_BASE
CHROME_PN = 'chromeos-chrome'
CHROME_CP = '%s/%s' % (CHROME_CN, CHROME_PN)
# Other packages to uprev while uprevving Chrome.
OTHER_CHROME_PACKAGES = ['chromeos-base/chromium-source',
'chromeos-base/chrome-icu']
# Chrome use flags
USE_CHROME_INTERNAL = 'chrome_internal'
USE_AFDO_USE = 'afdo_use'
# Builds and validates _alpha ebuilds. These builds sync to the latest
# revision of the Chromium src tree and build with that checkout.
CHROME_REV_TOT = 'tot'
# Builds and validates chrome at a given revision through cbuildbot
# --chrome_version
CHROME_REV_SPEC = 'spec'
# Builds and validates the latest Chromium release as defined by
# ~/trunk/releases in the Chrome src tree. These ebuilds are suffixed with rc.
CHROME_REV_LATEST = 'latest_release'
# Builds and validates the latest Chromium release for a specific Chromium
# branch that we want to watch. These ebuilds are suffixed with rc.
CHROME_REV_STICKY = 'stable_release'
# Builds and validates Chromium for a pre-populated directory.
# Also uses _alpha, since portage doesn't have anything lower.
CHROME_REV_LOCAL = 'local'
VALID_CHROME_REVISIONS = [CHROME_REV_TOT, CHROME_REV_LATEST,
CHROME_REV_STICKY, CHROME_REV_LOCAL, CHROME_REV_SPEC]
# Constants for uprevving Android.
# Portage package name for Android container.
ANDROID_PACKAGE_NAME = 'android-container'
# Builds and validates the latest Android release.
ANDROID_REV_LATEST = 'latest_release'
VALID_ANDROID_REVISIONS = [ANDROID_REV_LATEST]
# Build types supported.
# TODO(sosa): Deprecate PFQ type.
# Incremental builds that are built using binary packages when available.
# These builds have less validation than other build types.
INCREMENTAL_TYPE = 'binary'
# These builds serve as PFQ builders. This is being deprecated.
PFQ_TYPE = 'pfq'
# Android PFQ type. Builds and validates new versions of Android.
ANDROID_PFQ_TYPE = 'android'
# Builds from source and non-incremental. These builds fully wipe their
# chroot before the start of every build and do not use a BINHOST.
FULL_TYPE = 'full'
# Full but with versioned logic.
CANARY_TYPE = 'canary'
# Generate payloads for an already built build/version.
PAYLOADS_TYPE = 'payloads'
# Similar behavior to canary, but used to validate toolchain changes.
TOOLCHAIN_TYPE = 'toolchain'
# Generic type of tryjob only build configs.
TRYJOB_TYPE = 'tryjob'
# Special build type for Chroot builders. These builds focus on building
# toolchains and validate that they work.
CHROOT_BUILDER_TYPE = 'chroot'
CHROOT_BUILDER_BOARD = 'amd64-host'
# Used for builds that don't require a type.
GENERIC_TYPE = 'generic'
VALID_BUILD_TYPES = (
INCREMENTAL_TYPE,
FULL_TYPE,
CANARY_TYPE,
CHROOT_BUILDER_TYPE,
CHROOT_BUILDER_BOARD,
ANDROID_PFQ_TYPE,
PFQ_TYPE,
PAYLOADS_TYPE,
TOOLCHAIN_TYPE,
TRYJOB_TYPE,
GENERIC_TYPE,
)
HWTEST_TRYBOT_NUM = 3
HWTEST_QUOTA_POOL = 'quota'
HWTEST_QUOTA_ACCOUNT_BVT = 'legacypool-bvt'
HWTEST_QUOTA_ACCOUNT_BVT_SYNC = 'bvt-sync'
HWTEST_QUOTA_ACCOUNT_PFQ = 'pfq'
HWTEST_QUOTA_ACCOUNT_SUITES = 'legacypool-suites'
HWTEST_QUOTA_ACCOUNT_TOOLCHAIN = 'toolchain'
# How many total test retries should be done for a suite.
HWTEST_MAX_RETRIES = 5
# Defines for the various hardware test suites:
# BVT: Basic blocking suite to be run against any build that
# requires a HWTest phase.
# COMMIT: Suite of basic tests required for commits to the source
# tree. Runs as a blocking suite on the CQ and PFQ; runs as
# a non-blocking suite on canaries.
# CANARY: Non-blocking suite run only against the canaries.
# AFDO: Non-blocking suite run only AFDO builders.
# MOBLAB: Blocking Suite run only on *_moblab builders.
# INSTALLER: Blocking suite run against all canaries; tests basic installer
# functionality.
HWTEST_ARC_COMMIT_SUITE = 'bvt-arc'
HWTEST_BVT_SUITE = 'bvt-inline'
HWTEST_COMMIT_SUITE = 'bvt-cq'
HWTEST_CANARY_SUITE = 'bvt-perbuild'
HWTEST_INSTALLER_SUITE = 'bvt-installer'
# Runs all non-informational Tast tests (exercising any of OS, Chrome, and ARC).
HWTEST_TAST_CQ_SUITE = 'bvt-tast-cq'
# Runs non-informational Tast tests exercising either Chrome or ARC.
HWTEST_TAST_CHROME_PFQ_SUITE = 'bvt-tast-chrome-pfq'
# Runs non-informational Tast tests exercising ARC.
HWTEST_TAST_ANDROID_PFQ_SUITE = 'bvt-tast-android-pfq'
# Runs all Tast informational tests.
HWTEST_TAST_INFORMATIONAL_SUITE = 'bvt-tast-informational'
HWTEST_AFDO_SUITE = 'AFDO_record'
HWTEST_JETSTREAM_COMMIT_SUITE = 'jetstream_cq'
HWTEST_MOBLAB_SUITE = 'moblab'
HWTEST_MOBLAB_QUICK_SUITE = 'moblab_quick'
HWTEST_SANITY_SUITE = 'sanity'
HWTEST_TOOLCHAIN_SUITE = 'toolchain-tests'
# Non-blocking informational hardware tests for Chrome, run throughout the
# day on tip-of-trunk Chrome rather than on the daily Chrome branch.
HWTEST_CHROME_INFORMATIONAL = 'chrome-informational'
# Additional timeout to wait for autotest to abort a suite if the test takes
# too long to run. This is meant to be overly conservative as a timeout may
# indicate that autotest is at capacity.
HWTEST_TIMEOUT_EXTENSION = 10 * 60
HWTEST_WEEKLY_PRIORITY = 'Weekly'
HWTEST_CTS_PRIORITY = 'CTS'
HWTEST_GTS_PRIORITY = HWTEST_CTS_PRIORITY
HWTEST_DAILY_PRIORITY = 'Daily'
HWTEST_DEFAULT_PRIORITY = 'DEFAULT'
HWTEST_CQ_PRIORITY = 'CQ'
HWTEST_BUILD_PRIORITY = 'Build'
HWTEST_PFQ_PRIORITY = 'PFQ'
HWTEST_POST_BUILD_PRIORITY = 'PostBuild'
# Ordered by priority (first item being lowest).
HWTEST_VALID_PRIORITIES = [HWTEST_WEEKLY_PRIORITY,
HWTEST_CTS_PRIORITY,
HWTEST_DAILY_PRIORITY,
HWTEST_POST_BUILD_PRIORITY,
HWTEST_DEFAULT_PRIORITY,
HWTEST_BUILD_PRIORITY,
HWTEST_PFQ_PRIORITY,
HWTEST_CQ_PRIORITY]
# Creates a mapping of priorities to make comparisons easy.
# Use the same priorities mapping as autotest/client/common_lib/priorities.py
HWTEST_PRIORITIES_MAP = {
HWTEST_WEEKLY_PRIORITY: 10,
HWTEST_CTS_PRIORITY: 11,
HWTEST_DAILY_PRIORITY: 20,
HWTEST_POST_BUILD_PRIORITY: 30,
HWTEST_DEFAULT_PRIORITY: 40,
HWTEST_BUILD_PRIORITY: 50,
HWTEST_PFQ_PRIORITY: 60,
HWTEST_CQ_PRIORITY: 70}
# Creates a mapping of priorities for skylab hwtest tasks. In swarming,
# lower number means high priorities. Priority lower than 48 will be special
# tasks. The upper bound of priority is 255.
# Use the same priorities mapping as autotest/venv/skylab_suite/swarming_lib.py
SKYLAB_HWTEST_PRIORITIES_MAP = {
HWTEST_WEEKLY_PRIORITY: 230,
HWTEST_CTS_PRIORITY: 215,
HWTEST_DAILY_PRIORITY: 200,
HWTEST_POST_BUILD_PRIORITY: 170,
HWTEST_DEFAULT_PRIORITY: 140,
HWTEST_BUILD_PRIORITY: 110,
HWTEST_PFQ_PRIORITY: 80,
HWTEST_CQ_PRIORITY: 50,
}
# The environment for executing tests.
ENV_SKYLAB = 'skylab'
ENV_AUTOTEST = 'autotest'
# The cipd package for skylab tool
CIPD_SKYLAB_PACKAGE = 'chromiumos/infra/skylab/linux-amd64'
# crbug.com/1108489: The skylab tool CIPD package is pinned to a specific
# version to avoid uncontrolled tool release and so that the tool is effectively
# branched with cbuildbot.
CIPD_SKYLAB_INSTANCE_ID = 'LU2Xmdk1oXyZPuiEfzDQhUWFMXY3jYQNPOzHRkRkZBEC'
# HWTest result statuses
HWTEST_STATUS_PASS = 'pass'
HWTEST_STATUS_FAIL = 'fail'
HWTEST_STATUS_ABORT = 'abort'
HWTEST_STATUS_OTHER = 'other'
# Set of HWTest result statuses that count as "not passed".
# NOTE(review): constant name contains a typo ("STATUES" should be
# "STATUSES"); kept as-is because renaming would break existing callers.
HWTEST_STATUES_NOT_PASSED = frozenset([HWTEST_STATUS_FAIL,
                                       HWTEST_STATUS_ABORT,
                                       HWTEST_STATUS_OTHER])
# Define HWTEST subsystem logic constants.
SUBSYSTEMS = 'subsystems'
SUBSYSTEM_UNUSED = 'subsystem_unused'
# Build messages
MESSAGE_TYPE_IGNORED_REASON = 'ignored_reason'
MESSAGE_TYPE_ANNOTATIONS_FINALIZED = 'annotations_finalized'
# MESSAGE_TYPE_IGNORED_REASON messages store the affected build as
# the CIDB column message_value.
MESSAGE_SUBTYPE_SELF_DESTRUCTION = 'self_destruction'
# Define HWTEST job_keyvals
JOB_KEYVAL_DATASTORE_PARENT_KEY = 'datastore_parent_key'
JOB_KEYVAL_CIDB_BUILD_ID = 'cidb_build_id'
JOB_KEYVAL_CIDB_BUILD_STAGE_ID = 'cidb_build_stage_id'
JOB_KEYVAL_BUILD_CONFIG = 'build_config'
JOB_KEYVAL_MASTER_BUILD_CONFIG = 'master_build_config'
JOB_KEYVAL_BRANCH = 'branch'
# How many total test retries should be done for a suite.
VM_TEST_MAX_RETRIES = 5
# Defines VM Test types.
SIMPLE_AU_TEST_TYPE = 'pfq_suite'
VM_SUITE_TEST_TYPE = 'vm_suite'
GCE_SUITE_TEST_TYPE = 'gce_suite'
CROS_VM_TEST_TYPE = 'cros_vm_test'
DEV_MODE_TEST_TYPE = 'dev_mode_test'
VALID_VM_TEST_TYPES = [
SIMPLE_AU_TEST_TYPE,
VM_SUITE_TEST_TYPE,
GCE_SUITE_TEST_TYPE,
CROS_VM_TEST_TYPE,
DEV_MODE_TEST_TYPE
]
VALID_GCE_TEST_SUITES = ['gce-smoke', 'gce-sanity']
# MoblabVM tests are suites of tests used to validate a moblab image via
# VMTests.
MOBLAB_VM_SMOKE_TEST_TYPE = 'moblab_smoke_test'
CHROMIUMOS_OVERLAY_DIR = 'src/third_party/chromiumos-overlay'
PORTAGE_STABLE_OVERLAY_DIR = 'src/third_party/portage-stable'
ECLASS_OVERLAY_DIR = 'src/third_party/eclass-overlay'
CHROMEOS_PARTNER_OVERLAY_DIR = 'src/private-overlays/chromeos-partner-overlay/'
PUBLIC_BINHOST_CONF_DIR = os.path.join(CHROMIUMOS_OVERLAY_DIR,
'chromeos/binhost')
PRIVATE_BINHOST_CONF_DIR = os.path.join(CHROMEOS_PARTNER_OVERLAY_DIR,
'chromeos/binhost')
VERSION_FILE = os.path.join(CHROMIUMOS_OVERLAY_DIR,
'chromeos/config/chromeos_version.sh')
SDK_VERSION_FILE = os.path.join(PUBLIC_BINHOST_CONF_DIR,
'host/sdk_version.conf')
SDK_GS_BUCKET = 'chromiumos-sdk'
PUBLIC = 'public'
PRIVATE = 'private'
BOTH_OVERLAYS = 'both'
PUBLIC_OVERLAYS = PUBLIC
PRIVATE_OVERLAYS = PRIVATE
VALID_OVERLAYS = [BOTH_OVERLAYS, PUBLIC_OVERLAYS, PRIVATE_OVERLAYS, None]
# Common default logging settings for use with the logging module.
LOGGER_FMT = '%(asctime)s: %(levelname)s: %(message)s'
LOGGER_DATE_FMT = '%H:%M:%S'
# Used by remote patch serialization/deserialzation.
INTERNAL_PATCH_TAG = 'i'
EXTERNAL_PATCH_TAG = 'e'
PATCH_TAGS = (INTERNAL_PATCH_TAG, EXTERNAL_PATCH_TAG)
GERRIT_ON_BORG_LABELS = {
'Code-Review': 'CRVW',
'Commit-Queue': 'COMR',
'Verified': 'VRIF',
}
# Environment variables that should be exposed to all children processes
# invoked via cros_build_lib.run.
ENV_PASSTHRU = ('CROS_SUDO_KEEP_ALIVE', SHARED_CACHE_ENVVAR,
PARALLEL_EMERGE_STATUS_FILE_ENVVAR)
# List of variables to proxy into the chroot from the host, and to
# have sudo export if existent. Anytime this list is modified, a new
# chroot_version_hooks.d upgrade script that symlinks to 153_rewrite_sudoers.d
# should be created.
CHROOT_ENVIRONMENT_WHITELIST = (
'CHROMEOS_OFFICIAL',
'CHROMEOS_VERSION_AUSERVER',
'CHROMEOS_VERSION_DEVSERVER',
'CHROMEOS_VERSION_TRACK',
'GCC_GITHASH',
'GIT_AUTHOR_EMAIL',
'GIT_AUTHOR_NAME',
'GIT_COMMITTER_EMAIL',
'GIT_COMMITTER_NAME',
'GIT_PROXY_COMMAND',
'GIT_SSH',
'RSYNC_PROXY',
'SSH_AGENT_PID',
'SSH_AUTH_SOCK',
'TMUX',
'USE',
'all_proxy',
'ftp_proxy',
'http_proxy',
'https_proxy',
'no_proxy',
)
# Paths for Chrome LKGM which are relative to the Chromium base url.
CHROME_LKGM_FILE = 'CHROMEOS_LKGM'
PATH_TO_CHROME_LKGM = 'chromeos/%s' % CHROME_LKGM_FILE
# Path for the Chrome LKGM's closest OWNERS file.
PATH_TO_CHROME_CHROMEOS_OWNERS = 'chromeos/OWNERS'
# Cache constants.
COMMON_CACHE = 'common'
# Artifact constants.
def _SlashToUnderscore(string):
return string.replace('/', '_')
# GCE tar ball constants.
def ImageBinToGceTar(image_bin):
    """Return the GCE tarball filename corresponding to a .bin image.

    Args:
        image_bin: Image filename, e.g. 'chromiumos_test_image.bin'.
            Must end with '.bin'.

    Returns:
        The matching GCE tarball name, e.g.
        'chromiumos_test_image_gce.tar.gz'.

    Raises:
        ValueError: If image_bin does not end with '.bin'.
    """
    # Validate explicitly instead of with `assert`: assertions are stripped
    # when Python runs with -O, which would silently skip this check.
    if not image_bin.endswith('.bin'):
        raise ValueError('Filename %s does not end with ".bin"' % image_bin)
    return '%s_gce.tar.gz' % os.path.splitext(image_bin)[0]
RELEASE_BUCKET = 'gs://chromeos-releases'
TRASH_BUCKET = 'gs://chromeos-throw-away-bucket'
CHROME_SYSROOT_TAR = 'sysroot_%s.tar.xz' % _SlashToUnderscore(CHROME_CP)
CHROME_ENV_TAR = 'environment_%s.tar.xz' % _SlashToUnderscore(CHROME_CP)
CHROME_ENV_FILE = 'environment'
BASE_IMAGE_NAME = 'chromiumos_base_image'
BASE_IMAGE_TAR = '%s.tar.xz' % BASE_IMAGE_NAME
BASE_IMAGE_BIN = '%s.bin' % BASE_IMAGE_NAME
BASE_IMAGE_GCE_TAR = ImageBinToGceTar(BASE_IMAGE_BIN)
IMAGE_SCRIPTS_NAME = 'image_scripts'
IMAGE_SCRIPTS_TAR = '%s.tar.xz' % IMAGE_SCRIPTS_NAME
TARGET_SYSROOT_TAR = 'sysroot_%s.tar.xz' % _SlashToUnderscore(TARGET_OS_PKG)
VM_IMAGE_NAME = 'chromiumos_qemu_image'
VM_IMAGE_BIN = '%s.bin' % VM_IMAGE_NAME
VM_IMAGE_TAR = '%s.tar.xz' % VM_IMAGE_NAME
VM_DISK_PREFIX = 'chromiumos_qemu_disk.bin'
VM_MEM_PREFIX = 'chromiumos_qemu_mem.bin'
VM_NUM_RETRIES = 0
# Disabling Tast VM retries because of https://crbug.com/1098346.
TAST_VM_NUM_RETRIES = 0
TAST_VM_TEST_RESULTS = 'tast_vm_test_results_%(attempt)s'
BASE_GUEST_VM_DIR = 'guest-vm-base'
TEST_GUEST_VM_DIR = 'guest-vm-test'
BASE_GUEST_VM_TAR = '%s.tar.xz' % BASE_GUEST_VM_DIR
TEST_GUEST_VM_TAR = '%s.tar.xz' % TEST_GUEST_VM_DIR
TEST_IMAGE_NAME = 'chromiumos_test_image'
TEST_IMAGE_TAR = '%s.tar.xz' % TEST_IMAGE_NAME
TEST_IMAGE_BIN = '%s.bin' % TEST_IMAGE_NAME
TEST_IMAGE_GCE_TAR = ImageBinToGceTar(TEST_IMAGE_BIN)
TEST_KEY_PRIVATE = 'id_rsa'
TEST_KEY_PUBLIC = 'id_rsa.pub'
DEV_IMAGE_NAME = 'chromiumos_image'
DEV_IMAGE_BIN = '%s.bin' % DEV_IMAGE_NAME
RECOVERY_IMAGE_NAME = 'recovery_image'
RECOVERY_IMAGE_BIN = '%s.bin' % RECOVERY_IMAGE_NAME
RECOVERY_IMAGE_TAR = '%s.tar.xz' % RECOVERY_IMAGE_NAME
# Image type constants.
IMAGE_TYPE_BASE = 'base'
IMAGE_TYPE_DEV = 'dev'
IMAGE_TYPE_TEST = 'test'
IMAGE_TYPE_RECOVERY = 'recovery'
IMAGE_TYPE_FACTORY = 'factory'
IMAGE_TYPE_FIRMWARE = 'firmware'
# USB PD accessory microcontroller firmware (e.g. power brick, display dongle).
IMAGE_TYPE_ACCESSORY_USBPD = 'accessory_usbpd'
# Standalone accessory microcontroller firmware (e.g. wireless keyboard).
IMAGE_TYPE_ACCESSORY_RWSIG = 'accessory_rwsig'
# Cr50 Firmware.
IMAGE_TYPE_CR50_FIRMWARE = 'cr50_firmware'
IMAGE_TYPE_TO_NAME = {
IMAGE_TYPE_BASE: BASE_IMAGE_BIN,
IMAGE_TYPE_DEV: DEV_IMAGE_BIN,
IMAGE_TYPE_RECOVERY: RECOVERY_IMAGE_BIN,
IMAGE_TYPE_TEST: TEST_IMAGE_BIN,
}
IMAGE_NAME_TO_TYPE = dict((v, k) for k, v in IMAGE_TYPE_TO_NAME.items())
METADATA_JSON = 'metadata.json'
PARTIAL_METADATA_JSON = 'partial-metadata.json'
METADATA_TAGS = 'tags'
DELTA_SYSROOT_TAR = 'delta_sysroot.tar.xz'
DELTA_SYSROOT_BATCH = 'batch'
FIRMWARE_ARCHIVE_NAME = 'firmware_from_source.tar.bz2'
FPMCU_UNITTESTS_ARCHIVE_NAME = 'fpmcu_unittests.tar.bz2'
# Global configuration constants.
CHROMITE_CONFIG_DIR = os.path.expanduser('~/.chromite')
CHROME_SDK_BASHRC = os.path.join(CHROMITE_CONFIG_DIR, 'chrome_sdk.bashrc')
SYNC_RETRIES = 4
SLEEP_TIMEOUT = 30
# Lab status url.
LAB_STATUS_URL = 'http://chromiumos-lab.appspot.com/current?format=json'
GOLO_SMTP_SERVER = 'mail.golo.chromium.org'
CHROME_GARDENER = 'chrome'
# Email alias to add as reviewer in Gerrit, which GWSQ will then automatically
# assign to the current gardener.
CHROME_GARDENER_REVIEW_EMAIL = '<EMAIL>'
# Useful config targets.
CANARY_MASTER = 'master-release'
PFQ_MASTER = 'master-chromium-pfq'
VMMST_ANDROID_PFQ_MASTER = 'master-vmmst-android-pfq'
PI_ANDROID_PFQ_MASTER = 'master-pi-android-pfq'
VMRVC_ANDROID_PFQ_MASTER = 'master-vmrvc-android-pfq'
# NOTE(review): constant name contains a typo ("MASTTER" should be "MASTER");
# kept as-is because renaming would break existing callers.
TOOLCHAIN_MASTTER = 'master-toolchain'
# Email validation regex. Not quite fully compliant with RFC 2822, but good
# approximation.
EMAIL_REGEX = r'[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,4}'
# Blacklist of files not allowed to be uploaded into the Partner Project Google
# Storage Buckets:
# debug.tgz contains debug symbols.
# manifest.xml exposes all of our repo names.
# vm_test_results can contain symbolicated crash dumps.
EXTRA_BUCKETS_FILES_BLACKLIST = [
'debug.tgz',
'manifest.xml',
'vm_test_results_*'
]
# AFDO common constants.
# How long does the AFDO_record autotest have to generate the AFDO perf data.
AFDO_GENERATE_TIMEOUT = 120 * 60
# Gmail Credentials.
GMAIL_TOKEN_CACHE_FILE = os.path.expanduser('~/.gmail_credentials')
GMAIL_TOKEN_JSON_FILE = '/creds/refresh_tokens/chromeos_gmail_alerts'
# Maximum number of boards per release group builder. This should be
# chosen/adjusted based on expected release build times such that successive
# builds don't overlap and create a backlog.
MAX_RELEASE_GROUP_BOARDS = 4
CHROMEOS_SERVICE_ACCOUNT = os.path.join('/', 'creds', 'service_accounts',
'service-account-chromeos.json')
# Buildbucket buckets
CHROMEOS_RELEASE_BUILDBUCKET_BUCKET = 'master.chromeos_release'
CHROMEOS_BUILDBUCKET_BUCKET = 'master.chromeos'
INTERNAL_SWARMING_BUILDBUCKET_BUCKET = 'luci.chromeos.general'
ACTIVE_BUCKETS = [
CHROMEOS_RELEASE_BUILDBUCKET_BUCKET,
CHROMEOS_BUILDBUCKET_BUCKET,
INTERNAL_SWARMING_BUILDBUCKET_BUCKET,
]
# Build retry limit on buildbucket
#
# 2020-05-13 by engeg@: This is rarely effective, causes confusion,
# higher bot utilization, and if the initial try was past uploading artifacts
# then the retry is destined to fail with a difficult to parse error.
# 2020-05-19 by seanabraham@: Leave this at zero. These retries can break
# Chrome-wide profiling. http://b/156994019
BUILDBUCKET_BUILD_RETRY_LIMIT = 0 # Do not change. Read the above.
# TODO(nxia): consolidate all run.metadata key constants,
# add a unit test to avoid duplicated keys in run_metadata
# Builder_run metadata keys
METADATA_SCHEDULED_IMPORTANT_SLAVES = 'scheduled_important_slaves'
METADATA_SCHEDULED_EXPERIMENTAL_SLAVES = 'scheduled_experimental_slaves'
METADATA_UNSCHEDULED_SLAVES = 'unscheduled_slaves'
# List of builders marked as experimental through the tree status, not all the
# experimental builders for a run.
METADATA_EXPERIMENTAL_BUILDERS = 'experimental_builders'
# Metadata key to indicate whether a build is self-destructed.
SELF_DESTRUCTED_BUILD = 'self_destructed_build'
# Metadata key to indicate whether a build is self-destructed with success.
SELF_DESTRUCTED_WITH_SUCCESS_BUILD = 'self_destructed_with_success_build'
# Chroot snapshot names
CHROOT_SNAPSHOT_CLEAN = 'clean-chroot'
# Partition labels.
PART_STATE = 'STATE'
PART_ROOT_A = 'ROOT-A'
PART_ROOT_B = 'ROOT-B'
PART_KERN_A = 'KERN-A'
PART_KERN_B = 'KERN-B'
# Quick provision payloads. These file names should never be changed, otherwise
# very bad things can happen :). The reason is we have already uploaded these
# files with these names for all boards. So if the name changes, all scripts
# that have been using this need to handle both cases to be backward compatible.
QUICK_PROVISION_PAYLOAD_KERNEL = 'full_dev_part_KERN.bin.gz'
QUICK_PROVISION_PAYLOAD_ROOTFS = 'full_dev_part_ROOT.bin.gz'
# Mock build and stage IDs.
MOCK_STAGE_ID = 313377
MOCK_BUILD_ID = 31337
# Topology dictionary copied from CIDB.
TOPOLOGY_DICT = {
'/buildbucket/host':
'cr-buildbucket.appspot.com',
'/chrome_swarming_proxy/host':
'chromeos-swarming.appspot.com',
'/datastore/creds_file': ('/creds/service_accounts/service-account-chromeos'
'-datastore-writer-prod.json'),
'/sheriffomatic/host':
'sheriff-o-matic.appspot.com',
'/statsd/es_host':
'172.16.58.3',
'/statsd/host':
'172.16.58.3',
}
# Percentage of child builders that need to complete to update LKGM
LKGM_THRESHOLD = 80
| en | 0.855875 | # -*- coding: utf-8 -*- # Copyright (c) 2012 The Chromium OS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. This module contains constants used by cbuildbot and related code. Try and find the root check out of the chromiumos tree # These constants are defined and used in the die_hook that logs failed # packages: 'cros_log_failed_packages' in profiles/base/profile.bashrc in # chromiumos-overlay. The status file is generated in CROS_METRICS_DIR, and # only if that environment variable is defined. # The following define the location for storing toolchain packages and # SDK overlay tarballs created during SDK builder runs. The paths are relative # to the build root's chroot, which guarantees that they are reachable from it # and get cleaned up when it is removed. # Only used for testing pinned images on test images. # Path to the lsb-release file on the device. # If cbuiltbot is running on a bot, then the cidb access credentials will be # available here. This directory will not exist otherwise. # Crash Server upload API key. # Buildbucket build status # Builder status strings # The following statuses are currently only used for build stages. # Quick fix for Buildbucket race problems. # Partition labels # Signer status strings # Change sources # Exception categories, as recorded in cidb # Monarch metric names # Stage Categorization for failed stages metric. # Re-execution API constants. # Used by --resume and --bootstrap to decipher which options they # can pass to the target cbuildbot (since it may not have that # option). # Format is Major.Minor. Minor is used for tracking new options added # that aren't critical to the older version if it's not ran. # Major is used for tracking heavy API breakage- for example, no longer # supporting the --resume option. 
# Support --master-build-id # Support --git-cache-dir # Support --goma_dir and --goma_client_json # Support --ts-mon-task-num # Support --sanity-check-build # Support --previous-build-state # Support --workspace # Support --master-buildbucket-id # Support --chromeos_goma_dir # Support --chrome-preload-dir # We rely on the (waterfall, builder name, build number) to uniquely identify # a build. However, future migrations or state wipes of the buildbot master may # cause it to reset its build number counter. When that happens, this value # should be incremented, ensuring that (waterfall, builder name, build number, # buildbot generation) is a unique identifier of builds. # Tests without 'cheets_CTS_', 'cheets_GTS.' prefix will not considered # as CTS/GTS test in chromite.lib.cts_helper # Google Storage bucket URI to store results in. # Roll XkbToKcmConverter with system image. It's a host executable and # doesn't depend on the target as long as it's pi-arc branch. The converter # is ARC specific and not a part of Android SDK. Having a custom target like # SDK_TOOLS might be better in the long term, but let's use one from ARM or # X86 target as there's no other similar executables right now. # We put it in two buckets because we have separate ACLs for arm and x86. # http://b/128405786 # For XkbToKcmConverter, see the comment in ANDROID_PI_BUILD_TARGETS. # For XkbToKcmConverter, see the comment in ANDROID_PI_BUILD_TARGETS. # These refer to *_TARGET variables in Android ebuild files, used when # parsing ebuilds to determine the corresponding Android branch. # NOTE: We may use `|` operator to union dict keys after we completely go # Python 3. # x86-user, x86-userdebug and x86-eng builders create build artifacts with the # same name, e.g. cheets_x86-target_files-${VERSION}.zip. Chrome OS builders # that need to select x86-user or x86-userdebug artifacts at emerge time need # the artifacts to have different filenames to avoid checksum failures. 
These # targets will have their artifacts renamed when the PFQ copies them from the # the Android bucket to the ARC++ bucket (b/33072485). # All builds will have the same name without target prefix. # Emerge checksum failures will be workarounded by ebuild rename symbol (->). # Timestamps in the JSON from GoB's web interface is of the form 'Tue # Dec 02 17:48:06 2014' and is assumed to be in UTC. # These projects can be responsible for infra failures. # The manifest contains extra attributes in the 'project' nodes to determine our # branching strategy for the project. # create: Create a new branch on the project repo for the new CrOS branch. # This is the default. # pin: On the CrOS branch, pin the project to the current revision. # tot: On the CrOS branch, the project still tracks ToT. # These branches are deleted at the beginning of every buildbot run. # Default OS target packages. # Constants for uprevving Chrome # Portage category and package name for Chrome. # Other packages to uprev while uprevving Chrome. # Chrome use flags # Builds and validates _alpha ebuilds. These builds sync to the latest # revsion of the Chromium src tree and build with that checkout. # Builds and validates chrome at a given revision through cbuildbot # --chrome_version # Builds and validates the latest Chromium release as defined by # ~/trunk/releases in the Chrome src tree. These ebuilds are suffixed with rc. # Builds and validates the latest Chromium release for a specific Chromium # branch that we want to watch. These ebuilds are suffixed with rc. # Builds and validates Chromium for a pre-populated directory. # Also uses _alpha, since portage doesn't have anything lower. # Constants for uprevving Android. # Portage package name for Android container. # Builds and validates the latest Android release. # Build types supported. # TODO(sosa): Deprecate PFQ type. # Incremental builds that are built using binary packages when available. 
# These builds have less validation than other build types. # These builds serve as PFQ builders. This is being deprecated. # Android PFQ type. Builds and validates new versions of Android. # Builds from source and non-incremental. This builds fully wipe their # chroot before the start of every build and no not use a BINHOST. # Full but with versioned logic. # Generate payloads for an already built build/version. # Similar behavior to canary, but used to validate toolchain changes. # Generic type of tryjob only build configs. # Special build type for Chroot builders. These builds focus on building # toolchains and validate that they work. # Use for builds that don't requite a type. # How many total test retries should be done for a suite. # Defines for the various hardware test suites: # BVT: Basic blocking suite to be run against any build that # requires a HWTest phase. # COMMIT: Suite of basic tests required for commits to the source # tree. Runs as a blocking suite on the CQ and PFQ; runs as # a non-blocking suite on canaries. # CANARY: Non-blocking suite run only against the canaries. # AFDO: Non-blocking suite run only AFDO builders. # MOBLAB: Blocking Suite run only on *_moblab builders. # INSTALLER: Blocking suite run against all canaries; tests basic installer # functionality. # Runs all non-informational Tast tests (exercising any of OS, Chrome, and ARC). # Runs non-informational Tast tests exercising either Chrome or ARC. # Runs non-informational Tast tests exercising ARC. # Runs all Tast informational tests. # Non-blocking informational hardware tests for Chrome, run throughout the # day on tip-of-trunk Chrome rather than on the daily Chrome branch. # Additional timeout to wait for autotest to abort a suite if the test takes # too long to run. This is meant to be overly conservative as a timeout may # indicate that autotest is at capacity. # Ordered by priority (first item being lowest). # Creates a mapping of priorities to make easy comparsions. 
# Use the same priorities mapping as autotest/client/common_lib/priorities.py # Creates a mapping of priorities for skylab hwtest tasks. In swarming, # lower number means high priorities. Priority lower than 48 will be special # tasks. The upper bound of priority is 255. # Use the same priorities mapping as autotest/venv/skylab_suite/swarming_lib.py # The environment for executing tests. # The cipd package for skylab tool # crbug.com/1108489: The skylab tool CIPD package is pinned to a specific # version to avoid uncontrolled tool release and so that the tool is effectively # branched with cbuildbot. # HWTest result statuses # Define HWTEST subsystem logic constants. # Build messages # MESSSGE_TYPE_IGNORED_REASON messages store the affected build as # the CIDB column message_value. # Define HWTEST job_keyvals # How many total test retries should be done for a suite. # Defines VM Test types. # MoblabVM tests are suites of tests used to validate a moblab image via # VMTests. # Common default logging settings for use with the logging module. # Used by remote patch serialization/deserialzation. # Environment variables that should be exposed to all children processes # invoked via cros_build_lib.run. # List of variables to proxy into the chroot from the host, and to # have sudo export if existent. Anytime this list is modified, a new # chroot_version_hooks.d upgrade script that symlinks to 153_rewrite_sudoers.d # should be created. # Paths for Chrome LKGM which are relative to the Chromium base url. # Path for the Chrome LKGM's closest OWNERS file. # Cache constants. # Artifact constants. # GCE tar ball constants. # Disabling Tast VM retries because of https://crbug.com/1098346. # Image type constants. # USB PD accessory microcontroller firmware (e.g. power brick, display dongle). # Standalone accessory microcontroller firmware (e.g. wireless keyboard). # Cr50 Firmware. # Global configuration constants. # Lab status url. 
# Email alias to add as reviewer in Gerrit, which GWSQ will then automatically # assign to the current gardener. # Useful config targets. # Email validation regex. Not quite fully compliant with RFC 2822, but good # approximation. # Blacklist of files not allowed to be uploaded into the Partner Project Google # Storage Buckets: # debug.tgz contains debug symbols. # manifest.xml exposes all of our repo names. # vm_test_results can contain symbolicated crash dumps. # AFDO common constants. # How long does the AFDO_record autotest have to generate the AFDO perf data. # Gmail Credentials. # Maximum number of boards per release group builder. This should be # chosen/adjusted based on expected release build times such that successive # builds don't overlap and create a backlog. # Buildbucket buckets # Build retry limit on buildbucket # # 2020-05-13 by engeg@: This is rarely effective, causes confusion, # higher bot utilization, and if the initial try was past uploading artifacts # then the retry is destined to fail with a difficult to parse error. # 2020-05-19 by seanabraham@: Leave this at zero. These retries can break # Chrome-wide profiling. http://b/156994019 # Do not change. Read the above. # TODO(nxia): consolidate all run.metadata key constants, # add a unit test to avoid duplicated keys in run_metadata # Builder_run metadata keys # List of builders marked as experimental through the tree status, not all the # experimental builders for a run. # Metadata key to indicate whether a build is self-destructed. # Metadata key to indicate whether a build is self-destructed with success. # Chroot snapshot names # Partition labels. # Quick provision payloads. These file names should never be changed, otherwise # very bad things can happen :). The reason is we have already uploaded these # files with these names for all boards. So if the name changes, all scripts # that have been using this need to handle both cases to be backward compatible. # Mock build and stage IDs. 
# Topology dictionary copied from CIDB. # Percentage of child builders that need to complete to update LKGM | 1.91504 | 2 |
src/api/dataflow/flow/handlers/flow_utils.py | Chromico/bk-base | 84 | 6613041 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from django.utils.translation import ugettext as _
from dataflow.flow.exceptions import ValidError
from dataflow.flow.handlers.node_factory import NODE_FACTORY
from dataflow.flow.handlers.node_utils import NodeUtils
from dataflow.flow.node_types import NodeTypes
from dataflow.shared.batch.batch_helper import BatchHelper
from dataflow.shared.stream.stream_helper import StreamHelper
def validate_node_infos(nodes, is_create):
    """
    Validate every node config in ``nodes`` against the form of its node type,
    normalizing ``result_table_id`` / ``from_result_table_ids`` along the way.

    @param nodes: {list} each element must carry node_type and from_nodes
    @param is_create: whether validation is for node creation
    @return: the validated (normalized in place) node list
    """
    for conf in nodes:
        # Validate the form of each node.
        _type = conf["node_type"]
        node_name = conf["name"]
        upstream = NodeUtils.get_input_nodes_config(_type, conf)
        if _type in NodeTypes.SOURCE_CATEGORY:
            # Source nodes reference their own result table directly.
            rt_id = conf.get("result_table_id", None)
            if not rt_id:
                raise ValidError(_("数据源节点类型必须包含参数result_table_id"))
            conf["from_result_table_ids"] = [rt_id]
        elif _type in NodeTypes.STORAGE_CATEGORY:
            # Storage nodes accept exactly one upstream node with one result table.
            if len(upstream) != 1 or len(upstream[0]["from_result_table_ids"]) != 1:
                raise ValidError(_("存储节点类型必须包含参数from_nodes,且仅支持一个上游节点和一个相应上游结果表"))
            rt_id = upstream[0]["from_result_table_ids"][0]
            conf["result_table_id"] = rt_id
            conf["from_result_table_ids"] = [rt_id]
        else:
            # Computation nodes aggregate every upstream result table.
            conf["from_result_table_ids"] = [
                rt_id for item in upstream for rt_id in item["from_result_table_ids"]
            ]
            conf = NodeUtils.set_input_nodes_config(_type, conf, upstream)
        handler = NODE_FACTORY.get_node_handler_by_type(_type)
        try:
            NodeUtils.validate_config(conf, handler.node_form, is_create)
        except Exception as e:
            raise Exception(
                _("%(node_type)s类型节点(%(name)s)创建失败: %(message)s")
                % {"node_type": _type, "name": node_name, "message": "{}".format(e)}
            )
    return nodes
def param_verify(params):
    """
    Validate a node's scheduling parameters with the matching helper.

    @param params: parameters to check; must contain ``scheduling_type``
    @return: the check result from the matching helper (implicitly None
        for any other scheduling type, as before)
    """
    scheduling_type = params["scheduling_type"]
    if scheduling_type == "stream":
        return StreamHelper.check_stream_param(params)
    if scheduling_type == "batch":
        return BatchHelper.check_batch_param(params)
| # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from django.utils.translation import ugettext as _
from dataflow.flow.exceptions import ValidError
from dataflow.flow.handlers.node_factory import NODE_FACTORY
from dataflow.flow.handlers.node_utils import NodeUtils
from dataflow.flow.node_types import NodeTypes
from dataflow.shared.batch.batch_helper import BatchHelper
from dataflow.shared.stream.stream_helper import StreamHelper
def validate_node_infos(nodes, is_create):
"""
解析nodes信息,确保其符合对应节点类型的表单参数要求
@param nodes: {list}
各元素必须字段:node_type, from_nodes
@param is_create:
@return: 返回验证后的节点参数结果
"""
for node_info in nodes:
# 校验表单
node_type = node_info["node_type"]
name = node_info["name"]
from_nodes = NodeUtils.get_input_nodes_config(node_type, node_info)
# from_nodes = node_info.get('from_nodes', [])
if node_type in NodeTypes.SOURCE_CATEGORY:
result_table_id = node_info.get("result_table_id", None)
if not result_table_id:
raise ValidError(_("数据源节点类型必须包含参数result_table_id"))
node_info["from_result_table_ids"] = [result_table_id]
elif node_type in NodeTypes.STORAGE_CATEGORY:
if len(from_nodes) != 1 or len(from_nodes[0]["from_result_table_ids"]) != 1:
raise ValidError(_("存储节点类型必须包含参数from_nodes,且仅支持一个上游节点和一个相应上游结果表"))
result_table_id = from_nodes[0]["from_result_table_ids"][0]
node_info["result_table_id"] = result_table_id
node_info["from_result_table_ids"] = [result_table_id]
else:
from_result_table_ids = []
for rt_ids in from_nodes:
from_result_table_ids.extend(rt_ids["from_result_table_ids"])
node_info["from_result_table_ids"] = from_result_table_ids
node_info = NodeUtils.set_input_nodes_config(node_type, node_info, from_nodes)
node_obj = NODE_FACTORY.get_node_handler_by_type(node_type)
try:
NodeUtils.validate_config(node_info, node_obj.node_form, is_create)
except Exception as e:
raise Exception(
_("%(node_type)s类型节点(%(name)s)创建失败: %(message)s")
% {"node_type": node_type, "name": name, "message": "{}".format(e)}
)
return nodes
def param_verify(params):
"""
节点参数校验
@param params 校验参数
@return: 返回参数校验结果
"""
if params["scheduling_type"] == "batch":
return BatchHelper.check_batch_param(params)
elif params["scheduling_type"] == "stream":
return StreamHelper.check_stream_param(params)
| en | 0.634252 | # -*- coding: utf-8 -*- Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available. Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. BK-BASE 蓝鲸基础平台 is licensed under the MIT License. License for BK-BASE 蓝鲸基础平台: -------------------------------------------------------------------- Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 解析nodes信息,确保其符合对应节点类型的表单参数要求 @param nodes: {list} 各元素必须字段:node_type, from_nodes @param is_create: @return: 返回验证后的节点参数结果 # 校验表单 # from_nodes = node_info.get('from_nodes', []) 节点参数校验 @param params 校验参数 @return: 返回参数校验结果 | 1.388354 | 1 |
probe/android/logcat.py | EverythingMe/probe | 40 | 6613042 | <reponame>EverythingMe/probe
from __future__ import absolute_import
import Queue
import threading
from probe.runtime import runtime
__author__ = 'rotem'
class AsyncLogcatReader(threading.Thread):
    """
    Helper class to implement asynchronous reading of a file
    in a separate thread. Pushes read lines on a queue to
    be consumed in another thread.

    Only lines containing the current process pid are forwarded, so
    consumers see logcat output for this process only.
    """
    def __init__(self, process, queue, package_name):
        """
        :param process: subprocess whose stdout is the logcat stream
        :param queue: Queue.Queue receiving the matching log lines
        :param package_name: kept for API compatibility (currently unused)
        """
        assert isinstance(queue, Queue.Queue)
        assert callable(process.stdout.readline)
        threading.Thread.__init__(self)
        self.daemon = True
        self._process = process
        self._fd = process.stdout
        self._queue = queue
        self._pid = runtime.get_pid()
        # Named _stop_event (not _stop) so we never clobber the internal
        # Thread._stop attribute used by CPython's threading module (py3).
        self._stop_event = threading.Event()
    def run(self):
        """
        The body of the thread: read lines and put them in the queue.
        """
        while not self.stopped():
            line = self._fd.readline()
            if self._pid in line:
                self._queue.put(line)
        self._fd.close()
    def eof(self):
        """
        Check whether there is no more content to expect.
        """
        return not self.is_alive() and self._queue.empty()
    def stop(self):
        """Request termination and wake any consumer blocked on the queue."""
        self._stop_event.set()
        # Sentinel so a consumer blocked on queue.get() wakes up.
        self._queue.put('\0')
    def stopped(self):
        """Return True once stop() has been requested."""
        return self._stop_event.is_set()
import Queue
import threading
from probe.runtime import runtime
__author__ = 'rotem'
class AsyncLogcatReader(threading.Thread):
"""
Helper class to implement asynchronous reading of a file
in a separate thread. Pushes read lines on a queue to
be consumed in another thread.
"""
def __init__(self, process, queue, package_name):
assert isinstance(queue, Queue.Queue)
assert callable(process.stdout.readline)
threading.Thread.__init__(self)
self.daemon = True
self._process = process
self._fd = process.stdout
self._queue = queue
self._pid = runtime.get_pid()
self._stop = threading.Event()
def run(self):
"""
The body of the thread: read lines and put them in the queue.
"""
while not self.stopped():
line = self._fd.readline()
if self._pid in line:
self._queue.put(line)
self._fd.close()
def eof(self):
"""
Check whether there is no more content to expect.
"""
return not self.is_alive() and self._queue.empty()
def stop(self):
self._stop.set()
self._queue.put('\0')
def stopped(self):
return self._stop.isSet() | en | 0.915631 | Helper class to implement asynchronous reading of a file in a separate thread. Pushes read lines on a queue to be consumed in another thread. The body of the thread: read lines and put them in the queue. Check whether there is no more content to expect. | 2.884698 | 3 |
male_names.py | sinus-x/EliskaSidlova | 0 | 6613043 | names = [
"Abadon",
"Abdon",
"Ábel",
"Abelard",
"Abraham",
"Abrahám",
"Absolón",
"Adalbert",
"Adam",
"Adin",
"Adolf",
"Adrián",
"Agaton",
"Achil",
"Alan",
"Alban",
"Albert",
"Albín",
"Albrecht",
"Aldo",
"Aleš",
"Alexandr",
"Alexej",
"Alfons",
"Alfréd",
"Alois",
"Alva",
"Alvar",
"Alvin",
"Amadeus",
"Amand",
"Amát",
"Ambrož",
"Ámos",
"Anastáz",
"Anatol",
"Anděl",
"Andrej",
"Anselm",
"Antal",
"Antonín",
"Aram",
"Ariel",
"Aristid",
"Arkád",
"Armand",
"Armin",
"Arne",
"Arnold",
"Arnošt",
"Árón",
"Arpád",
"Artur",
"Artuš",
"Arzen",
"Atanas",
"Atila",
"August",
"Aurel",
"Axel",
"Baltazar",
"Barnabáš",
"Bartoloměj",
"Bazil",
"Beatus",
"Bedřich",
"Benedikt",
"Benjamín",
"Bernard",
"Bertold",
"Bertram",
"Bivoj",
"Blahomil",
"Blahomír",
"Blahoslav",
"Blažej",
"Bohdan",
"Bohuchval",
"Bohumil",
"Bohumír",
"Bohuslav",
"Bohuš",
"Bojan",
"Bolemír",
"Boleslav",
"Bonifác",
"Boris",
"Bořek",
"Bořislav",
"Bořivoj",
"Božetěch",
"Božidar",
"Božislav",
"Branimír",
"Branislav",
"Bratislav",
"Brian",
"Brit",
"Bruno",
"Břetislav",
"Budimír",
"Budislav",
"Budivoj",
"Cecil",
"Celestýn",
"Cézar",
"Ctibor",
"Ctirad",
"Ctislav",
"Cyprián",
"Cyril",
"Čeněk",
"Čestmír",
"Čistoslav",
"Dag",
"Dalibor",
"Dalimil",
"Dalimír",
"Damián",
"Dan",
"Daniel",
"Darek",
"Darius",
"David",
"Denis",
"Děpold",
"Dětmar",
"Dětřich",
"Dezider",
"Dimitrij",
"Dino",
"Diviš",
"Dluhoš",
"Dobromil",
"Dobromír",
"Dobroslav",
"Dominik",
"Donald",
"Donát",
"Dorián",
"Drahomil",
"Drahomír",
"Drahoslav",
"Drahoš",
"Drahotín",
"Dušan",
"Edgar",
"Edmond",
"Edvard",
"Edvín",
"Egmont",
"Egon",
"Eliáš",
"Elizej",
"Elmar",
"Elvis",
"Emanuel",
"Emerich",
"Emil",
"Engelbert",
"Erazim",
"Erhard",
"Erik",
"Ernest",
"Ervín",
"Eusebius",
"Evald",
"Evan",
"Evarist",
"Evžen",
"Ezechiel",
"Ezra",
"Fabián",
"Faust",
"Fedor",
"Felix",
"Ferdinand",
"Fidel",
"Filemon",
"Filibert",
"Filip",
"Filomen",
"Flavián",
"Florentýn",
"Florián",
"Fortunát",
"František",
"Fridolín",
"Gabin",
"Gabriel",
"Gál",
"Garik",
"Gaston",
"Gedeon",
"Genadij",
"Gerald",
"Gerard",
"Gerazim",
"Géza",
"Gilbert",
"Gleb",
"Glen",
"Gorazd",
"Gordon",
"Gothard",
"Gracián",
"Grant",
"Gunter",
"Gustav",
"Hanuš",
"Harald",
"Haštal",
"Havel",
"Helmut",
"Herbert",
"Heřman",
"Hilar",
"Hjalmar",
"Homér",
"Honor",
"Horác",
"Horst",
"Horymír",
"Hostimil",
"Hostislav",
"Hostivít",
"Hovard",
"Hubert",
"Hugo",
"Hvězdoslav",
"Hyacint",
"Hynek",
"Hypolit",
"Chrabroš",
"Chranibor",
"Chranislav",
"Chrudoš",
"Chval",
"Ignác",
"Igor",
"Ilja",
"Inocenc",
"Irenej",
"Irvin",
"Ivan",
"Ivar",
"Ivo",
"Izaiáš",
"Izák",
"Izidor",
"Izmael",
"Jacek",
"Jáchym",
"Jakub",
"Jan",
"Jarmil",
"Jarolím",
"Jaromír",
"Jaroslav",
"Jasoň",
"Jeremiáš",
"Jeroným",
"Jiljí",
"Jimram",
"Jindřich",
"Jiří",
"Job",
"Joel",
"Jonáš",
"Jonatan",
"Jordan",
"Josef",
"Jošt",
"Jozue",
"Juda",
"Julián",
"Julius",
"Justýn",
"Kajetán",
"Kamil",
"Karel",
"Kasián",
"Kastor",
"Kašpar",
"Kazimír",
"Kilián",
"Kim",
"Klement",
"Knut",
"Koloman",
"Kolombín",
"Konrád",
"Konstantýn",
"Kornel",
"Kosma",
"Krasomil",
"Krasoslav",
"Kristián",
"Kryšpín",
"Kryštof",
"Křesomysl",
"Kurt",
"Květoslav",
"Květoš",
"Kvido",
"Ladislav",
"Lambert",
"Lars",
"Laurenc",
"Lazar",
"Leandr",
"Leo",
"Leodegar",
"Leonard",
"Leonid",
"Leontýn",
"Leopold",
"Leoš",
"Lešek",
"Lev",
"Libor",
"Liboslav",
"Lionel",
"Livius",
"Lotar",
"Lubomír",
"Lubor",
"Luboš",
"Lucián",
"Luděk",
"Ludivoj",
"Ludomír",
"Ludoslav",
"Ludvík",
"Lukáš",
"Lukrecius",
"Lumír",
"Lutobor",
"Magnus",
"Makar",
"Manfréd",
"Mansvet",
"Manuel",
"Marcel",
"Marek",
"Marián",
"Marin",
"Mario",
"Martin",
"Matěj",
"Matouš",
"Max",
"Maxmilián",
"Mečislav",
"Medard",
"Melichar",
"Merlin",
"Mervin",
"Metod",
"Michal",
"Mikoláš",
"Milan",
"Milíč",
"Milivoj",
"Miloň",
"Milorad",
"Miloslav",
"Miloš",
"Milota",
"Milouš",
"Milovan",
"Mirek",
"Miromil",
"Miron",
"Miroslav",
"Mlad",
"Mnata",
"Mnislav",
"Modest",
"Mojmír",
"Mojžíš",
"Morgan",
"Moric",
"Mstislav",
"Myrtil",
"Napoleon",
"Narcis",
"Natan",
"Natanael",
"Něhoslav",
"Neklan",
"Nepomuk",
"Nezamysl",
"Nikita",
"Nikodém",
"Nikola",
"Norbert",
"Norman",
"Odolen",
"Odon",
"Oktavián",
"Olaf",
"Olbram",
"Oldřich",
"Oleg",
"Oliver",
"Omar",
"Ondřej",
"Orest",
"Oskar",
"Osvald",
"Ota",
"Otakar",
"Otmar",
"Ovidius",
"Palmiro",
"Pankrác",
"Pantaleon",
"Paris",
"Parsival",
"Paskal",
"Patrik",
"Pavel",
"Pelhřim",
"Perikles",
"Petr",
"Petronius",
"Pius",
"Platón",
"Polykarp",
"Pravdomil",
"Prokop",
"Prosper",
"Přemysl",
"Přibyslav",
"Radek",
"Radhost",
"Radim",
"Radivoj",
"Radmil",
"Radomír",
"Radoslav",
"Radovan",
"Radúz",
"Rafael",
"Raimund",
"Rainald",
"Rainer",
"Rainhard",
"Rajko",
"Ralf",
"Ramon",
"Randolf",
"Ranek",
"Ratibor",
"Ratmír",
"Redmond",
"Remig",
"Remus",
"Renát",
"René",
"Richard",
"Robert",
"Robin",
"Robinson",
"Rodan",
"Roderik",
"Roger",
"Roch",
"Roland",
"Rolf",
"Roman",
"Romeo",
"Romuald",
"Romul",
"Ronald",
"Rostislav",
"Ruben",
"Rudolf",
"Rufus",
"Rupert",
"Ruslan",
"Řehoř",
"Sámo",
"Samson",
"Samuel",
"Saturnin",
"Saul",
"Sáva",
"Sebastián",
"Sedrik",
"Serafín",
"Serenus",
"Sergej",
"Servác",
"Severín",
"Sidon",
"Sigfríd",
"Silván",
"Silvestr",
"Simeon",
"Sinkler",
"Sixt",
"Slávek",
"Slaviboj",
"Slavoj",
"Slavomil",
"Slavomír",
"Smil",
"Soběslav",
"Sokrat",
"Soter",
"Spytihněv",
"Stanimír",
"Stanislav",
"Stojan",
"Stojmír",
"Svatobor",
"Svatomír",
"Svatopluk",
"Svatoslav",
"Sven",
"Svetozar",
"Šalomoun",
"Šavel",
"Šimon",
"Šťasta",
"Štěpán",
"Tadeáš",
"Tankred",
"Taras",
"Teobald",
"Teodor",
"Teodorik",
"Teodoz",
"Teofan",
"Teofil",
"Terenc",
"Tiber",
"Tibor",
"Tichomil",
"Tichomír",
"Tichon",
"Timon",
"Timotej",
"Timur",
"Titus",
"Tobiáš",
"Tomáš",
"Tomislav",
"Torkvát",
"Torsten",
"Tristan",
"Udo",
"Ulrich",
"Upton",
"Urban",
"Uve",
"Václav",
"Vadim",
"Valdemar",
"Valentýn",
"Valerián",
"Valtr",
"Vasil",
"Vavřinec",
"Veleslav",
"Velimír",
"Věnceslav",
"Vendelín",
"Verner",
"Věroslav",
"Vidor",
"Viktor",
"Vilém",
"Vilibald",
"Vilmar",
"Vincenc",
"Virgil",
"Virgin",
"Vít",
"Vítězslav",
"Vitold",
"Vivian",
"Vladan",
"Vladimír",
"Vladislav",
"Vladivoj",
"Vlastimil",
"Vlastimír",
"Vlastislav",
"Vlk",
"Vojen",
"Vojmil",
"Vojmír",
"Vojslav",
"Vojtěch",
"Volfgang",
"Vratislav",
"Vsevolod",
"Všebor",
"Všerad",
"Všeslav",
"Záboj",
"Zachar",
"Záviš",
"Zbyhněv",
"Zbyněk",
"Zbyslav",
"Zdeněk",
"Zderad",
"Zdislav",
"Zeno",
"Zikmund",
"Zlatan",
"Zlatomír",
"Zoltán",
"Zoran",
"Zoroslav",
"Zosim",
"Zvonimír",
"Žarko",
"Ždan",
"Želibor",
"Želimír",
"Želislav",
"Žitomír",
"Žitoslav",
"Živan",
]
| names = [
"Abadon",
"Abdon",
"Ábel",
"Abelard",
"Abraham",
"Abrahám",
"Absolón",
"Adalbert",
"Adam",
"Adin",
"Adolf",
"Adrián",
"Agaton",
"Achil",
"Alan",
"Alban",
"Albert",
"Albín",
"Albrecht",
"Aldo",
"Aleš",
"Alexandr",
"Alexej",
"Alfons",
"Alfréd",
"Alois",
"Alva",
"Alvar",
"Alvin",
"Amadeus",
"Amand",
"Amát",
"Ambrož",
"Ámos",
"Anastáz",
"Anatol",
"Anděl",
"Andrej",
"Anselm",
"Antal",
"Antonín",
"Aram",
"Ariel",
"Aristid",
"Arkád",
"Armand",
"Armin",
"Arne",
"Arnold",
"Arnošt",
"Árón",
"Arpád",
"Artur",
"Artuš",
"Arzen",
"Atanas",
"Atila",
"August",
"Aurel",
"Axel",
"Baltazar",
"Barnabáš",
"Bartoloměj",
"Bazil",
"Beatus",
"Bedřich",
"Benedikt",
"Benjamín",
"Bernard",
"Bertold",
"Bertram",
"Bivoj",
"Blahomil",
"Blahomír",
"Blahoslav",
"Blažej",
"Bohdan",
"Bohuchval",
"Bohumil",
"Bohumír",
"Bohuslav",
"Bohuš",
"Bojan",
"Bolemír",
"Boleslav",
"Bonifác",
"Boris",
"Bořek",
"Bořislav",
"Bořivoj",
"Božetěch",
"Božidar",
"Božislav",
"Branimír",
"Branislav",
"Bratislav",
"Brian",
"Brit",
"Bruno",
"Břetislav",
"Budimír",
"Budislav",
"Budivoj",
"Cecil",
"Celestýn",
"Cézar",
"Ctibor",
"Ctirad",
"Ctislav",
"Cyprián",
"Cyril",
"Čeněk",
"Čestmír",
"Čistoslav",
"Dag",
"Dalibor",
"Dalimil",
"Dalimír",
"Damián",
"Dan",
"Daniel",
"Darek",
"Darius",
"David",
"Denis",
"Děpold",
"Dětmar",
"Dětřich",
"Dezider",
"Dimitrij",
"Dino",
"Diviš",
"Dluhoš",
"Dobromil",
"Dobromír",
"Dobroslav",
"Dominik",
"Donald",
"Donát",
"Dorián",
"Drahomil",
"Drahomír",
"Drahoslav",
"Drahoš",
"Drahotín",
"Dušan",
"Edgar",
"Edmond",
"Edvard",
"Edvín",
"Egmont",
"Egon",
"Eliáš",
"Elizej",
"Elmar",
"Elvis",
"Emanuel",
"Emerich",
"Emil",
"Engelbert",
"Erazim",
"Erhard",
"Erik",
"Ernest",
"Ervín",
"Eusebius",
"Evald",
"Evan",
"Evarist",
"Evžen",
"Ezechiel",
"Ezra",
"Fabián",
"Faust",
"Fedor",
"Felix",
"Ferdinand",
"Fidel",
"Filemon",
"Filibert",
"Filip",
"Filomen",
"Flavián",
"Florentýn",
"Florián",
"Fortunát",
"František",
"Fridolín",
"Gabin",
"Gabriel",
"Gál",
"Garik",
"Gaston",
"Gedeon",
"Genadij",
"Gerald",
"Gerard",
"Gerazim",
"Géza",
"Gilbert",
"Gleb",
"Glen",
"Gorazd",
"Gordon",
"Gothard",
"Gracián",
"Grant",
"Gunter",
"Gustav",
"Hanuš",
"Harald",
"Haštal",
"Havel",
"Helmut",
"Herbert",
"Heřman",
"Hilar",
"Hjalmar",
"Homér",
"Honor",
"Horác",
"Horst",
"Horymír",
"Hostimil",
"Hostislav",
"Hostivít",
"Hovard",
"Hubert",
"Hugo",
"Hvězdoslav",
"Hyacint",
"Hynek",
"Hypolit",
"Chrabroš",
"Chranibor",
"Chranislav",
"Chrudoš",
"Chval",
"Ignác",
"Igor",
"Ilja",
"Inocenc",
"Irenej",
"Irvin",
"Ivan",
"Ivar",
"Ivo",
"Izaiáš",
"Izák",
"Izidor",
"Izmael",
"Jacek",
"Jáchym",
"Jakub",
"Jan",
"Jarmil",
"Jarolím",
"Jaromír",
"Jaroslav",
"Jasoň",
"Jeremiáš",
"Jeroným",
"Jiljí",
"Jimram",
"Jindřich",
"Jiří",
"Job",
"Joel",
"Jonáš",
"Jonatan",
"Jordan",
"Josef",
"Jošt",
"Jozue",
"Juda",
"Julián",
"Julius",
"Justýn",
"Kajetán",
"Kamil",
"Karel",
"Kasián",
"Kastor",
"Kašpar",
"Kazimír",
"Kilián",
"Kim",
"Klement",
"Knut",
"Koloman",
"Kolombín",
"Konrád",
"Konstantýn",
"Kornel",
"Kosma",
"Krasomil",
"Krasoslav",
"Kristián",
"Kryšpín",
"Kryštof",
"Křesomysl",
"Kurt",
"Květoslav",
"Květoš",
"Kvido",
"Ladislav",
"Lambert",
"Lars",
"Laurenc",
"Lazar",
"Leandr",
"Leo",
"Leodegar",
"Leonard",
"Leonid",
"Leontýn",
"Leopold",
"Leoš",
"Lešek",
"Lev",
"Libor",
"Liboslav",
"Lionel",
"Livius",
"Lotar",
"Lubomír",
"Lubor",
"Luboš",
"Lucián",
"Luděk",
"Ludivoj",
"Ludomír",
"Ludoslav",
"Ludvík",
"Lukáš",
"Lukrecius",
"Lumír",
"Lutobor",
"Magnus",
"Makar",
"Manfréd",
"Mansvet",
"Manuel",
"Marcel",
"Marek",
"Marián",
"Marin",
"Mario",
"Martin",
"Matěj",
"Matouš",
"Max",
"Maxmilián",
"Mečislav",
"Medard",
"Melichar",
"Merlin",
"Mervin",
"Metod",
"Michal",
"Mikoláš",
"Milan",
"Milíč",
"Milivoj",
"Miloň",
"Milorad",
"Miloslav",
"Miloš",
"Milota",
"Milouš",
"Milovan",
"Mirek",
"Miromil",
"Miron",
"Miroslav",
"Mlad",
"Mnata",
"Mnislav",
"Modest",
"Mojmír",
"Mojžíš",
"Morgan",
"Moric",
"Mstislav",
"Myrtil",
"Napoleon",
"Narcis",
"Natan",
"Natanael",
"Něhoslav",
"Neklan",
"Nepomuk",
"Nezamysl",
"Nikita",
"Nikodém",
"Nikola",
"Norbert",
"Norman",
"Odolen",
"Odon",
"Oktavián",
"Olaf",
"Olbram",
"Oldřich",
"Oleg",
"Oliver",
"Omar",
"Ondřej",
"Orest",
"Oskar",
"Osvald",
"Ota",
"Otakar",
"Otmar",
"Ovidius",
"Palmiro",
"Pankrác",
"Pantaleon",
"Paris",
"Parsival",
"Paskal",
"Patrik",
"Pavel",
"Pelhřim",
"Perikles",
"Petr",
"Petronius",
"Pius",
"Platón",
"Polykarp",
"Pravdomil",
"Prokop",
"Prosper",
"Přemysl",
"Přibyslav",
"Radek",
"Radhost",
"Radim",
"Radivoj",
"Radmil",
"Radomír",
"Radoslav",
"Radovan",
"Radúz",
"Rafael",
"Raimund",
"Rainald",
"Rainer",
"Rainhard",
"Rajko",
"Ralf",
"Ramon",
"Randolf",
"Ranek",
"Ratibor",
"Ratmír",
"Redmond",
"Remig",
"Remus",
"Renát",
"René",
"Richard",
"Robert",
"Robin",
"Robinson",
"Rodan",
"Roderik",
"Roger",
"Roch",
"Roland",
"Rolf",
"Roman",
"Romeo",
"Romuald",
"Romul",
"Ronald",
"Rostislav",
"Ruben",
"Rudolf",
"Rufus",
"Rupert",
"Ruslan",
"Řehoř",
"Sámo",
"Samson",
"Samuel",
"Saturnin",
"Saul",
"Sáva",
"Sebastián",
"Sedrik",
"Serafín",
"Serenus",
"Sergej",
"Servác",
"Severín",
"Sidon",
"Sigfríd",
"Silván",
"Silvestr",
"Simeon",
"Sinkler",
"Sixt",
"Slávek",
"Slaviboj",
"Slavoj",
"Slavomil",
"Slavomír",
"Smil",
"Soběslav",
"Sokrat",
"Soter",
"Spytihněv",
"Stanimír",
"Stanislav",
"Stojan",
"Stojmír",
"Svatobor",
"Svatomír",
"Svatopluk",
"Svatoslav",
"Sven",
"Svetozar",
"Šalomoun",
"Šavel",
"Šimon",
"Šťasta",
"Štěpán",
"Tadeáš",
"Tankred",
"Taras",
"Teobald",
"Teodor",
"Teodorik",
"Teodoz",
"Teofan",
"Teofil",
"Terenc",
"Tiber",
"Tibor",
"Tichomil",
"Tichomír",
"Tichon",
"Timon",
"Timotej",
"Timur",
"Titus",
"Tobiáš",
"Tomáš",
"Tomislav",
"Torkvát",
"Torsten",
"Tristan",
"Udo",
"Ulrich",
"Upton",
"Urban",
"Uve",
"Václav",
"Vadim",
"Valdemar",
"Valentýn",
"Valerián",
"Valtr",
"Vasil",
"Vavřinec",
"Veleslav",
"Velimír",
"Věnceslav",
"Vendelín",
"Verner",
"Věroslav",
"Vidor",
"Viktor",
"Vilém",
"Vilibald",
"Vilmar",
"Vincenc",
"Virgil",
"Virgin",
"Vít",
"Vítězslav",
"Vitold",
"Vivian",
"Vladan",
"Vladimír",
"Vladislav",
"Vladivoj",
"Vlastimil",
"Vlastimír",
"Vlastislav",
"Vlk",
"Vojen",
"Vojmil",
"Vojmír",
"Vojslav",
"Vojtěch",
"Volfgang",
"Vratislav",
"Vsevolod",
"Všebor",
"Všerad",
"Všeslav",
"Záboj",
"Zachar",
"Záviš",
"Zbyhněv",
"Zbyněk",
"Zbyslav",
"Zdeněk",
"Zderad",
"Zdislav",
"Zeno",
"Zikmund",
"Zlatan",
"Zlatomír",
"Zoltán",
"Zoran",
"Zoroslav",
"Zosim",
"Zvonimír",
"Žarko",
"Ždan",
"Želibor",
"Želimír",
"Želislav",
"Žitomír",
"Žitoslav",
"Živan",
]
| none | 1 | 1.611954 | 2 | |
tests/usage_test.py | mailosaurapp/mailosaur-python | 8 | 6613044 | <filename>tests/usage_test.py
import os
from unittest import TestCase
from mailosaur import MailosaurClient
class UsageTest(TestCase):
    """Integration tests for the Mailosaur account-usage API endpoints."""

    @classmethod
    def setUpClass(cls):
        """Create one shared API client from environment configuration.

        Raises if MAILOSAUR_API_KEY is missing, since every test needs it.
        (First parameter renamed self -> cls: this is a classmethod.)
        """
        api_key = os.getenv('MAILOSAUR_API_KEY')
        base_url = os.getenv('MAILOSAUR_BASE_URL')

        if api_key is None:
            raise Exception("Missing necessary environment variables - refer to README.md")

        # Stored on the class; shared by all test methods below.
        cls.client = MailosaurClient(api_key, base_url)

    def test_account_limits(self):
        """Account limits must be present and strictly positive."""
        result = self.client.usage.limits()

        self.assertIsNotNone(result.servers)
        self.assertIsNotNone(result.users)
        self.assertIsNotNone(result.email)
        self.assertIsNotNone(result.sms)

        self.assertTrue(result.servers.limit > 0)
        self.assertTrue(result.users.limit > 0)
        self.assertTrue(result.email.limit > 0)
        self.assertTrue(result.sms.limit > 0)

    def test_transactions_list(self):
        """Usage transactions must list timestamped email/sms counts."""
        result = self.client.usage.transactions()

        self.assertTrue(len(result.items) > 1)
        self.assertIsNotNone(result.items[0].timestamp)
        self.assertIsNotNone(result.items[0].email)
        self.assertIsNotNone(result.items[0].sms)
| <filename>tests/usage_test.py
import os
from unittest import TestCase
from mailosaur import MailosaurClient
class UsageTest(TestCase):
@classmethod
def setUpClass(self):
api_key = os.getenv('MAILOSAUR_API_KEY')
base_url = os.getenv('MAILOSAUR_BASE_URL')
if api_key is None:
raise Exception("Missing necessary environment variables - refer to README.md")
self.client = MailosaurClient(api_key, base_url)
def test_account_limits(self):
result = self.client.usage.limits()
self.assertIsNotNone(result.servers)
self.assertIsNotNone(result.users)
self.assertIsNotNone(result.email)
self.assertIsNotNone(result.sms)
self.assertTrue(result.servers.limit > 0)
self.assertTrue(result.users.limit > 0)
self.assertTrue(result.email.limit > 0)
self.assertTrue(result.sms.limit > 0)
def test_transactions_list(self):
result = self.client.usage.transactions()
self.assertTrue(len(result.items) > 1)
self.assertIsNotNone(result.items[0].timestamp)
self.assertIsNotNone(result.items[0].email)
self.assertIsNotNone(result.items[0].sms)
| none | 1 | 2.559266 | 3 | |
brewerslab-orig-commander/metroui/latexpdf/exportBrewlog.py | allena29/brewerslabng | 1 | 6613045 | brewlog="08.10.2016"
brewery="Worsdell Brewing"
recipeName="Wheat"
import time
import _mysql
import mysql.connector
db=_mysql.connect(host="localhost",user="brewerslab",passwd='<PASSWORD>',db="brewerslab")
con=mysql.connector.connect(user='brewerslab',password='<PASSWORD>',database="brewerslab")
con2=mysql.connector.connect(user='brewerslab',password='<PASSWORD>',database="brewerslab")
con3=mysql.connector.connect(user='brewerslab',password='<PASSWORD>',database="brewerslab")
recipeCursor=db.query("select description,waterProfile FROM gRecipes WHERE recipename ='%s'" %(recipeName))
result=db.use_result()
row=result.fetch_row()
((recipeDescription,waterProfile),)=row
o=open("template.lex")
y=o.readline()
while y != "":
y=y.rstrip()
if y[1:8] == "title{}":
print "\\title{Brewlog %s of %s by %s}" %(brewlog,recipeName,brewery)
elif y == "[[recipedescription]]":
for l in recipeDescription.split('\r\n'):
print l,"\\\\"
elif y == "[[fermentables-item-list]]":
noItems=True
cursor=con.cursor()
cursor.execute("select entity,recipeName,ingredient,qty,mustMash,isGrain,isAdjunct,hwe,unit FROM gIngredients WHERE recipeName = '%s' AND ingredientType = 'fermentables' ORDER BY qty DESC" %(recipeName))
for row in cursor:
(ent,recipe,ingredient,qty,mustMash,isGrain,isAdjunct,hwe,unit)=row
if mustMash and isGrain:
print "\\item %.0f %s of %s" %(qty,unit,ingredient)
noItems=False
row=result.fetch_row()
cursor=con.cursor()
cursor.execute("select entity,recipeName,ingredient,qty,mustMash,isGrain,isAdjunct,hwe,unit FROM gIngredients WHERE recipeName = '%s' AND ingredientType = 'fermentables' ORDER BY qty DESC" %(recipeName))
for row in cursor:
(ent,recipe,ingredient,qty,mustMash,isGrain,isAdjunct,hwe,unit)=row
if not mustMash and not isGrain:
print "\\item %.0f %s of %s" %(qty,unit,ingredient)
noItems=False
noItems=False
row=result.fetch_row()
if noItems:
print "\\item no fermentables "
elif y == "[[hops-item-list]]":
itemType="hops"
hop_values=[0.02,0.06,0.08,2,5,15,60,20.222]
hop_labels = {60:'Copper (60min)',15:'Aroma (15min)',5:'Finishing (5min)',0.08:'Flameout (0min)',0.06:'Whirlpool/Hopback (0min)' , 0.02:'Dryhop',20.222:'First Wort Hop' , 2:'Spices' }
noItems=True
for (hopAddAtA,hopAddAtB) in [ (20,21), (20.3,6000), (2.2,20.1), (1.5,2.1), (0,1.4) ]:
cursor=con.cursor()
cursor.execute("select entity,recipeName,ingredient,qty,hopAddAt,hopAlpha,unit FROM gIngredients WHERE recipeName = '%s' AND ingredientType = '%s' AND hopAddAt >=%s and hopAddAt <=%s ORDER BY hopAddAt DESC,qty DESC" %(recipeName,itemType,hopAddAtA,hopAddAtB))
for row in cursor:
(ent,recipe,ingredient,qty,hopAddAt,hopAlpha,unit)=row
if hop_labels.has_key(hopAddAt):
print "\\item %.0f %s of %s (%.1f %%) for %s" %(qty,unit,ingredient, hopAlpha, hop_labels[hopAddAt] )
else:
print "\\item %.0f %s of %s (%.1f %%) for %s min" %(qty,unit,ingredient, hopAlpha, hopaddAt)
noItems=False
row=result.fetch_row()
if noItems:
print "\\item no hops "
elif y == "[[yeast-item-list]]":
itemType="yeast"
noItems=True
cursor=con.cursor()
cursor.execute("select entity,recipeName,ingredient,qty,unit FROM gIngredients WHERE recipeName = '%s' AND ingredientType = 'yeast' ORDER BY qty DESC" %(recipeName))
for row in cursor:
(ent,recipe,ingredient,qty,unit)=row
print "\\item %.0f %s of %s" %(qty,unit,ingredient)
noItems=False
row=result.fetch_row()
if noItems:
print "\\item no yeast "
elif y == "[[misc-item-list]]":
itemType="yeast"
noItems=True
cursor=con.cursor()
cursor.execute("select entity,recipeName,ingredient,qty,unit FROM gIngredients WHERE recipeName = '%s' AND ingredientType = 'misc' AND category <> 'watertreat' ORDER BY qty DESC" %(recipeName))
for row in cursor:
(ent,recipe,ingredient,qty,unit)=row
print "\\item %.0f %s of %s" %(qty,unit,ingredient)
noItems=False
row=result.fetch_row()
if noItems:
print "\\item no sundries "
elif y == "[[water-details]]":
waterdetails=False
cursor4=con3.cursor()
cursor4.execute("select entity,ca,mg,na,co3,so4,cl,testdate,treatmentMethod FROM gWater WHERE profile=0 ORDER BY testdate DESC")
for row4 in cursor4:
(entity,xca,xmg,xna,xco3,xso4,xcl,wt,treatmentMethod)=row4
waterdetails=True
if not waterdetails:
print "No water test details available"
else:
d= time.strftime('%A %d %B %Y',time.localtime( wt ) )
print "The desired water profile for a \\textbf{%s} style is listed below, the \\textit{italic} values show the water as was tested on %s." %(waterProfile,d)
elif y == "[[water-item-list]]":
Ca=-1
Mg=-1
Na=-1
CO3=-1
SO4=-1
Cl=-1
if waterProfile:
Ca=0
Mg=0
Na=0
CO3=0
SO4=0
Cl=0
ca=0
mg=0
na=0
co3=0
so4=0
cl=0
cursor=con.cursor()
cursor.execute("select ca,mg,na,co3,so4,cl FROM gWater WHERE description = '%s'" %(waterProfile))
for row in cursor:
(Ca,Mg,Na,CO3,SO4,Cl)=row
cursor4=con3.cursor()
cursor4.execute("select entity,ca,mg,na,co3,so4,cl,testdate,treatmentMethod FROM gWater WHERE profile=0 ORDER BY testdate DESC LIMIT 0,1")
for row4 in cursor4:
(entity,ca,mg,na,co3,so4,cl,wt,treatmentMethod)=row4
print "\\item Calcium \\textbf{Ca} \\tab %.1f ppm \\tab \\textit{%.1f ppm}" %(Ca,ca)
print "\\item Magnesium \\textbf{Mg} \\tab %.1f ppm \\tab \\textit{%.1f ppm}" %(Mg,mg)
print "\\item Sodium \\textbf{Na} \\tab %.1f ppm \\tab \\textit{%.1f ppm}" %(Na,na)
print "\\item Carbonate \\textbf{CO3} \\tab %.1f ppm \\tab \\textit{%.1f ppm}" %(CO3,co3)
print "\\item Sulphate \\textbf{SO4} \\tab %.1f ppm \\tab \\textit{%.1f ppm}" %(SO4,so4)
print "\\item Chloride \\textbf{Cl} \\tab %.1f ppm \\tab \\textit{%.1f ppm}" %(Cl,cl)
else:
print y
y=o.readline()
o.close()
| brewlog="08.10.2016"
brewery="Worsdell Brewing"
recipeName="Wheat"
import time
import _mysql
import mysql.connector
db=_mysql.connect(host="localhost",user="brewerslab",passwd='<PASSWORD>',db="brewerslab")
con=mysql.connector.connect(user='brewerslab',password='<PASSWORD>',database="brewerslab")
con2=mysql.connector.connect(user='brewerslab',password='<PASSWORD>',database="brewerslab")
con3=mysql.connector.connect(user='brewerslab',password='<PASSWORD>',database="brewerslab")
recipeCursor=db.query("select description,waterProfile FROM gRecipes WHERE recipename ='%s'" %(recipeName))
result=db.use_result()
row=result.fetch_row()
((recipeDescription,waterProfile),)=row
o=open("template.lex")
y=o.readline()
while y != "":
y=y.rstrip()
if y[1:8] == "title{}":
print "\\title{Brewlog %s of %s by %s}" %(brewlog,recipeName,brewery)
elif y == "[[recipedescription]]":
for l in recipeDescription.split('\r\n'):
print l,"\\\\"
elif y == "[[fermentables-item-list]]":
noItems=True
cursor=con.cursor()
cursor.execute("select entity,recipeName,ingredient,qty,mustMash,isGrain,isAdjunct,hwe,unit FROM gIngredients WHERE recipeName = '%s' AND ingredientType = 'fermentables' ORDER BY qty DESC" %(recipeName))
for row in cursor:
(ent,recipe,ingredient,qty,mustMash,isGrain,isAdjunct,hwe,unit)=row
if mustMash and isGrain:
print "\\item %.0f %s of %s" %(qty,unit,ingredient)
noItems=False
row=result.fetch_row()
cursor=con.cursor()
cursor.execute("select entity,recipeName,ingredient,qty,mustMash,isGrain,isAdjunct,hwe,unit FROM gIngredients WHERE recipeName = '%s' AND ingredientType = 'fermentables' ORDER BY qty DESC" %(recipeName))
for row in cursor:
(ent,recipe,ingredient,qty,mustMash,isGrain,isAdjunct,hwe,unit)=row
if not mustMash and not isGrain:
print "\\item %.0f %s of %s" %(qty,unit,ingredient)
noItems=False
noItems=False
row=result.fetch_row()
if noItems:
print "\\item no fermentables "
elif y == "[[hops-item-list]]":
itemType="hops"
hop_values=[0.02,0.06,0.08,2,5,15,60,20.222]
hop_labels = {60:'Copper (60min)',15:'Aroma (15min)',5:'Finishing (5min)',0.08:'Flameout (0min)',0.06:'Whirlpool/Hopback (0min)' , 0.02:'Dryhop',20.222:'First Wort Hop' , 2:'Spices' }
noItems=True
for (hopAddAtA,hopAddAtB) in [ (20,21), (20.3,6000), (2.2,20.1), (1.5,2.1), (0,1.4) ]:
cursor=con.cursor()
cursor.execute("select entity,recipeName,ingredient,qty,hopAddAt,hopAlpha,unit FROM gIngredients WHERE recipeName = '%s' AND ingredientType = '%s' AND hopAddAt >=%s and hopAddAt <=%s ORDER BY hopAddAt DESC,qty DESC" %(recipeName,itemType,hopAddAtA,hopAddAtB))
for row in cursor:
(ent,recipe,ingredient,qty,hopAddAt,hopAlpha,unit)=row
if hop_labels.has_key(hopAddAt):
print "\\item %.0f %s of %s (%.1f %%) for %s" %(qty,unit,ingredient, hopAlpha, hop_labels[hopAddAt] )
else:
print "\\item %.0f %s of %s (%.1f %%) for %s min" %(qty,unit,ingredient, hopAlpha, hopaddAt)
noItems=False
row=result.fetch_row()
if noItems:
print "\\item no hops "
elif y == "[[yeast-item-list]]":
itemType="yeast"
noItems=True
cursor=con.cursor()
cursor.execute("select entity,recipeName,ingredient,qty,unit FROM gIngredients WHERE recipeName = '%s' AND ingredientType = 'yeast' ORDER BY qty DESC" %(recipeName))
for row in cursor:
(ent,recipe,ingredient,qty,unit)=row
print "\\item %.0f %s of %s" %(qty,unit,ingredient)
noItems=False
row=result.fetch_row()
if noItems:
print "\\item no yeast "
elif y == "[[misc-item-list]]":
itemType="yeast"
noItems=True
cursor=con.cursor()
cursor.execute("select entity,recipeName,ingredient,qty,unit FROM gIngredients WHERE recipeName = '%s' AND ingredientType = 'misc' AND category <> 'watertreat' ORDER BY qty DESC" %(recipeName))
for row in cursor:
(ent,recipe,ingredient,qty,unit)=row
print "\\item %.0f %s of %s" %(qty,unit,ingredient)
noItems=False
row=result.fetch_row()
if noItems:
print "\\item no sundries "
elif y == "[[water-details]]":
waterdetails=False
cursor4=con3.cursor()
cursor4.execute("select entity,ca,mg,na,co3,so4,cl,testdate,treatmentMethod FROM gWater WHERE profile=0 ORDER BY testdate DESC")
for row4 in cursor4:
(entity,xca,xmg,xna,xco3,xso4,xcl,wt,treatmentMethod)=row4
waterdetails=True
if not waterdetails:
print "No water test details available"
else:
d= time.strftime('%A %d %B %Y',time.localtime( wt ) )
print "The desired water profile for a \\textbf{%s} style is listed below, the \\textit{italic} values show the water as was tested on %s." %(waterProfile,d)
elif y == "[[water-item-list]]":
Ca=-1
Mg=-1
Na=-1
CO3=-1
SO4=-1
Cl=-1
if waterProfile:
Ca=0
Mg=0
Na=0
CO3=0
SO4=0
Cl=0
ca=0
mg=0
na=0
co3=0
so4=0
cl=0
cursor=con.cursor()
cursor.execute("select ca,mg,na,co3,so4,cl FROM gWater WHERE description = '%s'" %(waterProfile))
for row in cursor:
(Ca,Mg,Na,CO3,SO4,Cl)=row
cursor4=con3.cursor()
cursor4.execute("select entity,ca,mg,na,co3,so4,cl,testdate,treatmentMethod FROM gWater WHERE profile=0 ORDER BY testdate DESC LIMIT 0,1")
for row4 in cursor4:
(entity,ca,mg,na,co3,so4,cl,wt,treatmentMethod)=row4
print "\\item Calcium \\textbf{Ca} \\tab %.1f ppm \\tab \\textit{%.1f ppm}" %(Ca,ca)
print "\\item Magnesium \\textbf{Mg} \\tab %.1f ppm \\tab \\textit{%.1f ppm}" %(Mg,mg)
print "\\item Sodium \\textbf{Na} \\tab %.1f ppm \\tab \\textit{%.1f ppm}" %(Na,na)
print "\\item Carbonate \\textbf{CO3} \\tab %.1f ppm \\tab \\textit{%.1f ppm}" %(CO3,co3)
print "\\item Sulphate \\textbf{SO4} \\tab %.1f ppm \\tab \\textit{%.1f ppm}" %(SO4,so4)
print "\\item Chloride \\textbf{Cl} \\tab %.1f ppm \\tab \\textit{%.1f ppm}" %(Cl,cl)
else:
print y
y=o.readline()
o.close()
| none | 1 | 2.653368 | 3 | |
models/REG2D.py | jytime/Deep-SfM-Revisited | 126 | 6613046 | from __future__ import print_function
import torch
import torch.nn as nn
import torch.utils.data
from torch.autograd import Variable
import torch.nn.functional as F
import math
from models.submodule import *
from models.inverse_warp import inverse_warp
import pdb
from lib.config import cfg, cfg_from_file, save_config_to_file
import utils
def convtext(in_planes, out_planes, kernel_size = 3, stride = 1, dilation = 1):
    """Build a conv -> (optional BatchNorm) -> ReLU block for the context net.

    The convolution uses "same"-style padding for the given dilation, so the
    spatial size is preserved when stride == 1.

    Args:
        in_planes: number of input channels.
        out_planes: number of output channels.
        kernel_size: square kernel size.
        stride: convolution stride.
        dilation: convolution dilation.

    Returns:
        nn.Sequential of the layers; BatchNorm2d is inserted only when
        cfg.CONTEXT_BN is set.
    """
    # The two branches of the original duplicated the Conv2d construction;
    # build the layer list once and insert BatchNorm conditionally instead.
    layers = [
        nn.Conv2d(in_planes, out_planes, kernel_size = kernel_size, stride = stride,
                  dilation = dilation, padding = ((kernel_size - 1) * dilation) // 2,
                  bias=False),
    ]
    if cfg.CONTEXT_BN:
        layers.append(nn.BatchNorm2d(out_planes))
    layers.append(nn.ReLU(inplace=True))
    return nn.Sequential(*layers)
def convbn(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
    """Conv2d (with bias) -> BatchNorm2d -> LeakyReLU(0.1) building block."""
    conv = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bias=True,
    )
    norm = nn.BatchNorm2d(out_planes)
    act = nn.LeakyReLU(0.1, inplace=True)
    return nn.Sequential(conv, norm, act)
class REG2D(nn.Module):
    """Two-view depth regression network built on a 2D cost volume.

    Stage 1 sweeps ``nlabel`` inverse-depth planes, correlates reference and
    warped target features into a cost volume, and regresses an initial depth
    map. Stage 2 re-samples the cost around the (detached) initial depth and
    around several translation-scaled pose hypotheses, then refines the depth
    with a dilated context network.
    """

    def __init__(self, nlabel, mindepth):
        # NOTE(review): the `mindepth` argument is ignored; the value is
        # taken from cfg.MIN_DEPTH instead -- confirm this is intended.
        super(REG2D, self).__init__()
        self.nlabel = nlabel
        self.mindepth = cfg.MIN_DEPTH
        # Shared 2D CNN embedding both the reference and target images.
        self.feature_extraction = feature_extraction()

        # Channel counts of the cascaded cost-aggregation convs; each conv
        # sees the raw cost volume concatenated with all previous outputs
        # (DenseNet-style growth), hence the cumulative sums.
        dd = np.cumsum([128,128,96,64,32])
        od = self.nlabel
        self.conv0 = convbn(od, 128, kernel_size=3, stride=1)
        self.conv1 = convbn(od+dd[0],128, kernel_size=3, stride=1)
        self.conv2 = convbn(od+dd[1],96, kernel_size=3, stride=1)
        self.conv3 = convbn(od+dd[2],64, kernel_size=3, stride=1)
        self.conv4 = convbn(od+dd[3],32, kernel_size=3, stride=1)
        # Regresses the 1-channel initial depth map from the aggregated cost.
        self.predict = nn.Conv2d(od+dd[4],1,kernel_size=3,stride=1,padding=1,bias=True)

        # Refinement network with growing dilation. Its 135 input channels
        # are 99 resampled-cost channels (9 depth offsets x 11 pose scales,
        # see forward) + 1 initial-depth channel + 3 downsampled RGB
        # channels + 32 feature channels (assumes the feature extractor
        # outputs 32 channels -- confirm in models.submodule).
        self.context = nn.Sequential(
            convbn(135, 128, kernel_size=3, padding=1, dilation=1),
            convbn(128, 128, kernel_size=3, padding=1, dilation=1),
            convbn(128, 128, kernel_size=3, padding=1, dilation=1),
            convbn(128, 128, kernel_size=3, padding=2, dilation=2),
            convbn(128, 128, kernel_size=3, padding=4, dilation=4),
            convbn(128, 96 , kernel_size=3, padding=8, dilation=8),
            convbn(96, 64 , kernel_size=3, padding=16, dilation=16),
            convbn(64, 32 , kernel_size=3, padding=1, dilation=1),
            nn.Conv2d(32, 1 , kernel_size=3, stride=1, padding=1, bias=True))

        # He-style initialisation for convolutions, unit scale / zero shift
        # for batch-norm layers, zero bias for linear layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.Conv3d):
                n = m.kernel_size[0] * m.kernel_size[1]*m.kernel_size[2] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def init_weights(self):
        """Optional re-initialisation: Xavier weights, zero biases."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
                # NOTE: nn.init.xavier_uniform is the deprecated alias of
                # nn.init.xavier_uniform_ in current PyTorch releases.
                nn.init.xavier_uniform(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()

    def forward(self, ref, targets, pose, intrinsics, intrinsics_inv):
        """Predict depth for `ref` from a single warped target view.

        Args:
            ref: reference image batch.
            targets: sequence of target views; exactly one is supported.
            pose: relative poses, indexed as pose[:, j] per target view.
            intrinsics: full-resolution camera matrices.
            intrinsics_inv: their inverses.

        Returns:
            (depth_init, depth): the coarse depth prediction and the
            context-refined depth, both upsampled x4 to input resolution.
        """
        # Features are used at 1/4 resolution (see the 1/4 downsample of
        # `ref` below), so scale the intrinsics to match.
        intrinsics4 = intrinsics.clone()
        intrinsics_inv4 = intrinsics_inv.clone()
        intrinsics4[:,:2,:] = intrinsics4[:,:2,:] / 4
        intrinsics_inv4[:,:2,:2] = intrinsics_inv4[:,:2,:2] * 4

        refimg_fea = self.feature_extraction(ref)
        # Constant numerator so that depth hypothesis i is
        # mindepth * nlabel / (i + 1), i.e. uniform in inverse depth.
        disp2depth = Variable(torch.ones(refimg_fea.size(0), refimg_fea.size(2), refimg_fea.size(3))).cuda() * self.mindepth * self.nlabel
        batch_size = ref.size(0)
        assert len(targets) == 1
        for j, target in enumerate(targets):
            targetimg_fea = self.feature_extraction(target)
            # Plane-sweep cost volume: one channel per depth hypothesis.
            cost = Variable(torch.FloatTensor(batch_size, int(self.nlabel), refimg_fea.size()[2], refimg_fea.size()[3]).zero_()).cuda()
            for i in range(int(self.nlabel)):
                depth = torch.div(disp2depth, i+1+1e-16)
                targetimg_fea_t = inverse_warp(targetimg_fea, depth, pose[:,j], intrinsics4, intrinsics_inv4)
                # Mean feature correlation as the matching score.
                cost[:,i] = (refimg_fea*targetimg_fea_t).mean(dim=1)
            cost = F.leaky_relu(cost, 0.1,inplace=True)

            # Dense cost aggregation: each conv sees the cost volume plus
            # all previous activations.
            x = torch.cat((self.conv0(cost), cost),1)
            x = torch.cat((self.conv1(x), x),1)
            x = torch.cat((self.conv2(x), x),1)
            x = torch.cat((self.conv3(x), x),1)
            x = torch.cat((self.conv4(x), x),1)
            depth_init = self.predict(x).squeeze(1)

            # Build 11 pose hypotheses by scaling the translation component
            # by 0.5..1.5 to absorb scale error in the estimated pose.
            pose_to_sample = pose[:,j]
            scales = torch.from_numpy(np.arange(0.5, 1.6,0.1)).view(1,-1,1,1).cuda()
            num_sampled_p = scales.shape[1]
            sampled_poses = pose_to_sample.unsqueeze(1).expand(-1,num_sampled_p,-1,-1).contiguous()
            sampled_poses[...,-1:] = sampled_poses[...,-1:]*scales

            # Re-sample the cost at 9 depth offsets (+-2.0 in 0.5 steps)
            # around the detached initial depth, for every pose hypothesis.
            offset_num = 9
            delta = (offset_num-1)/2
            std = 0.5
            cost = Variable(torch.FloatTensor(batch_size, offset_num*num_sampled_p, refimg_fea.size()[2], refimg_fea.size()[3]).zero_()).cuda()
            temp_num = 0
            for offset in range(offset_num):
                depth_offset = (offset-delta)*std
                depth_hypo = depth_init.detach()+depth_offset
                for sampled_p in range(num_sampled_p):
                    targetimg_fea_t = inverse_warp(targetimg_fea, depth_hypo, sampled_poses[:,sampled_p], intrinsics4, intrinsics_inv4)
                    cost[:,temp_num] = (refimg_fea*targetimg_fea_t).mean(dim=1)
                    temp_num = temp_num+1

            # Context refinement input: local costs + features + detached
            # initial depth + 1/4-resolution RGB; the network predicts a
            # residual added on top of the detached initial depth.
            ref_down = F.interpolate(ref,scale_factor=(1/4), mode='bilinear',align_corners=True,recompute_scale_factor=True)
            x = torch.cat((cost,refimg_fea,depth_init.unsqueeze(1).detach(),ref_down),dim=1)
            depth_init = depth_init.unsqueeze(1)
            depth = self.context(x) + depth_init.detach()

            # Upsample both predictions back to the input resolution.
            depth = F.interpolate(depth,scale_factor=4, mode='bilinear',align_corners=True,recompute_scale_factor=True)
            depth_init = F.interpolate(depth_init,scale_factor=4, mode='bilinear',align_corners=True,recompute_scale_factor=True)

        return depth_init, depth
| from __future__ import print_function
import torch
import torch.nn as nn
import torch.utils.data
from torch.autograd import Variable
import torch.nn.functional as F
import math
from models.submodule import *
from models.inverse_warp import inverse_warp
import pdb
from lib.config import cfg, cfg_from_file, save_config_to_file
import utils
def convtext(in_planes, out_planes, kernel_size = 3, stride = 1, dilation = 1):
    """Build a conv -> (optional BatchNorm) -> ReLU block for the context net.

    The convolution uses "same"-style padding for the given dilation, so the
    spatial size is preserved when stride == 1.

    Args:
        in_planes: number of input channels.
        out_planes: number of output channels.
        kernel_size: square kernel size.
        stride: convolution stride.
        dilation: convolution dilation.

    Returns:
        nn.Sequential of the layers; BatchNorm2d is inserted only when
        cfg.CONTEXT_BN is set.
    """
    # The two branches of the original duplicated the Conv2d construction;
    # build the layer list once and insert BatchNorm conditionally instead.
    layers = [
        nn.Conv2d(in_planes, out_planes, kernel_size = kernel_size, stride = stride,
                  dilation = dilation, padding = ((kernel_size - 1) * dilation) // 2,
                  bias=False),
    ]
    if cfg.CONTEXT_BN:
        layers.append(nn.BatchNorm2d(out_planes))
    layers.append(nn.ReLU(inplace=True))
    return nn.Sequential(*layers)
def convbn(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
    """Conv2d (with bias) -> BatchNorm2d -> LeakyReLU(0.1) building block."""
    conv = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bias=True,
    )
    norm = nn.BatchNorm2d(out_planes)
    act = nn.LeakyReLU(0.1, inplace=True)
    return nn.Sequential(conv, norm, act)
class REG2D(nn.Module):
    """Two-view depth regression network built on a 2D cost volume.

    Stage 1 sweeps ``nlabel`` inverse-depth planes, correlates reference and
    warped target features into a cost volume, and regresses an initial depth
    map. Stage 2 re-samples the cost around the (detached) initial depth and
    around several translation-scaled pose hypotheses, then refines the depth
    with a dilated context network.
    """

    def __init__(self, nlabel, mindepth):
        # NOTE(review): the `mindepth` argument is ignored; the value is
        # taken from cfg.MIN_DEPTH instead -- confirm this is intended.
        super(REG2D, self).__init__()
        self.nlabel = nlabel
        self.mindepth = cfg.MIN_DEPTH
        # Shared 2D CNN embedding both the reference and target images.
        self.feature_extraction = feature_extraction()

        # Channel counts of the cascaded cost-aggregation convs; each conv
        # sees the raw cost volume concatenated with all previous outputs
        # (DenseNet-style growth), hence the cumulative sums.
        dd = np.cumsum([128,128,96,64,32])
        od = self.nlabel
        self.conv0 = convbn(od, 128, kernel_size=3, stride=1)
        self.conv1 = convbn(od+dd[0],128, kernel_size=3, stride=1)
        self.conv2 = convbn(od+dd[1],96, kernel_size=3, stride=1)
        self.conv3 = convbn(od+dd[2],64, kernel_size=3, stride=1)
        self.conv4 = convbn(od+dd[3],32, kernel_size=3, stride=1)
        # Regresses the 1-channel initial depth map from the aggregated cost.
        self.predict = nn.Conv2d(od+dd[4],1,kernel_size=3,stride=1,padding=1,bias=True)

        # Refinement network with growing dilation. Its 135 input channels
        # are 99 resampled-cost channels (9 depth offsets x 11 pose scales,
        # see forward) + 1 initial-depth channel + 3 downsampled RGB
        # channels + 32 feature channels (assumes the feature extractor
        # outputs 32 channels -- confirm in models.submodule).
        self.context = nn.Sequential(
            convbn(135, 128, kernel_size=3, padding=1, dilation=1),
            convbn(128, 128, kernel_size=3, padding=1, dilation=1),
            convbn(128, 128, kernel_size=3, padding=1, dilation=1),
            convbn(128, 128, kernel_size=3, padding=2, dilation=2),
            convbn(128, 128, kernel_size=3, padding=4, dilation=4),
            convbn(128, 96 , kernel_size=3, padding=8, dilation=8),
            convbn(96, 64 , kernel_size=3, padding=16, dilation=16),
            convbn(64, 32 , kernel_size=3, padding=1, dilation=1),
            nn.Conv2d(32, 1 , kernel_size=3, stride=1, padding=1, bias=True))

        # He-style initialisation for convolutions, unit scale / zero shift
        # for batch-norm layers, zero bias for linear layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.Conv3d):
                n = m.kernel_size[0] * m.kernel_size[1]*m.kernel_size[2] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def init_weights(self):
        """Optional re-initialisation: Xavier weights, zero biases."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
                # NOTE: nn.init.xavier_uniform is the deprecated alias of
                # nn.init.xavier_uniform_ in current PyTorch releases.
                nn.init.xavier_uniform(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()

    def forward(self, ref, targets, pose, intrinsics, intrinsics_inv):
        """Predict depth for `ref` from a single warped target view.

        Args:
            ref: reference image batch.
            targets: sequence of target views; exactly one is supported.
            pose: relative poses, indexed as pose[:, j] per target view.
            intrinsics: full-resolution camera matrices.
            intrinsics_inv: their inverses.

        Returns:
            (depth_init, depth): the coarse depth prediction and the
            context-refined depth, both upsampled x4 to input resolution.
        """
        # Features are used at 1/4 resolution (see the 1/4 downsample of
        # `ref` below), so scale the intrinsics to match.
        intrinsics4 = intrinsics.clone()
        intrinsics_inv4 = intrinsics_inv.clone()
        intrinsics4[:,:2,:] = intrinsics4[:,:2,:] / 4
        intrinsics_inv4[:,:2,:2] = intrinsics_inv4[:,:2,:2] * 4

        refimg_fea = self.feature_extraction(ref)
        # Constant numerator so that depth hypothesis i is
        # mindepth * nlabel / (i + 1), i.e. uniform in inverse depth.
        disp2depth = Variable(torch.ones(refimg_fea.size(0), refimg_fea.size(2), refimg_fea.size(3))).cuda() * self.mindepth * self.nlabel
        batch_size = ref.size(0)
        assert len(targets) == 1
        for j, target in enumerate(targets):
            targetimg_fea = self.feature_extraction(target)
            # Plane-sweep cost volume: one channel per depth hypothesis.
            cost = Variable(torch.FloatTensor(batch_size, int(self.nlabel), refimg_fea.size()[2], refimg_fea.size()[3]).zero_()).cuda()
            for i in range(int(self.nlabel)):
                depth = torch.div(disp2depth, i+1+1e-16)
                targetimg_fea_t = inverse_warp(targetimg_fea, depth, pose[:,j], intrinsics4, intrinsics_inv4)
                # Mean feature correlation as the matching score.
                cost[:,i] = (refimg_fea*targetimg_fea_t).mean(dim=1)
            cost = F.leaky_relu(cost, 0.1,inplace=True)

            # Dense cost aggregation: each conv sees the cost volume plus
            # all previous activations.
            x = torch.cat((self.conv0(cost), cost),1)
            x = torch.cat((self.conv1(x), x),1)
            x = torch.cat((self.conv2(x), x),1)
            x = torch.cat((self.conv3(x), x),1)
            x = torch.cat((self.conv4(x), x),1)
            depth_init = self.predict(x).squeeze(1)

            # Build 11 pose hypotheses by scaling the translation component
            # by 0.5..1.5 to absorb scale error in the estimated pose.
            pose_to_sample = pose[:,j]
            scales = torch.from_numpy(np.arange(0.5, 1.6,0.1)).view(1,-1,1,1).cuda()
            num_sampled_p = scales.shape[1]
            sampled_poses = pose_to_sample.unsqueeze(1).expand(-1,num_sampled_p,-1,-1).contiguous()
            sampled_poses[...,-1:] = sampled_poses[...,-1:]*scales

            # Re-sample the cost at 9 depth offsets (+-2.0 in 0.5 steps)
            # around the detached initial depth, for every pose hypothesis.
            offset_num = 9
            delta = (offset_num-1)/2
            std = 0.5
            cost = Variable(torch.FloatTensor(batch_size, offset_num*num_sampled_p, refimg_fea.size()[2], refimg_fea.size()[3]).zero_()).cuda()
            temp_num = 0
            for offset in range(offset_num):
                depth_offset = (offset-delta)*std
                depth_hypo = depth_init.detach()+depth_offset
                for sampled_p in range(num_sampled_p):
                    targetimg_fea_t = inverse_warp(targetimg_fea, depth_hypo, sampled_poses[:,sampled_p], intrinsics4, intrinsics_inv4)
                    cost[:,temp_num] = (refimg_fea*targetimg_fea_t).mean(dim=1)
                    temp_num = temp_num+1

            # Context refinement input: local costs + features + detached
            # initial depth + 1/4-resolution RGB; the network predicts a
            # residual added on top of the detached initial depth.
            ref_down = F.interpolate(ref,scale_factor=(1/4), mode='bilinear',align_corners=True,recompute_scale_factor=True)
            x = torch.cat((cost,refimg_fea,depth_init.unsqueeze(1).detach(),ref_down),dim=1)
            depth_init = depth_init.unsqueeze(1)
            depth = self.context(x) + depth_init.detach()

            # Upsample both predictions back to the input resolution.
            depth = F.interpolate(depth,scale_factor=4, mode='bilinear',align_corners=True,recompute_scale_factor=True)
            depth_init = F.interpolate(depth_init,scale_factor=4, mode='bilinear',align_corners=True,recompute_scale_factor=True)

        return depth_init, depth
| none | 1 | 2.244777 | 2 | |
examples.py | bryanabsmith/libopenrsdata | 0 | 6613047 | <gh_stars>0
#!/usr/bin/env python
"""Usage examples for libopenrsdata (residential-schools dataset helpers)."""
import libopenrsdata

# NOTE(review): this binds the *class object* itself, not an instance; the
# calls below only work if the library exposes static/class methods --
# confirm, otherwise instantiate with libopenrsdata.LibOpenRSData().
ex = libopenrsdata.LibOpenRSData

# Additional examples, kept commented out for reference:
#print("Number of schools: {0}".format(ex.get_number_of_schools()))
#print("Valid jurisdictions:")
#for juris in ex.get_valid_juris():
#    print(" :: {0}".format(juris))
#print("Number of schools in Ontario: {0}".format(ex.get_number_of_schools_by_juris("ON")))
#print(ex.get_all_school_data()[0]["lng"])
#print(ex.get_school_by_id(53))

# Print the schools whose records fall in the 1900-1950 year range.
print(ex.get_schools_by_year_range(1900, 1950))
| #!/usr/bin/env python
import libopenrsdata
ex = libopenrsdata.LibOpenRSData
#print("Number of schools: {0}".format(ex.get_number_of_schools()))
#print("Valid jurisdictions:")
#for juris in ex.get_valid_juris():
# print(" :: {0}".format(juris))
#print("Number of schools in Ontario: {0}".format(ex.get_number_of_schools_by_juris("ON")))
#print(ex.get_all_school_data()[0]["lng"])
#print(ex.get_school_by_id(53))
print(ex.get_schools_by_year_range(1900, 1950)) | en | 0.572128 | #!/usr/bin/env python #print("Number of schools: {0}".format(ex.get_number_of_schools())) #print("Valid jurisdictions:") #for juris in ex.get_valid_juris(): # print(" :: {0}".format(juris)) #print("Number of schools in Ontario: {0}".format(ex.get_number_of_schools_by_juris("ON"))) #print(ex.get_all_school_data()[0]["lng"]) #print(ex.get_school_by_id(53)) | 2.60038 | 3 |
setup.py | furukazu/deep_running | 72 | 6613048 | <filename>setup.py
# Packaging script for the deep_running package.
from setuptools import setup, Extension  # NOTE(review): Extension is imported but unused.
from setuptools import find_packages
from os import listdir

# The PyPI long description is taken verbatim from the README.
with open("README.md") as f:
    long_description = f.read()

# Install every file found under scripts/ as an executable script.
scripts = ["scripts/"+i for i in listdir("scripts")]

if __name__ == "__main__":
    setup(
        name="deep_running",
        scripts=scripts,
        version="0.0.1",
        description="Japanese Joke.",
        long_description=long_description,
        long_description_content_type="text/markdown",
        author="karaage0703",
        url="http://karaage.hatenadiary.jp",
        license="MIT License",
        packages=find_packages(),
        # NOTE(review): ">3.6" excludes Python 3.6 itself; ">=3.6" is the
        # more common intent -- confirm before changing.
        python_requires=">3.6",
    )
| <filename>setup.py
# Packaging script for the deep_running package.
from setuptools import setup, Extension  # NOTE(review): Extension is imported but unused.
from setuptools import find_packages
from os import listdir

# The PyPI long description is taken verbatim from the README.
with open("README.md") as f:
    long_description = f.read()

# Install every file found under scripts/ as an executable script.
scripts = ["scripts/"+i for i in listdir("scripts")]

if __name__ == "__main__":
    setup(
        name="deep_running",
        scripts=scripts,
        version="0.0.1",
        description="Japanese Joke.",
        long_description=long_description,
        long_description_content_type="text/markdown",
        author="karaage0703",
        url="http://karaage.hatenadiary.jp",
        license="MIT License",
        packages=find_packages(),
        # NOTE(review): ">3.6" excludes Python 3.6 itself; ">=3.6" is the
        # more common intent -- confirm before changing.
        python_requires=">3.6",
    )
| none | 1 | 1.654476 | 2 | |
python/python-vba-powerpoint/Win32COM - PowerPoint To Excel - Chart Objects.py | josephobonyo/sigma_coding_youtube | 893 | 6613049 | import win32com.client
from win32com.client import constants as c

# Export every Excel ChartObject in the workbook to its own PowerPoint slide.
# Requires an already-running Excel instance holding ChartObjects.xlsm.

# Attach to the running Excel instance.
ExcelApp = win32com.client.GetActiveObject("Excel.Application")

# Grab the workbook with the charts.
xlWorkbook = ExcelApp.Workbooks("ChartObjects.xlsm")

# Start a visible PowerPoint instance (gencache gives early-bound COM objects).
PPTApp = win32com.client.gencache.EnsureDispatch("PowerPoint.Application")
PPTApp.Visible = True

# Add a presentation to the PowerPoint Application, returns a Presentation Object.
PPTPresentation = PPTApp.Presentations.Add()

# BUGFIX: keep one running slide counter across *all* worksheets. The old
# per-worksheet `enumerate` index restarted at 0 for every sheet, so charts
# from a second worksheet were inserted back at the start of the
# presentation instead of being appended after the first sheet's slides.
slideIndex = 0

# Loop through each Worksheet and each chart on it.
for xlWorksheet in xlWorkbook.Worksheets:

    # Grab the ChartObjects Collection for this sheet.
    xlCharts = xlWorksheet.ChartObjects()

    for xlChart in xlCharts:

        # Each chart needs its own slide; Layout 12 is the blank layout.
        slideIndex += 1
        PPTSlide = PPTPresentation.Slides.Add(Index=slideIndex, Layout=12)

        # Display progress to the user.
        print('Exporting Chart {} from Worksheet {}'.format(xlChart.Name, xlWorksheet.Name))

        # Copy the chart to the clipboard and paste it onto the slide.
        # DataType=1 maps to ppPasteBitmap in PpPasteDataType -- confirm
        # against the enum if a different paste format is wanted.
        xlChart.Copy()
        PPTSlide.Shapes.PasteSpecial(DataType=1)

# Save the presentation (replace FILE_PATH with a real output path).
PPTPresentation.SaveAs(r"FILE_PATH")
| import win32com.client
from win32com.client import constants as c

# Export every Excel ChartObject in the workbook to its own PowerPoint slide.
# Requires an already-running Excel instance holding ChartObjects.xlsm.

# Attach to the running Excel instance.
ExcelApp = win32com.client.GetActiveObject("Excel.Application")

# Grab the workbook with the charts.
xlWorkbook = ExcelApp.Workbooks("ChartObjects.xlsm")

# Start a visible PowerPoint instance (gencache gives early-bound COM objects).
PPTApp = win32com.client.gencache.EnsureDispatch("PowerPoint.Application")
PPTApp.Visible = True

# Add a presentation to the PowerPoint Application, returns a Presentation Object.
PPTPresentation = PPTApp.Presentations.Add()

# BUGFIX: keep one running slide counter across *all* worksheets. The old
# per-worksheet `enumerate` index restarted at 0 for every sheet, so charts
# from a second worksheet were inserted back at the start of the
# presentation instead of being appended after the first sheet's slides.
slideIndex = 0

# Loop through each Worksheet and each chart on it.
for xlWorksheet in xlWorkbook.Worksheets:

    # Grab the ChartObjects Collection for this sheet.
    xlCharts = xlWorksheet.ChartObjects()

    for xlChart in xlCharts:

        # Each chart needs its own slide; Layout 12 is the blank layout.
        slideIndex += 1
        PPTSlide = PPTPresentation.Slides.Add(Index=slideIndex, Layout=12)

        # Display progress to the user.
        print('Exporting Chart {} from Worksheet {}'.format(xlChart.Name, xlWorksheet.Name))

        # Copy the chart to the clipboard and paste it onto the slide.
        # DataType=1 maps to ppPasteBitmap in PpPasteDataType -- confirm
        # against the enum if a different paste format is wanted.
        xlChart.Copy()
        PPTSlide.Shapes.PasteSpecial(DataType=1)

# Save the presentation (replace FILE_PATH with a real output path).
PPTPresentation.SaveAs(r"FILE_PATH")
| en | 0.822325 | # Grab the Active Instance of Excel. # Grab the workbook with the charts. # Create a new instance of PowerPoint and make sure it's visible. # Add a presentation to the PowerPoint Application, returns a Presentation Object. # Loop through each Worksheet. # Grab the ChartObjects Collection for each sheet. # Loop through each Chart in the ChartObjects Collection. # Each chart needs to be on it's own slide, so at this point create a new slide. # 12 is a blank layout # Display something to the user. # Copy the chart. # Paste the Object to the Slide # Save the presentation. | 3.124607 | 3 |
setup.py | julianfl0w/spiral_lens | 0 | 6613050 | #!/usr/bin/env python
from setuptools import setup, find_packages

# Packaging metadata for vulkanese, a Python abstraction over Vulkan.
setup(name='vulkanese',
      version='1.0',
      description='An abstraction of Vulkan',
      # NOTE(review): author/author_email are unfilled template placeholders.
      author='<NAME>',
      author_email='<EMAIL>',
      url='https://github.com/julianfl0w/vulkanese',
      packages=find_packages(),
      package_data={
          # Bundle every data file found inside the package directories.
          "": ["*"]
      },
      include_package_data=True
      )
from setuptools import setup, find_packages
setup(name='vulkanese',
version='1.0',
description='An abstraction of Vulkan',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/julianfl0w/vulkanese',
packages=find_packages(),
package_data={
# everything
"": ["*"]
},
include_package_data=True
) | en | 0.225893 | #!/usr/bin/env python # everything | 1.252898 | 1 |