import subprocess
from os import devnull
from os.path import join
# from os import mkdir
import sys
#from re import sub
from sympy import Symbol, sympify, nsimplify, fraction, S
from sympy.matrices import Matrix, diag, NonSquareMatrixError
from .ccobjects import CCBase, CCoef
from ...utils.misc import DotDict
from ...utils.misc import formatter_factory
from ...utils import ConfigReader
from ...utils.misc import ec_list, flux_list, ss_species_list
## Everything in this file can be a function rather than a static method
## better yet, almost everything can be part of symca. Finally everything can
## be in a single file.
__all__ = ['SymcaToolBox']
class SymcaToolBox(object):
"""The class with the functions used to populate SymcaData. The project is
structured in this way to abstract the 'work' needed to build the various
matrices away from the SymcaData class."""
@staticmethod
def get_nmatrix(mod):
"""
Returns a sympy matrix made from the N matrix in a Pysces model where
the elements are in the same order as they appear in the k and l
matrices in pysces.
We need this to make calculations easier later on.
"""
nmatrix = mod.nmatrix
# swap columns around to same order as kmatrix, store in new matrix
nmatrix_cols = nmatrix[:, mod.kmatrix_row]
        # swap rows around to same order as lmatrix, store in a new matrix
nmatrix_cols_rows = nmatrix_cols[mod.lmatrix_row, :]
# create Sympy symbolic matrix from the numpy ndarray
nmat = Matrix(nmatrix_cols_rows)
return nmat
@staticmethod
def get_num_ind_species(mod):
inds = len(mod.lmatrix_col)
return inds
@staticmethod
def get_num_ind_fluxes(mod):
inds = len(mod.kmatrix_col)
return inds
@staticmethod
def get_species_vector(mod):
"""
Returns a vector (sympy matrix) with the species in the correct order
"""
slist = []
# gets the order of the species from the lmatrix rows
for index in mod.lmatrix_row:
slist.append(mod.species[index])
svector = Matrix(sympify(slist))
#inds = len(mod.lmatrix_col)
#Sind = Matrix(svector[:inds])
#Sdep = Matrix(svector[inds:])
return svector
@staticmethod
def get_fluxes_vector(mod):
"""
Gets the dependent and independent fluxes (in the correct order)
"""
jlist = []
# gets the order of the fluxes from the kmatrix rows
for index in mod.kmatrix_row:
jlist.append('J_' + mod.reactions[index])
jvector = Matrix(sympify(jlist))
#inds = len(mod.kmatrix_col)
#Jind = Matrix(jvector[:inds])
#Jdep = Matrix(jvector[inds:])
return jvector
@staticmethod
def substitute_fluxes(all_fluxes, kmatrix):
"""
        Substitutes equivalent fluxes in the kmatrix (i.e. dependent fluxes
        with independent fluxes, or otherwise equal fluxes).
"""
new_fluxes = all_fluxes[:, :]
for row in xrange(kmatrix.rows - 1, -1, -1):
for row_above in xrange(row - 1, -1, -1):
if kmatrix[row, :] == kmatrix[row_above, :]:
new_fluxes[row] = new_fluxes[row_above]
return new_fluxes
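    # A small illustrative sketch (hypothetical values): if rows 0 and 2 of
    # the K matrix are identical, flux 2 is replaced by flux 0 in the
    # returned vector.
    #
    #   >>> from sympy import Matrix, sympify
    #   >>> K = Matrix([[1, 0], [0, 1], [1, 0]])
    #   >>> J = Matrix(sympify(['J_R1', 'J_R2', 'J_R3']))
    #   >>> SymcaToolBox.substitute_fluxes(J, K)
    #   # -> Matrix([[J_R1], [J_R2], [J_R1]])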
@staticmethod
def scale_matrix(all_elements, mat, inds):
"""
Scales the k or l matrix.
The procedure is the same for each matrix:

            (D^x)^(-1) * y * D^(x_i)

        where (D^x)^(-1) is the inverse diagonal of x (x being either the
        species or the fluxes), y is the matrix to be scaled (i.e. the k or l
        matrix), and D^(x_i) is the diagonal of the independent x.
"""
d_all_inv = diag(*all_elements).inv()
d_inds = diag(*inds)
scaled_matrix = d_all_inv * mat * d_inds
return scaled_matrix
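    # A minimal sketch of the scaling above (hypothetical 2x1 K matrix with
    # fluxes J1 and J2, of which J1 is independent):
    #
    #   >>> from sympy import Symbol, Matrix, diag
    #   >>> J1, J2 = Symbol('J1'), Symbol('J2')
    #   >>> K = Matrix([[1], [1]])
    #   >>> SymcaToolBox.scale_matrix([J1, J2], K, [J1])
    #   # -> Matrix([[1], [J1/J2]])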
@staticmethod
def get_es_matrix(mod, nmatrix, fluxes, species):
"""
Gets the esmatrix.
        Goes down the columns of the nmatrix (which correspond to the fluxes)
        to get the rows of the esmatrix.  A nested loop goes down the rows of
        the nmatrix (which correspond to the species) to get the columns of
        the esmatrix, so the format is

            ecReaction0_Species0  ecReaction0_Species1  ecReaction0_Species2
            ecReaction1_Species0  ecReaction1_Species1  ecReaction1_Species2
            ecReaction2_Species0  ecReaction2_Species1  ecReaction2_Species2

        where each name follows the pattern
        'ec' + reaction_name + '_' + species_name.
"""
nmat = nmatrix
elas = []
for col in range(nmat.cols):
current_reaction = fluxes[col]
elas_row = []
for row in range(nmat.rows):
current_species = species[row]
                ec_name = 'ec' + str(current_reaction)[2:] + \
                    '_' + str(current_species)
cond1 = getattr(mod, ec_name) != 0
if cond1:
elas_row.append(ec_name)
else:
elas_row.append(0)
elas.append(elas_row)
esmatrix = Matrix(elas)
return esmatrix
@staticmethod
def get_es_matrix_no_mca(mod, nmatrix, fluxes, species):
"""
Gets the esmatrix.
        Goes down the columns of the nmatrix (which correspond to the fluxes)
        to get the rows of the esmatrix.  A nested loop goes down the rows of
        the nmatrix (which correspond to the species) to get the columns of
        the esmatrix, so the format is

            ecReaction0_Species0  ecReaction0_Species1  ecReaction0_Species2
            ecReaction1_Species0  ecReaction1_Species1  ecReaction1_Species2
            ecReaction2_Species0  ecReaction2_Species1  ecReaction2_Species2

        where each name follows the pattern
        'ec' + reaction_name + '_' + species_name.
"""
nmat = nmatrix
elas = []
modifiers = dict(mod.__modifiers__)
for col in range(nmat.cols):
current_reaction = fluxes[col]
elas_row = []
for row in range(nmat.rows):
current_species = species[row]
                ec_name = 'ec' + str(current_reaction)[2:] + \
                    '_' + str(current_species)
cond1 = nmat[row,col] != 0
cond2 = str(current_species) in modifiers[str(current_reaction)[2:]]
if cond1 or cond2:
elas_row.append(ec_name)
else:
elas_row.append(0)
elas.append(elas_row)
esmatrix = Matrix(elas)
return esmatrix
@staticmethod
def simplify_matrix(matrix):
"""
Replaces floats with ints and puts elements with fractions
        on a single denominator.
"""
m = matrix[:, :]
for i, e in enumerate(m):
m[i] = nsimplify(e, rational=True).cancel()
return m
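    # A small sketch of the float-to-rational conversion (hypothetical 1x2
    # matrix):
    #
    #   >>> from sympy import Matrix
    #   >>> SymcaToolBox.simplify_matrix(Matrix([[0.5, 0.25]]))
    #   # -> Matrix([[1/2, 1/4]])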
@staticmethod
def adjugate_matrix(matrix):
"""
Returns the adjugate matrix which is the transpose of the
cofactor matrix.
Contains code adapted from sympy.
Specifically:
cofactorMatrix()
minorEntry()
minorMatrix()
cofactor()
"""
def cofactor_matrix(mat):
out = Matrix(mat.rows, mat.cols, lambda i, j:
cofactor(mat, i, j))
return out
def minor_entry(mat, i, j):
if not 0 <= i < mat.rows or not 0 <= j < mat.cols:
                raise ValueError(
                    "`i` and `j` must satisfy 0 <= i < `mat.rows` (%d) "
                    "and 0 <= j < `mat.cols` (%d)." % (mat.rows, mat.cols))
return SymcaToolBox.det_bareis(minor_matrix(mat, i, j))
def minor_matrix(mat, i, j):
if not 0 <= i < mat.rows or not 0 <= j < mat.cols:
                raise ValueError(
                    "`i` and `j` must satisfy 0 <= i < `mat.rows` (%d) "
                    "and 0 <= j < `mat.cols` (%d)." % (mat.rows, mat.cols))
m = mat.as_mutable()
m.row_del(i)
m.col_del(j)
return m[:, :]
def cofactor(mat, i, j):
if (i + j) % 2 == 0:
return minor_entry(mat, i, j)
else:
return -1 * minor_entry(mat, i, j)
return cofactor_matrix(matrix).transpose()
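    # For a generic 2x2 matrix this reproduces the familiar swap-and-negate
    # adjugate (a small illustrative check):
    #
    #   >>> from sympy import Matrix, symbols
    #   >>> a, b, c, d = symbols('a b c d')
    #   >>> SymcaToolBox.adjugate_matrix(Matrix([[a, b], [c, d]]))
    #   # -> Matrix([[d, -b], [-c, a]])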
@staticmethod
def det_bareis(matrix):
"""
Adapted from original det_bareis function in Sympy 0.7.3.
cancel() and expand() are removed from function to speed
up calculations. Maxima will be used to simplify the result
Original docstring below:
Compute matrix determinant using Bareis' fraction-free
algorithm which is an extension of the well known Gaussian
elimination method. This approach is best suited for dense
symbolic matrices and will result in a determinant with
minimal number of fractions. It means that less term
rewriting is needed on resulting formulae.
"""
mat = matrix
if not mat.is_square:
raise NonSquareMatrixError()
m, n = mat[:, :], mat.rows
if n == 1:
det = m[0, 0]
elif n == 2:
det = m[0, 0] * m[1, 1] - m[0, 1] * m[1, 0]
else:
sign = 1 # track current sign in case of column swap
for k in range(n - 1):
# look for a pivot in the current column
# and assume det == 0 if none is found
if m[k, k] == 0:
for i in range(k + 1, n):
if m[i, k] != 0:
m.row_swap(i, k)
sign *= -1
break
else:
return S.Zero
# proceed with Bareis' fraction-free (FF)
# form of Gaussian elimination algorithm
for i in range(k + 1, n):
for j in range(k + 1, n):
d = m[k, k] * m[i, j] - m[i, k] * m[k, j]
if k > 0:
d /= m[k - 1, k - 1]
m[i, j] = d
det = sign * m[n - 1, n - 1]
return det
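    # A quick sanity check with a made-up integer matrix; the fraction-free
    # elimination gives the ordinary determinant:
    #
    #   >>> from sympy import Matrix
    #   >>> SymcaToolBox.det_bareis(Matrix([[2, 0, 1], [1, 3, 0], [0, 1, 4]]))
    #   # -> 25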
@staticmethod
def invert(matrix, path_to):
"""
        Returns the numerators of the inverted matrix separately from the
        common denominator (the determinant of the matrix).
"""
common_denom = SymcaToolBox.det_bareis(matrix)
adjugate = SymcaToolBox.adjugate_matrix(matrix)
common_denom = SymcaToolBox.maxima_factor(common_denom, path_to)
#adjugate = self._maxima_factor('/home/carl/test.txt',adjugate)
cc_i_sol = adjugate, common_denom
return cc_i_sol
@staticmethod
def maxima_factor(expression, path_to):
"""
        This function is equivalent to sympy.cancel() but uses Maxima
        instead.
"""
maxima_in_file = join(path_to,'in.txt').replace('\\','\\\\')
maxima_out_file = join(path_to,'out.txt').replace('\\','\\\\')
if expression.is_Matrix:
expr_mat = expression[:, :]
# print expr_mat
print 'Simplifying matrix with ' + str(len(expr_mat)) + ' elements'
for i, e in enumerate(expr_mat):
sys.stdout.write('*')
sys.stdout.flush()
if (i + 1) % 50 == 0:
sys.stdout.write(' ' + str(i + 1) + '\n')
sys.stdout.flush()
# print e
expr_mat[i] = SymcaToolBox.maxima_factor(e, path_to)
sys.stdout.write('\n')
sys.stdout.flush()
return expr_mat
else:
batch_string = (
'stardisp:true;stringout("'
+ maxima_out_file + '",factor(' + str(expression) + '));')
# print batch_string
with open(maxima_in_file, 'w') as f:
f.write(batch_string)
config = ConfigReader.get_config()
if config['platform'] == 'win32':
maxima_command = [config['maxima_path'], '--batch=' + maxima_in_file]
else:
maxima_command = ['maxima', '--batch=' + maxima_in_file]
dn = open(devnull, 'w')
subprocess.call(maxima_command, stdin=dn, stdout=dn, stderr=dn)
simplified_expression = ''
with open(maxima_out_file) as f:
for line in f:
if line != '\n':
simplified_expression = line[:-2]
frac = fraction(sympify(simplified_expression))
# print frac[0].expand()/frac[1].expand()
return frac[0].expand() / frac[1].expand()
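    # For illustration only (assuming a working Maxima installation reachable
    # as configured above): for an expression such as (x**2 - 1)/(x - 1) the
    # batch file written by this method contains
    #
    #     stardisp:true;stringout("<path_to>/out.txt",factor((x**2 - 1)/(x - 1)));
    #
    # Maxima's factor() cancels the common factor; the last non-empty line of
    # out.txt is then read back, sympified and returned as
    # numerator.expand()/denominator.expand(), i.e. x + 1 in this case.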
@staticmethod
def solve_dep(cc_i_num, scaledk0, scaledl0, num_ind_fluxes, path_to):
"""
Calculates the dependent control matrices from the independent control
matrix CC_i_solution
"""
j_cci_sol = cc_i_num[:num_ind_fluxes, :]
s_cci_sol = cc_i_num[num_ind_fluxes:, :]
j_ccd_sol = scaledk0 * j_cci_sol
s_ccd_sol = scaledl0 * s_cci_sol
tempmatrix = j_cci_sol
for matrix in [j_ccd_sol, s_cci_sol, s_ccd_sol]:
if len(matrix) != 0:
tempmatrix = tempmatrix.col_join(matrix)
cc_sol = tempmatrix
cc_sol = SymcaToolBox.maxima_factor(cc_sol, path_to)
# print len(j_cci_sol)
# print len(j_ccd_sol)
# print len(s_cci_sol)
# print len(s_ccd_sol)
return cc_sol
@staticmethod
def build_cc_matrix(j, jind, sind, jdep, sdep):
"""
        Produces the matrices j_cci, j_ccd, s_cci and s_ccd, which hold the
        symbols for the independent and dependent flux control coefficients
        and the independent and dependent species control coefficients,
        respectively.
"""
j_cci = []
j_ccd = []
s_cci = []
s_ccd = []
for Ji in jind:
row = []
for R in j:
row.append('ccJ' + str(Ji)[2:] + '_' + str(R)[2:])
j_cci.append(row)
for Si in sind:
row = []
for R in j:
row.append('cc' + str(Si) + '_' + str(R)[2:])
s_cci.append(row)
for Jd in jdep:
row = []
for R in j:
row.append('ccJ' + str(Jd)[2:] + '_' + str(R)[2:])
j_ccd.append(row)
for Sd in sdep:
row = []
for R in j:
row.append('cc' + str(Sd) + '_' + str(R)[2:])
s_ccd.append(row)
j_cci = Matrix(j_cci)
j_ccd = Matrix(j_ccd)
s_cci = Matrix(s_cci)
s_ccd = Matrix(s_ccd)
#cc_i = j_cci.col_join(s_cci)
tempmatrix = j_cci
for matrix in [j_ccd, s_cci, s_ccd]:
if len(matrix) != 0:
tempmatrix = tempmatrix.col_join(matrix)
cc = tempmatrix
# print len(j_cci)
# print len(j_ccd)
# print len(s_cci)
# print len(s_ccd)
return cc
@staticmethod
def get_fix_denom(lmatrix, species_independent, species_dependent):
num_inds = len(species_independent)
num_deps = len(species_dependent)
if num_deps == 0:
return sympify('1')
else:
dependent_ls = lmatrix[num_inds:, :]
denom = sympify('1')
for row in range(dependent_ls.rows):
for each in dependent_ls[row, :] * species_independent * -1:
symbol_atoms = each.atoms(Symbol)
for symbol_atom in symbol_atoms:
if symbol_atom not in denom.atoms(Symbol):
denom = denom * symbol_atom
#denom = denom * each.atoms(Symbol).pop()
denom = denom * species_dependent[row]
return denom.nsimplify()
    @staticmethod
    def get_fix_denom_jannie(lmatrix, species_independent, species_dependent):
num_inds = len(species_independent)
num_deps = len(species_dependent)
if num_deps == 0:
return sympify('1')
else:
dependent_ls = lmatrix[num_inds:, :]
denom = sympify('1')
for row in range(dependent_ls.rows):
den_new = sympify('1')
for each in dependent_ls[row, :] * species_independent * -1:
symbol_atoms = each.atoms(Symbol)
for symbol_atom in symbol_atoms:
if den_new == 1:
den_new = den_new * symbol_atom
else:
den_new = den_new + symbol_atom
#denom = denom * each.atoms(Symbol).pop()
if den_new == 1:
den_new = den_new * species_dependent[row]
else:
den_new = den_new + species_dependent[row]
denom = denom * den_new
return denom.nsimplify()
@staticmethod
def fix_expressions(cc_num, common_denom_expr, lmatrix,
species_independent, species_dependent):
fix_denom = SymcaToolBox.get_fix_denom(
lmatrix,
species_independent,
species_dependent
)
fix = False
cd_num, cd_denom = fraction(common_denom_expr)
ret2 = cd_num
if type(cc_num) is list:
new_cc_num = cc_num[:]
else:
new_cc_num = cc_num[:, :]
for i, each in enumerate(new_cc_num):
new_cc_num[i] = ((each * cd_denom)).expand()
for each in new_cc_num:
for symb in fix_denom.atoms(Symbol):
if symb in each.atoms(Symbol):
fix = True
break
if fix: break
if fix:
for i, each in enumerate(new_cc_num):
new_cc_num[i] = (each / fix_denom).expand()
ret2 = (cd_num / fix_denom).expand()
return new_cc_num, ret2
@staticmethod
def spawn_cc_objects(mod, cc_names, cc_sol, common_denom_exp, ltxe):
common_denom_object = CCBase(mod,
'common_denominator',
common_denom_exp,
ltxe)
cc_object_list = [common_denom_object]
for name, num in zip(cc_names, cc_sol):
ccoef_object = CCoef(mod,
str(name),
num,
common_denom_object,
ltxe)
cc_object_list.append(ccoef_object)
return cc_object_list
@staticmethod
def make_internals_dict(cc_sol, cc_names, common_denom_expr, path_to):
simpl_dic = {}
for i, each in enumerate(cc_sol):
expr = each / common_denom_expr
expr = SymcaToolBox.maxima_factor(expr, path_to)
num, denom = fraction(expr)
            if denom not in simpl_dic:
simpl_dic[denom] = [[], []]
simpl_dic[denom][0].append(cc_names[i])
simpl_dic[denom][1].append(num)
return simpl_dic
@staticmethod
def make_CC_dot_dict(cc_objects):
CC = DotDict()
for cc in cc_objects:
CC[cc.name] = cc
CC._make_repr('"$" + v.latex_name + "$"', 'v.value',
formatter_factory())
return CC
@staticmethod
def build_inner_dict(cc_object):
deep_dict = {}
for key, value in cc_object.iteritems():
if key != 'common_denominator':
deepest_dict = {str(key): str(value.numerator)}
deep_dict.update(deepest_dict)
inner_dict = {str(cc_object.common_denominator.expression): deep_dict}
return inner_dict
@staticmethod
def build_outer_dict(symca_object):
containers = {}
containers['cc_results'] = SymcaToolBox.build_inner_dict(
getattr(symca_object, 'cc_results'))
counter = 0
while True:
CC_obj_name = 'cc_results_{0}'.format(counter)
try:
CC_obj_dict = getattr(symca_object, CC_obj_name)
except AttributeError:
break
containers[CC_obj_name] = SymcaToolBox.build_inner_dict(
CC_obj_dict)
counter += 1
return containers
@staticmethod
def make_inner_dict(cc_container, cc_container_name):
CC_dict = {}
CC_dict[cc_container_name] = dict(zip(
            [cc.name for cc in cc_container.values() if
             cc.name != 'common_denominator'],
            [cc.numerator for cc in cc_container.values() if
             cc.name != 'common_denominator']))
CC_dict[cc_container_name]['common_denominator'] = cc_container.common_denominator.expression
return CC_dict
@staticmethod
def generic_populate(mod, function, value = 1):
names = function(mod)
for name in names:
setattr(mod, name, value)
@staticmethod
def populate_with_fake_elasticities(mod):
SymcaToolBox.generic_populate(mod, ec_list)
@staticmethod
def populate_with_fake_fluxes(mod):
SymcaToolBox.generic_populate(mod, flux_list)
@staticmethod
def populate_with_fake_ss_concentrations(mod):
SymcaToolBox.generic_populate(mod, ss_species_list)
# OLD SAVE FUNCTIONS> Not as good as new ones
# @staticmethod
# def save_session(cc_list, common_denominator, path_to_pickle):
# mod = common_denominator.mod
# common_denominator.mod = ''
# for cc in cc_list:
# cc.mod = ''
# for cp in cc.control_patterns:
# cp.mod = ''
#
# cc_list.append(common_denominator)
# with open(path_to_pickle, 'w') as f:
# pickle.dump(cc_list, f)
#
# cc_list.pop()
# common_denominator.mod = mod
# for cc in cc_list:
# cc.mod = mod
# for cp in cc.control_patterns:
# cp.mod = mod
#
# @staticmethod
# def load_session(mod, path_to_pickle):
# with open(path_to_pickle) as f:
# cc_list = pickle.load_session(f)
#
# common_denominator = cc_list.pop()
#
# common_denominator.mod = mod
# for cc in cc_list:
# cc.mod = mod
# for cp in cc.control_patterns:
# cp.mod = mod
#
# cc_list.insert(0, common_denominator)
# return cc_list
|
{
"content_hash": "dd219e01f409f9d9ee64a3b8ffa2ca52",
"timestamp": "",
"source": "github",
"line_count": 684,
"max_line_length": 101,
"avg_line_length": 33.48684210526316,
"alnum_prop": 0.5166120934293822,
"repo_name": "exe0cdc/PyscesToolbox",
"id": "fded7f642d3354efaf91401f2bb98357b2c2774b",
"size": "22905",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "psctb/analyse/_symca/symca_toolbox.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "451196"
},
{
"name": "Papyrus",
"bytes": "3385"
},
{
"name": "Python",
"bytes": "337719"
}
],
"symlink_target": ""
}
|
"""
Associates products to each other for upselling purposes.
"""
from datetime import date
from django.conf import settings
from django.db import models
from django.utils.translation import get_language
from django.utils.translation import ugettext, ugettext_lazy as _
from satchmo import caching
from satchmo.caching.models import CachedObjectMixin
from satchmo.product.models import Product
import datetime
import logging
try:
from decimal import Decimal, getcontext
except ImportError:
from django.utils._decimal import Decimal, getcontext
log = logging.getLogger('upsell.models')
UPSELL_CHOICES=(
('CHECKBOX_1_FALSE', _('Checkbox to add 1')),
('CHECKBOX_1_TRUE', _('Checkbox to add 1, checked by default')),
('CHECKBOX_MATCH_FALSE', _('Checkbox to match quantity')),
('CHECKBOX_MATCH_TRUE', _('Checkbox to match quantity, checked by default')),
('FORM', _('Form with 0 quantity')),
)
class Upsell(models.Model, CachedObjectMixin):
target = models.ManyToManyField(Product, verbose_name=_('Target Product'),
related_name="upselltargets",
help_text = _("The products for which you want to show this goal product as an Upsell."))
goal = models.ForeignKey(Product, verbose_name=_('Goal Product'),
related_name="upsellgoals")
create_date = models.DateField(_("Creation Date"))
style = models.CharField(_("Upsell Style"), choices=UPSELL_CHOICES,
default='CHECKBOX_1_FALSE', max_length=20)
notes = models.TextField(_('Notes'), blank=True, null=True,
help_text = _("Internal notes"))
def _description(self):
"""Get the description, looking up by language code, falling back intelligently.
"""
language_code = get_language()
try:
trans = self.cache_get(trans=language_code)
except caching.NotCachedError, e:
trans = self._find_translation(language_code)
if trans:
return trans.description
else:
return ""
description = property(fget=_description)
def _find_translation(self, language_code):
c = self.translations.filter(languagecode__exact = language_code)
ct = c.count()
if not c or ct == 0:
pos = language_code.find('-')
if pos>-1:
short_code = language_code[:pos]
log.debug("%s: Trying to find root language content for: [%s]", self, short_code)
c = self.translations.filter(languagecode__exact = short_code)
ct = c.count()
if ct>0:
log.debug("%s: Found root language content for: [%s]", self, short_code)
if not c or ct == 0:
#log.debug("Trying to find default language content for: %s", self)
c = self.translations.filter(languagecode__istartswith = settings.LANGUAGE_CODE)
ct = c.count()
if not c or ct == 0:
#log.debug("Trying to find *any* language content for: %s", self)
c = self.translations.all()
ct = c.count()
if ct > 0:
trans = c[0]
else:
trans = None
self.cache_set(trans=language_code, value=trans)
return trans
def is_form(self):
"""Returns true if the style is a FORM"""
return self.style.startswith("FORM")
def is_qty_one(self):
"""Returns true if this style has a '1' in the center field"""
parts = self.style.split("_")
return parts[1] == '1'
def is_checked(self):
"""Returns true if this style ends with TRUE"""
return self.style.endswith('TRUE')
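    # A small worked example of how the style string decomposes (hypothetical,
    # unsaved instance):
    #
    #   >>> u = Upsell(style='CHECKBOX_MATCH_TRUE')
    #   >>> u.is_form(), u.is_qty_one(), u.is_checked()
    #   # -> (False, False, True)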
def __unicode__(self):
return u"Upsell for %s" % self.goal
def save(self, force_insert=False, force_update=False):
self.create_date = datetime.date.today()
self.cache_delete()
super(Upsell, self).save(force_insert=force_insert, force_update=force_update)
self.cache_set()
return self
class Meta:
ordering = ('goal',)
class UpsellTranslation(models.Model):
menu = models.ForeignKey(Upsell, related_name="translations")
languagecode = models.CharField(_('language'), max_length=10,
choices=settings.LANGUAGES, default=settings.LANGUAGES[0][0])
description = models.TextField(_('Description'), blank=True)
class Meta:
ordering=('languagecode', )
|
{
"content_hash": "cc8434ebb5ccc43532b842e35e4e264e",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 97,
"avg_line_length": 33.474074074074075,
"alnum_prop": 0.6085417127683116,
"repo_name": "sankroh/satchmo",
"id": "8c21fffb111c927cf6dd913b70abf3d93551b951",
"size": "4519",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "satchmo/upsell/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from __future__ import absolute_import
import ujson
from typing import Any, Mapping, List
from six import string_types
from zerver.lib.emoji import emoji_name_to_emoji_code
from zerver.lib.request import JsonableError
from zerver.lib.test_helpers import tornado_redirected_to_list, get_display_recipient, \
get_test_image_file
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import get_realm, RealmEmoji, Recipient, UserMessage
class ReactionEmojiTest(ZulipTestCase):
def test_missing_emoji(self):
# type: () -> None
"""
Sending reaction without emoji fails
"""
sender = self.example_email("hamlet")
result = self.client_put('/api/v1/messages/1/emoji_reactions/',
**self.api_auth(sender))
self.assertEqual(result.status_code, 400)
def test_add_invalid_emoji(self):
# type: () -> None
"""
Sending invalid emoji fails
"""
sender = self.example_email("hamlet")
result = self.client_put('/api/v1/messages/1/emoji_reactions/foo',
**self.api_auth(sender))
self.assert_json_error(result, "Emoji 'foo' does not exist")
def test_remove_invalid_emoji(self):
# type: () -> None
"""
Removing invalid emoji fails
"""
sender = self.example_email("hamlet")
result = self.client_delete('/api/v1/messages/1/emoji_reactions/foo',
**self.api_auth(sender))
self.assert_json_error(result, "Emoji 'foo' does not exist")
def test_add_deactivated_realm_emoji(self):
# type: () -> None
"""
Sending deactivated realm emoji fails.
"""
emoji = RealmEmoji.objects.get(name="green_tick")
emoji.deactivated = True
emoji.save(update_fields=['deactivated'])
sender = self.example_email("hamlet")
result = self.client_put('/api/v1/messages/1/emoji_reactions/green_tick',
**self.api_auth(sender))
self.assert_json_error(result, "Emoji 'green_tick' does not exist")
def test_valid_emoji(self):
# type: () -> None
"""
Reacting with valid emoji succeeds
"""
sender = self.example_email("hamlet")
result = self.client_put('/api/v1/messages/1/emoji_reactions/smile',
**self.api_auth(sender))
self.assert_json_success(result)
self.assertEqual(200, result.status_code)
def test_zulip_emoji(self):
# type: () -> None
"""
Reacting with zulip emoji succeeds
"""
sender = self.example_email("hamlet")
result = self.client_put('/api/v1/messages/1/emoji_reactions/zulip',
**self.api_auth(sender))
self.assert_json_success(result)
self.assertEqual(200, result.status_code)
def test_valid_emoji_react_historical(self):
# type: () -> None
"""
Reacting with valid emoji on a historical message succeeds
"""
stream_name = "Saxony"
self.subscribe(self.example_user("cordelia"), stream_name)
message_id = self.send_message(self.example_email("cordelia"), stream_name, Recipient.STREAM)
user_profile = self.example_user('hamlet')
sender = user_profile.email
# Verify that hamlet did not receive the message.
self.assertFalse(UserMessage.objects.filter(user_profile=user_profile,
message_id=message_id).exists())
# Have hamlet react to the message
result = self.client_put('/api/v1/messages/%s/emoji_reactions/smile' % (message_id,),
**self.api_auth(sender))
self.assert_json_success(result)
# Fetch the now-created UserMessage object to confirm it exists and is historical
user_message = UserMessage.objects.get(user_profile=user_profile, message_id=message_id)
self.assertTrue(user_message.flags.historical)
self.assertTrue(user_message.flags.read)
self.assertFalse(user_message.flags.starred)
def test_valid_realm_emoji(self):
# type: () -> None
"""
Reacting with valid realm emoji succeeds
"""
sender = self.example_email("hamlet")
emoji_name = 'green_tick'
result = self.client_put('/api/v1/messages/1/emoji_reactions/%s' % (emoji_name,),
**self.api_auth(sender))
self.assert_json_success(result)
def test_emoji_name_to_emoji_code(self):
# type: () -> None
"""
An emoji name is mapped canonically to emoji code.
"""
realm = get_realm('zulip')
# Test active realm emoji.
emoji_code, reaction_type = emoji_name_to_emoji_code(realm, 'green_tick')
self.assertEqual(emoji_code, 'green_tick')
self.assertEqual(reaction_type, 'realm_emoji')
# Test deactivated realm emoji.
emoji = RealmEmoji.objects.get(name="green_tick")
emoji.deactivated = True
emoji.save(update_fields=['deactivated'])
with self.assertRaises(JsonableError) as exc:
emoji_name_to_emoji_code(realm, 'green_tick')
self.assertEqual(str(exc.exception), "Emoji 'green_tick' does not exist")
# Test ':zulip:' emoji.
emoji_code, reaction_type = emoji_name_to_emoji_code(realm, 'zulip')
self.assertEqual(emoji_code, 'zulip')
self.assertEqual(reaction_type, 'zulip_extra_emoji')
# Test unicode emoji.
emoji_code, reaction_type = emoji_name_to_emoji_code(realm, 'astonished')
self.assertEqual(emoji_code, '1f632')
self.assertEqual(reaction_type, 'unicode_emoji')
# Test override unicode emoji.
overriding_emoji = RealmEmoji.objects.create(
name='astonished', realm=realm, file_name='astonished')
emoji_code, reaction_type = emoji_name_to_emoji_code(realm, 'astonished')
self.assertEqual(emoji_code, 'astonished')
self.assertEqual(reaction_type, 'realm_emoji')
        # Test deactivating the overriding realm emoji.
overriding_emoji.deactivated = True
overriding_emoji.save(update_fields=['deactivated'])
emoji_code, reaction_type = emoji_name_to_emoji_code(realm, 'astonished')
self.assertEqual(emoji_code, '1f632')
self.assertEqual(reaction_type, 'unicode_emoji')
# Test override `:zulip:` emoji.
overriding_emoji = RealmEmoji.objects.create(
name='zulip', realm=realm, file_name='zulip')
emoji_code, reaction_type = emoji_name_to_emoji_code(realm, 'zulip')
self.assertEqual(emoji_code, 'zulip')
self.assertEqual(reaction_type, 'realm_emoji')
# Test non-existent emoji.
with self.assertRaises(JsonableError) as exc:
emoji_name_to_emoji_code(realm, 'invalid_emoji')
self.assertEqual(str(exc.exception), "Emoji 'invalid_emoji' does not exist")
class ReactionMessageIDTest(ZulipTestCase):
def test_missing_message_id(self):
# type: () -> None
"""
Reacting without a message_id fails
"""
sender = self.example_email("hamlet")
result = self.client_put('/api/v1/messages//emoji_reactions/smile',
**self.api_auth(sender))
self.assertEqual(result.status_code, 404)
def test_invalid_message_id(self):
# type: () -> None
"""
Reacting to an invalid message id fails
"""
sender = self.example_email("hamlet")
result = self.client_put('/api/v1/messages/-1/emoji_reactions/smile',
**self.api_auth(sender))
self.assertEqual(result.status_code, 404)
def test_inaccessible_message_id(self):
# type: () -> None
"""
        Reacting to an inaccessible (for instance, private) message fails
"""
pm_sender = self.example_email("hamlet")
pm_recipient = self.example_email("othello")
reaction_sender = self.example_email("iago")
result = self.client_post("/api/v1/messages", {"type": "private",
"content": "Test message",
"to": pm_recipient},
**self.api_auth(pm_sender))
self.assert_json_success(result)
pm_id = result.json()['id']
result = self.client_put('/api/v1/messages/%s/emoji_reactions/smile' % (pm_id,),
**self.api_auth(reaction_sender))
self.assert_json_error(result, "Invalid message(s)")
class ReactionTest(ZulipTestCase):
def test_add_existing_reaction(self):
# type: () -> None
"""
Creating the same reaction twice fails
"""
pm_sender = self.example_email("hamlet")
pm_recipient = self.example_email("othello")
reaction_sender = pm_recipient
pm = self.client_post("/api/v1/messages", {"type": "private",
"content": "Test message",
"to": pm_recipient},
**self.api_auth(pm_sender))
self.assert_json_success(pm)
content = ujson.loads(pm.content)
pm_id = content['id']
first = self.client_put('/api/v1/messages/%s/emoji_reactions/smile' % (pm_id,),
**self.api_auth(reaction_sender))
self.assert_json_success(first)
second = self.client_put('/api/v1/messages/%s/emoji_reactions/smile' % (pm_id,),
**self.api_auth(reaction_sender))
self.assert_json_error(second, "Reaction already exists")
def test_remove_nonexisting_reaction(self):
# type: () -> None
"""
Removing a reaction twice fails
"""
pm_sender = self.example_email("hamlet")
pm_recipient = self.example_email("othello")
reaction_sender = pm_recipient
pm = self.client_post("/api/v1/messages", {"type": "private",
"content": "Test message",
"to": pm_recipient},
**self.api_auth(pm_sender))
self.assert_json_success(pm)
content = ujson.loads(pm.content)
pm_id = content['id']
add = self.client_put('/api/v1/messages/%s/emoji_reactions/smile' % (pm_id,),
**self.api_auth(reaction_sender))
self.assert_json_success(add)
first = self.client_delete('/api/v1/messages/%s/emoji_reactions/smile' % (pm_id,),
**self.api_auth(reaction_sender))
self.assert_json_success(first)
second = self.client_delete('/api/v1/messages/%s/emoji_reactions/smile' % (pm_id,),
**self.api_auth(reaction_sender))
self.assert_json_error(second, "Reaction does not exist")
class ReactionEventTest(ZulipTestCase):
def test_add_event(self):
# type: () -> None
"""
Recipients of the message receive the reaction event
and event contains relevant data
"""
pm_sender = self.example_user('hamlet')
pm_recipient = self.example_user('othello')
reaction_sender = pm_recipient
result = self.client_post("/api/v1/messages", {"type": "private",
"content": "Test message",
"to": pm_recipient.email},
**self.api_auth(pm_sender.email))
self.assert_json_success(result)
pm_id = result.json()['id']
expected_recipient_ids = set([pm_sender.id, pm_recipient.id])
events = [] # type: List[Mapping[str, Any]]
with tornado_redirected_to_list(events):
result = self.client_put('/api/v1/messages/%s/emoji_reactions/smile' % (pm_id,),
**self.api_auth(reaction_sender.email))
self.assert_json_success(result)
self.assertEqual(len(events), 1)
event = events[0]['event']
event_user_ids = set(events[0]['users'])
self.assertEqual(expected_recipient_ids, event_user_ids)
self.assertEqual(event['user']['email'], reaction_sender.email)
self.assertEqual(event['type'], 'reaction')
self.assertEqual(event['op'], 'add')
self.assertEqual(event['emoji_name'], 'smile')
self.assertEqual(event['message_id'], pm_id)
def test_remove_event(self):
# type: () -> None
"""
Recipients of the message receive the reaction event
and event contains relevant data
"""
pm_sender = self.example_user('hamlet')
pm_recipient = self.example_user('othello')
reaction_sender = pm_recipient
result = self.client_post("/api/v1/messages", {"type": "private",
"content": "Test message",
"to": pm_recipient.email},
**self.api_auth(pm_sender.email))
self.assert_json_success(result)
content = result.json()
pm_id = content['id']
expected_recipient_ids = set([pm_sender.id, pm_recipient.id])
add = self.client_put('/api/v1/messages/%s/emoji_reactions/smile' % (pm_id,),
**self.api_auth(reaction_sender.email))
self.assert_json_success(add)
events = [] # type: List[Mapping[str, Any]]
with tornado_redirected_to_list(events):
result = self.client_delete('/api/v1/messages/%s/emoji_reactions/smile' % (pm_id,),
**self.api_auth(reaction_sender.email))
self.assert_json_success(result)
self.assertEqual(len(events), 1)
event = events[0]['event']
event_user_ids = set(events[0]['users'])
self.assertEqual(expected_recipient_ids, event_user_ids)
self.assertEqual(event['user']['email'], reaction_sender.email)
self.assertEqual(event['type'], 'reaction')
self.assertEqual(event['op'], 'remove')
self.assertEqual(event['emoji_name'], 'smile')
self.assertEqual(event['message_id'], pm_id)
|
{
"content_hash": "71e091aeaaa4cd756278b9a66ddba1ff",
"timestamp": "",
"source": "github",
"line_count": 345,
"max_line_length": 101,
"avg_line_length": 42.35072463768116,
"alnum_prop": 0.5678598316337007,
"repo_name": "verma-varsha/zulip",
"id": "67a5dbfd6a7665edf59599af5b97d7c03eee6f8f",
"size": "14635",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zerver/tests/test_reactions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "426706"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "489996"
},
{
"name": "JavaScript",
"bytes": "2151770"
},
{
"name": "Nginx",
"bytes": "1280"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "85239"
},
{
"name": "Python",
"bytes": "3780334"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "45134"
}
],
"symlink_target": ""
}
|
import datetime
from tempest.api.compute import base
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as e
# Time that waits for until returning valid response
# TODO(takmatsu): Ideally this value would come from configuration.
VALID_WAIT = 30
class TenantUsagesTestJSON(base.BaseV2ComputeAdminTest):
@classmethod
def setup_clients(cls):
super(TenantUsagesTestJSON, cls).setup_clients()
cls.adm_client = cls.os_admin.tenant_usages_client
cls.client = cls.os_primary.tenant_usages_client
@classmethod
def resource_setup(cls):
super(TenantUsagesTestJSON, cls).resource_setup()
cls.tenant_id = cls.client.tenant_id
# Create a server in the demo tenant
cls.create_test_server(wait_until='ACTIVE')
now = datetime.datetime.now()
cls.start = cls._parse_strtime(now - datetime.timedelta(days=1))
cls.end = cls._parse_strtime(now + datetime.timedelta(days=1))
@classmethod
def _parse_strtime(cls, at):
# Returns formatted datetime
return at.strftime('%Y-%m-%dT%H:%M:%S.%f')
def call_until_valid(self, func, duration, *args, **kwargs):
# Call until get valid response for "duration"
# because tenant usage doesn't become available immediately
# after create VM.
def is_valid():
try:
self.resp = func(*args, **kwargs)
return True
except e.InvalidHTTPResponseBody:
return False
self.assertEqual(test_utils.call_until_true(is_valid, duration, 1),
                         True, "%s did not return valid response in %s secs" % (
                             func.__name__, duration))
return self.resp
@decorators.idempotent_id('062c8ae9-9912-4249-8b51-e38d664e926e')
def test_list_usage_all_tenants(self):
# Get usage for all tenants
tenant_usage = self.call_until_valid(
self.adm_client.list_tenant_usages, VALID_WAIT,
start=self.start, end=self.end, detailed="1")['tenant_usages'][0]
self.assertEqual(len(tenant_usage), 8)
@decorators.idempotent_id('94135049-a4c5-4934-ad39-08fa7da4f22e')
def test_get_usage_tenant(self):
# Get usage for a specific tenant
tenant_usage = self.call_until_valid(
self.adm_client.show_tenant_usage, VALID_WAIT,
self.tenant_id, start=self.start, end=self.end)['tenant_usage']
self.assertEqual(len(tenant_usage), 8)
@decorators.idempotent_id('9d00a412-b40e-4fd9-8eba-97b496316116')
def test_get_usage_tenant_with_non_admin_user(self):
# Get usage for a specific tenant with non admin user
tenant_usage = self.call_until_valid(
self.client.show_tenant_usage, VALID_WAIT,
self.tenant_id, start=self.start, end=self.end)['tenant_usage']
self.assertEqual(len(tenant_usage), 8)
|
{
"content_hash": "062f5a74054fabbbb734346392067ae9",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 77,
"avg_line_length": 39.21052631578947,
"alnum_prop": 0.6473154362416107,
"repo_name": "Juniper/tempest",
"id": "d4c60b349ed62b3c6dd41c698bfc6a76c56c6fe8",
"size": "3611",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tempest/api/compute/admin/test_simple_tenant_usage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4194970"
},
{
"name": "Shell",
"bytes": "19343"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from collections import defaultdict
from .util import mapping_mixin
from .. import QtCore, QtGui
import logging
logger = logging.getLogger(__name__)
_defaults = {
"check_box_hover_text": "Enable this widget",
"check_box_state": True,
}
class UtilsWidget(QtGui.QWidget):
"""
    `UtilsWidget` has a `QHBoxLayout`, an en/disable checkbox and a label
    with hover text.  Daughter classes of `UtilsWidget` just need to add
    their respective input box to self._layout and set the layout with
    :code:`self.setLayout(self._layout)` (a minimal sketch follows this
    class definition).
    """
def __init__(self, label_text, hover_text=None, has_check_box=False):
super(UtilsWidget, self).__init__()
# set the defaults
if hover_text is None:
hover_text = label_text
# make the label
self._lab = QtGui.QLabel(label_text)
# set the text to display on mouse cursor hover
self._lab.setToolTip(hover_text)
# make layout
self._layout = QtGui.QHBoxLayout()
# make the check box if it is needed
self._has_check_box = has_check_box
if self._has_check_box:
self._check_box = QtGui.QCheckBox()
self._check_box.setToolTip(_defaults["check_box_hover_text"])
self._check_box.setChecked(_defaults["check_box_state"])
# todo disable input when the check_box is not checked
self._layout.addWidget(self._check_box)
self._layout.addWidget(self._lab)
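# A minimal sketch of the daughter-class pattern described in the UtilsWidget
# docstring (a hypothetical widget, not part of this module): add the input
# box to self._layout and then set the layout.
#
#   class SpinBoxWidget(UtilsWidget):
#       def __init__(self, label_text, hover_text=None, has_check_box=False):
#           super(SpinBoxWidget, self).__init__(label_text=label_text,
#                                               hover_text=hover_text,
#                                               has_check_box=has_check_box)
#           self._spinbox = QtGui.QSpinBox(parent=self)
#           self._layout.addWidget(self._spinbox)
#           self.setLayout(self._layout)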
class Slider(UtilsWidget):
"""
Fancier version of a slider which includes a label and
a spinbox
"""
# export sub-set of slider signals
# this should be exhaustive eventually
valueChanged = QtCore.Signal(int)
rangeChanged = QtCore.Signal(int, int)
# todo make more things configurable
def __init__(self, label_text, min_v, max_v, tracking=True,
hover_text=None, has_check_box=False):
super(Slider, self).__init__(label_text=label_text,
hover_text=hover_text,
has_check_box=has_check_box)
# set up slider
self._slider = QtGui.QSlider(parent=self)
self._slider.setRange(min_v, max_v)
self._slider.setTracking(tracking)
self._slider.setSingleStep(1)
self._slider.setOrientation(QtCore.Qt.Horizontal)
# internal connections
self._slider.valueChanged.connect(self.valueChanged)
self._slider.rangeChanged.connect(self.rangeChanged)
# make buddy with label
        self._lab.setBuddy(self._slider)
# and its spin box
self._spinbox = QtGui.QSpinBox(parent=self)
self._spinbox.setRange(self._slider.minimum(), self._slider.maximum())
self._spinbox.valueChanged.connect(self._slider.setValue)
self._slider.valueChanged.connect(self._spinbox.setValue)
self._slider.rangeChanged.connect(self._spinbox.setRange)
        # add widgets
self._layout.addWidget(self._slider)
self._layout.addWidget(self._spinbox)
self.setLayout(self._layout)
# TODO make sure all the slots are included
@QtCore.Slot(int)
def setValue(self, val):
# internal call backs will take care of the spinbox
self._slider.setValue(val)
class DateTimeBox(UtilsWidget):
dateChanged = QtCore.Signal(QtCore.QDate)
dateTimeChanged = QtCore.Signal(QtCore.QDateTime)
timeChanged = QtCore.Signal(QtCore.QTime)
# todo make more things configurable
def __init__(self, label_text, hover_text=None, has_check_box=False):
# pass up the stack
super(DateTimeBox, self).__init__(label_text=label_text,
hover_text=hover_text,
has_check_box=has_check_box)
# make the date time box
self._datetime = QtGui.QDateTimeEdit(QtCore.QDate.currentDate())
self._datetime.setCalendarPopup(True)
self._datetime.setDisplayFormat("yyyy-MM-dd hh:mm:ss")
# buddy them up
self._lab.setBuddy(self._datetime)
# add the datetime widget to the layout
self._layout.addWidget(self._datetime)
# set the widget's layout
self.setLayout(self._layout)
# connect the signals
self._datetime.dateChanged.connect(self.dateChanged)
self._datetime.dateTimeChanged.connect(self.dateTimeChanged)
self._datetime.timeChanged.connect(self.timeChanged)
# forward the slots
@QtCore.Slot(QtCore.QDate)
def setDate(self, date):
self._datetime.setDate(date)
@QtCore.Slot(QtCore.QDateTime)
def setDateTime(self, dateTime):
self._datetime.setDateTime(dateTime)
@QtCore.Slot(QtCore.QTime)
def setTime(self, time):
self._datetime.setTime(time)
def getValue(self):
if self._has_check_box and self._check_box.isChecked():
            # PySide's QDateTime exposes toPython(); PyQt4's exposes
            # toPyDateTime(), hence the AttributeError fallback.
            try:
                return self._datetime.dateTime().toPython()
            except AttributeError:
                return self._datetime.dateTime().toPyDateTime()
return None
class ComboBox(UtilsWidget):
activated = QtCore.Signal(str)
currentIndexChanged = QtCore.Signal(str)
editTextChanged = QtCore.Signal(str)
highlighted = QtCore.Signal(str)
# todo make more things configurable
def __init__(self, label_text, list_of_strings, hover_text=None,
default_entry=0, editable=True, has_check_box=False):
# pass up the stack
super(ComboBox, self).__init__(label_text=label_text,
hover_text=hover_text,
has_check_box=has_check_box)
# make the cb
self._cb = QtGui.QComboBox()
self._cb.setEditable(editable)
# stash the text
self._list_of_strings = list_of_strings
# shove in the text
self._cb.addItems(list_of_strings)
# buddy them up
self._lab.setBuddy(self._cb)
# make and set the layout
# add the combo box to the layout defined in UtilsWidget
self._layout.addWidget(self._cb)
self.setLayout(self._layout)
# connect on the signals
self._cb.activated[str].connect(self.activated)
self._cb.currentIndexChanged[str].connect(self.currentIndexChanged)
self._cb.editTextChanged[str].connect(self.editTextChanged)
self._cb.highlighted[str].connect(self.highlighted)
# forward the slots
@QtCore.Slot()
def clear(self):
self._cb.clear()
@QtCore.Slot(int)
def setCurrentIndex(self, in_val):
self._cb.setCurrentIndex(in_val)
@QtCore.Slot(str)
def setEditText(self, in_str):
self._cb.setEditText(in_str)
def getValue(self):
if self._has_check_box and self._check_box.isChecked():
return self._list_of_strings[self._cb.currentIndex()]
return None
class LineEdit(UtilsWidget):
cursorPositionChanged = QtCore.Signal(int, int)
editingFinished = QtCore.Signal()
returnPressed = QtCore.Signal()
selectionChanged = QtCore.Signal()
textChanged = QtCore.Signal(str)
textEdited = QtCore.Signal(str)
def __init__(self, label_text, hover_text=None, editable=True,
has_check_box=False):
# pass up the stack
super(LineEdit, self).__init__(label_text=label_text,
hover_text=hover_text,
has_check_box=has_check_box)
# make the line edit box
self._line_editor = QtGui.QLineEdit()
# buddy them up
self._lab.setBuddy(self._line_editor)
# add the line edit widget to the layout
self._layout.addWidget(self._line_editor)
self.setLayout(self._layout)
# connect the signals
self._line_editor.cursorPositionChanged.connect(self.cursorPositionChanged)
self._line_editor.editingFinished.connect(self.editingFinished)
self._line_editor.returnPressed.connect(self.returnPressed)
self._line_editor.selectionChanged.connect(self.selectionChanged)
self._line_editor.textChanged.connect(self.textChanged)
self._line_editor.textEdited.connect(self.textEdited)
# forward the slots
@QtCore.Slot()
def clear(self):
self._line_editor.clear()
@QtCore.Slot()
def copy(self):
self._line_editor.copy()
@QtCore.Slot()
def cut(self):
self._line_editor.cut()
@QtCore.Slot()
def paste(self):
self._line_editor.paste()
@QtCore.Slot()
def redo(self):
self._line_editor.redo()
@QtCore.Slot()
def selectAll(self):
self._line_editor.selectAll()
@QtCore.Slot()
    def setText(self, in_str):
        self._line_editor.setText(in_str)
@QtCore.Slot()
def undo(self):
self._line_editor.undo()
def getValue(self):
if self._has_check_box and self._check_box.isChecked():
# returned as a QString
text = str(self._line_editor.text())
# check to see if it empty
if text == '':
return None
return text
return None
class CheckBox(UtilsWidget):
stateChanged = QtCore.Signal()
# todo make more things configurable
def __init__(self, label_text, hover_text=None, editable=True,
has_check_box=False):
# pass up the stack
super(CheckBox, self).__init__(label_text=label_text,
hover_text=hover_text,
has_check_box=has_check_box)
# make the check box
self._check = QtGui.QCheckBox()
# buddy them up
self._lab.setBuddy(self._check)
self._layout.addWidget(self._check)
self.setLayout(self._layout)
# connect the signal
self._check.stateChanged.connect(self.stateChanged)
# forward the slots
# no slots to forward
def getValue(self):
if self._has_check_box and self._check_box.isChecked():
return self._check.isChecked()
return None
class TripleSpinner(QtGui.QGroupBox):
"""
A class to wrap up the logic for dealing with a min/max/step
triple spin box.
"""
# signal to be emitted when the spin boxes are changed
# and settled
valueChanged = QtCore.Signal(float, float)
def __init__(self, title='', parent=None):
QtGui.QGroupBox.__init__(self, title, parent=parent)
self._spinbox_min_intensity = QtGui.QDoubleSpinBox(parent=self)
self._spinbox_max_intensity = QtGui.QDoubleSpinBox(parent=self)
self._spinbox_intensity_step = QtGui.QDoubleSpinBox(parent=self)
ispiner_form = QtGui.QFormLayout()
ispiner_form.addRow("min", self._spinbox_min_intensity)
ispiner_form.addRow("max", self._spinbox_max_intensity)
ispiner_form.addRow("step", self._spinbox_intensity_step)
self.setLayout(ispiner_form)
# TODO
@QtCore.Slot(float, float)
def setValues(self, bottom, top):
"""
"""
pass
@QtCore.Slot(float, float)
def setLimits(self, bottom, top):
"""
"""
pass
@QtCore.Slot(float)
def setStep(self, step):
"""
"""
pass
@property
def values(self):
        return (self._spinbox_min_intensity.value(),
                self._spinbox_max_intensity.value())
class PairSpinner(QtGui.QGroupBox):
valueChanged = QtCore.Signal(float)
rangeChanged = QtCore.Signal(float, float)
def __init__(self, init_min, init_max,
init_step, parent=None, title='',
value_str=None, step_str=None):
QtGui.QGroupBox.__init__(self, title, parent=parent)
if value_str is None:
value_str = 'value'
if step_str is None:
step_str = 'step'
self._spinbox_value = QtGui.QDoubleSpinBox(parent=self)
self._spinbox_step = QtGui.QDoubleSpinBox(parent=self)
self._spinbox_step.valueChanged.connect(
self._spinbox_value.setSingleStep)
self._spinbox_value.valueChanged.connect(
self.valueChanged)
ispiner_form = QtGui.QFormLayout()
ispiner_form.addRow(value_str, self._spinbox_value)
ispiner_form.addRow(step_str, self._spinbox_step)
self.setLayout(ispiner_form)
self.setStep(init_step)
self.setRange(init_min, init_max)
@QtCore.Slot(float)
def setStep(self, new_step):
self._spinbox_step.setValue(new_step)
@QtCore.Slot(float, float)
def setRange(self, new_min, new_max):
self._spinbox_value.setMinimum(new_min)
self._spinbox_value.setMaximum(new_max)
self.rangeChanged.emit(new_min, new_max)
class ControlContainer(QtGui.QGroupBox, mapping_mixin):
_delim = '.'
_dispatch_map = {'slider': 'create_slider'}
def create_widget(self, key, type_str, param_dict):
create_fun_name = self._dispatch_map[type_str]
create_fun = getattr(self, create_fun_name)
return create_fun(key, **param_dict)
def __len__(self):
print('len')
return len(list(iter(self)))
def __contains__(self, key):
print('contains')
return key in iter(self)
def __init__(self, title, parent=None):
# call parent constructor
QtGui.QGroupBox.__init__(self, title, parent=parent)
# nested containers
self._containers = dict()
# all non-container contents of this container
self._contents = dict()
# specialized listings
# this is a dict keyed on type of dicts
# The inner dicts are keyed on name
self._by_type = defaultdict(dict)
# make the layout
self._layout = QtGui.QVBoxLayout()
# add it to self
self.setLayout(self._layout)
def __getitem__(self, key):
print(key)
        # TODO make this sensible; un-wrap KeyError exceptions
try:
# split the key
split_key = key.strip(self._delim).split(self._delim, 1)
except TypeError:
raise KeyError("key is not a string")
# if one element back -> no splitting needed
if len(split_key) == 1:
return self._contents[split_key[0]]
# else, at least one layer of testing
else:
# unpack the key parts
outer, inner = split_key
# get the container and pass through the remaining key
return self._containers[outer][inner]
def create_container(self, key, container_title=None):
"""
Create a nested container with in this container
TODO : add rest of GroupBox parameters
Parameters
----------
key : str
The key used to identify this container
container_title : str or None
The title of the container.
If None, defaults to the key.
            If you want no title, use ''
Returns
-------
control_container : ControlContainer
The container created.
"""
if container_title is None:
container_title = key
control_container = ControlContainer(container_title, parent=self)
self._layout.addWidget(control_container)
self._containers[key] = control_container
return control_container
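    # A small usage sketch (hypothetical names, inside a running Qt
    # application) of nested containers and the dotted-key access provided
    # by __getitem__/_add_widget:
    #
    #   >>> panel = ControlContainer('display')
    #   >>> sub = panel.create_container('colour')
    #   >>> slider = sub.create_slider('alpha', 0, 100)
    #   >>> panel['colour.alpha']   # same Slider object as `slider`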
def create_button(self, key):
pass
def create_checkbox(self, key):
pass
def create_combobox(self, key, key_list, editable=True, title=None):
if title is None:
title = key
cb = ComboBox(title, key_list, editable=editable)
self._add_widget(key, cb)
return cb
def create_dict_display(self, key, input_dict):
pass
def create_pairspinner(self, key, *args, **kwargs):
ds = PairSpinner(*args, **kwargs)
self._add_widget(key, ds)
return ds
def create_text(self, key, text):
"""
Create and add a text label to the control panel
"""
# create text
tmp_label = QtGui.QLabel(text)
self._add_widget(key, tmp_label)
def create_radiobuttons(self, key):
pass
def create_slider(self, key, min_val, max_val, label=None):
"""
        Parameters
        ----------
        key : str
            The key used to identify this slider
        min_val, max_val : int
            The minimum and maximum slider values
        label : str or None
            The label text.  If None, defaults to the key.
"""
if label is None:
label = key
# set up slider
slider = Slider(label, min_val, max_val)
self._add_widget(key, slider)
return slider
def create_triplespinbox(self, key):
pass
def _add_widget(self, key, in_widget):
split_key = key.strip(self._delim).rsplit(self._delim, 1)
# key is not nested, add to this object
if len(split_key) == 1:
key = split_key[0]
# add to the type dict
self._by_type[type(in_widget)][key] = in_widget
# add to the contents list
self._contents[key] = in_widget
# add to layout
self._layout.addWidget(in_widget)
# else, grab the nested container and add it to that
else:
container, key = split_key
self[container]._add_widget(key, in_widget)
def iter_containers(self):
return self._iter_helper_container([])
def get_container(self, key):
"""
Get a (possibly nested) container (the normal
iterator skips these). We may end up with two
parallel sets of mapping functions.
"""
split_key = key.strip(self._delim).rsplit(self._delim, 1)
if len(split_key) == 1:
return self._containers[split_key[0]]
        return self._containers[split_key[0]].get_container(split_key[1])
def _iter_helper(self, cur_path_list):
"""
Recursively (depth-first) walk the tree and return the names
of the leaves
Parameters
----------
cur_path_list : list of str
A list of the current path
"""
for k, v in six.iteritems(self._containers):
for inner_v in v._iter_helper(cur_path_list + [k]):
yield inner_v
for k in six.iterkeys(self._contents):
yield self._delim.join(cur_path_list + [k])
def _iter_helper_container(self, cur_path_list):
"""
Recursively (depth-first) walk the tree and return the names
of the containers
Parameters
----------
cur_path_list : list of str
A list of the current path
"""
for k, v in six.iteritems(self._containers):
for inner_v in v._iter_helper_container(cur_path_list + [k]):
yield inner_v
if len(cur_path_list):
yield self._delim.join(cur_path_list)
def __iter__(self):
return self._iter_helper([])
def addStretch(self):
self._layout.addStretch()
class DictDisplay(QtGui.QGroupBox):
"""
A generic widget for displaying dictionaries
Parameters
----------
title : string
Widget title
ignore_list : iterable or None
keys to ignore
parent : QWidget or None
Parent widget, passed up stack
"""
def __init__(self, title, ignore_list=None, parent=None):
# pass up the stack, GroupBox takes care of the title
QtGui.QGroupBox.__init__(self, title, parent=parent)
if ignore_list is None:
ignore_list = ()
# make layout
self.full_layout = QtGui.QVBoxLayout()
# set layout
self.setLayout(self.full_layout)
# make a set of the ignore list
self._ignore = set(ignore_list)
self._disp_table = []
@QtCore.Slot(dict)
def update(self, in_dict):
"""
updates the table
Parameters
----------
in_dict : dict
The dictionary to display
"""
# remove everything that is there
for c in self._disp_table:
c.deleteLater()
# make a new list
self._disp_table = []
# add the keys alphabetically
for k, v in sorted(list(in_dict.iteritems())):
# if key in the ignore list, continue
if k in self._ignore:
continue
self._add_row(k, v)
def _add_row(self, k, v):
"""
Private function
Adds a row to the table
Parameters
----------
k : object
The key
v : object
The value
"""
# make a widget for our row
tmp_widget = QtGui.QWidget(self)
tmp_layout = QtGui.QHBoxLayout()
tmp_widget.setLayout(tmp_layout)
# add the key and value to the row widget
tmp_layout.addWidget(QtGui.QLabel(str(k) + ':'))
tmp_layout.addStretch()
tmp_layout.addWidget(QtGui.QLabel(str(v)))
# add the row widget to the full layout
self.full_layout.addWidget(tmp_widget)
        # add the row widget to the display table
self._disp_table.append(tmp_widget)
|
{
"content_hash": "c085ca09f6c1f6c6d98937969cf29da3",
"timestamp": "",
"source": "github",
"line_count": 674,
"max_line_length": 83,
"avg_line_length": 31.5459940652819,
"alnum_prop": 0.5923243344934626,
"repo_name": "giltis/xray-vision",
"id": "551706122c1076edc7a5577decddd1e51d2f5cfb",
"size": "23744",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "xray_vision/qt_widgets/control_widgets.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "211396"
},
{
"name": "Shell",
"bytes": "39"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
# PROJECT IMPORTS
from grappelli.tests.models import Category, Entry
site = admin.AdminSite(name="Admin Site")
class CategoryOptions(admin.ModelAdmin):
list_display = ("id", "name",)
list_display_links = ("name",)
class EntryOptions(admin.ModelAdmin):
list_display = ("id", "title", "category", "user",)
list_display_links = ("title",)
def get_queryset(self, request):
qs = super(EntryOptions, self).get_queryset(request)
if request.user.is_superuser:
return qs
return qs.filter(user=request.user)
site.register(Category, CategoryOptions)
site.register(Entry, EntryOptions)
|
{
"content_hash": "86728d3c36451ce273af288e47ffd604",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 60,
"avg_line_length": 25.846153846153847,
"alnum_prop": 0.6875,
"repo_name": "lz1988/django-web2015",
"id": "10716f1ae8aa37b1d9700a2adc05a48d692a3f81",
"size": "706",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "mysite/mysiteapp/static/django-grappelli-master/grappelli/tests/admin.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "797682"
},
{
"name": "CSS",
"bytes": "527578"
},
{
"name": "Emacs Lisp",
"bytes": "152779"
},
{
"name": "Groff",
"bytes": "61139"
},
{
"name": "HTML",
"bytes": "3184026"
},
{
"name": "JavaScript",
"bytes": "760809"
},
{
"name": "Python",
"bytes": "13157847"
},
{
"name": "Ruby",
"bytes": "1758"
},
{
"name": "Shell",
"bytes": "154036"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "Tcl",
"bytes": "2476"
},
{
"name": "Yacc",
"bytes": "7550"
}
],
"symlink_target": ""
}
|
import unittest
from collections import namedtuple
import mock
from stacker import exceptions
from stacker.actions import build
from stacker.actions.build import resolve_parameters
from stacker.context import Context
from stacker.exceptions import StackDidNotChange
from stacker.providers.base import BaseProvider
from stacker.status import (
COMPLETE,
PENDING,
SKIPPED,
SUBMITTED
)
def mock_stack(parameters):
return {
'Parameters': [
{'ParameterKey': k, 'ParameterValue': v} for k, v in
parameters.items()
]
}
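# For example, mock_stack({'Address': '10.0.0.1'}) returns the minimal
# CloudFormation-style stack dict used by the tests below:
#
#   {'Parameters': [{'ParameterKey': 'Address', 'ParameterValue': '10.0.0.1'}]}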
class TestProvider(BaseProvider):
def __init__(self, outputs=None, *args, **kwargs):
self._outputs = outputs or {}
def set_outputs(self, outputs):
self._outputs = outputs
def get_stack(self, stack_name, **kwargs):
if stack_name not in self._outputs:
raise exceptions.StackDoesNotExist(stack_name)
return {"name": stack_name, "outputs": self._outputs[stack_name]}
def get_outputs(self, stack_name, *args, **kwargs):
stack = self.get_stack(stack_name)
return stack["outputs"]
class TestBuildAction(unittest.TestCase):
def setUp(self):
self.context = Context({"namespace": "namespace"})
self.build_action = build.Action(self.context, provider=TestProvider())
def _get_context(self, **kwargs):
config = {"stacks": [
{"name": "vpc"},
{"name": "bastion", "parameters": {"test": "vpc::something"}},
{"name": "db", "parameters": {"test": "vpc::something",
"else": "bastion::something"}},
{"name": "other", "parameters": {}}
]}
return Context({"namespace": "namespace"}, config=config, **kwargs)
def test_resolve_parameters_referencing_non_existant_stack(self):
parameters = {
"param_1": "mock::output_1",
}
self.build_action.provider.set_outputs({})
mock_blueprint = mock.MagicMock()
type(mock_blueprint).parameters = parameters
with self.assertRaises(exceptions.StackDoesNotExist):
self.build_action._resolve_parameters(parameters,
mock_blueprint)
def test_handle_missing_params(self):
stack = {'StackName': 'teststack'}
def_params = {"Address": "192.168.0.1"}
required = ["Address"]
result = self.build_action._handle_missing_parameters(def_params,
required, stack)
self.assertEqual(result, def_params.items())
def test_gather_missing_from_stack(self):
stack_params = {"Address": "10.0.0.1"}
stack = mock_stack(stack_params)
def_params = {}
required = ["Address"]
self.assertEqual(
self.build_action._handle_missing_parameters(def_params, required,
stack),
stack_params.items())
def test_missing_params_no_stack(self):
params = {}
required = ["Address"]
with self.assertRaises(exceptions.MissingParameterException) as cm:
self.build_action._handle_missing_parameters(params, required)
self.assertEqual(cm.exception.parameters, required)
def test_stack_params_dont_override_given_params(self):
stack_params = {"Address": "10.0.0.1"}
stack = mock_stack(stack_params)
def_params = {"Address": "192.168.0.1"}
required = ["Address"]
result = self.build_action._handle_missing_parameters(def_params,
required, stack)
self.assertEqual(result, def_params.items())
def test_get_dependencies(self):
context = self._get_context()
build_action = build.Action(context)
dependencies = build_action._get_dependencies()
self.assertEqual(
dependencies[context.get_fqn("bastion")],
set([context.get_fqn("vpc")]),
)
self.assertEqual(
dependencies[context.get_fqn("db")],
set([context.get_fqn(s) for s in ["vpc", "bastion"]]),
)
self.assertFalse(dependencies[context.get_fqn("other")])
def test_get_stack_execution_order(self):
context = self._get_context()
build_action = build.Action(context)
dependencies = build_action._get_dependencies()
execution_order = build_action.get_stack_execution_order(dependencies)
self.assertEqual(
execution_order,
[context.get_fqn(s) for s in ["other", "vpc", "bastion", "db"]],
)
def test_generate_plan(self):
context = self._get_context()
build_action = build.Action(context)
plan = build_action._generate_plan()
self.assertEqual(
plan.keys(),
[context.get_fqn(s) for s in ["other", "vpc", "bastion", "db"]],
)
def test_dont_execute_plan_when_outline_specified(self):
context = self._get_context()
build_action = build.Action(context)
with mock.patch.object(build_action, "_generate_plan") as \
mock_generate_plan:
build_action.run(outline=True)
self.assertEqual(mock_generate_plan().execute.call_count, 0)
def test_execute_plan_when_outline_not_specified(self):
context = self._get_context()
build_action = build.Action(context)
with mock.patch.object(build_action, "_generate_plan") as \
mock_generate_plan:
build_action.run(outline=False)
self.assertEqual(mock_generate_plan().execute.call_count, 1)
def test_launch_stack_step_statuses(self):
mock_provider = mock.MagicMock()
mock_stack = mock.MagicMock()
context = self._get_context()
build_action = build.Action(context, provider=mock_provider)
plan = build_action._generate_plan()
_, step = plan.list_pending()[0]
step.stack = mock.MagicMock()
step.stack.locked = False
# mock provider shouldn't return a stack at first since it hasn't been
# launched
mock_provider.get_stack.return_value = None
with mock.patch.object(build_action, "s3_stack_push"):
# initial status should be PENDING
self.assertEqual(step.status, PENDING)
# initial run should return SUBMITTED since we've passed off to CF
status = step.run()
step.set_status(status)
self.assertEqual(status, SUBMITTED)
self.assertEqual(status.reason, "creating new stack")
# provider should now return the CF stack since it exists
mock_provider.get_stack.return_value = mock_stack
# simulate that we're still in progress
mock_provider.is_stack_in_progress.return_value = True
mock_provider.is_stack_completed.return_value = False
status = step.run()
step.set_status(status)
# status should still be SUBMITTED since we're waiting for it to
# complete
self.assertEqual(status, SUBMITTED)
self.assertEqual(status.reason, "creating new stack")
# simulate completed stack
mock_provider.is_stack_completed.return_value = True
mock_provider.is_stack_in_progress.return_value = False
status = step.run()
step.set_status(status)
self.assertEqual(status, COMPLETE)
self.assertEqual(status.reason, "creating new stack")
# simulate stack should be skipped
mock_provider.is_stack_completed.return_value = False
mock_provider.is_stack_in_progress.return_value = False
mock_provider.update_stack.side_effect = StackDidNotChange
status = step.run()
step.set_status(status)
self.assertEqual(status, SKIPPED)
self.assertEqual(status.reason, "nochange")
# simulate an update is required
mock_provider.reset_mock()
mock_provider.update_stack.side_effect = None
step.set_status(PENDING)
status = step.run()
step.set_status(status)
self.assertEqual(status, SUBMITTED)
self.assertEqual(status.reason, "updating existing stack")
self.assertEqual(mock_provider.update_stack.call_count, 1)
def test_should_update(self):
test_scenario = namedtuple("test_scenario",
["locked", "force", "result"])
test_scenarios = (
test_scenario(locked=False, force=False, result=True),
test_scenario(locked=False, force=True, result=True),
test_scenario(locked=True, force=False, result=False),
test_scenario(locked=True, force=True, result=True)
)
mock_stack = mock.MagicMock(["locked", "force", "name"])
mock_stack.name = "test-stack"
for t in test_scenarios:
mock_stack.locked = t.locked
mock_stack.force = t.force
self.assertEqual(build.should_update(mock_stack), t.result)
def test_should_submit(self):
test_scenario = namedtuple("test_scenario",
["enabled", "result"])
test_scenarios = (
test_scenario(enabled=False, result=False),
test_scenario(enabled=True, result=True),
)
mock_stack = mock.MagicMock(["enabled", "name"])
mock_stack.name = "test-stack"
for t in test_scenarios:
mock_stack.enabled = t.enabled
self.assertEqual(build.should_submit(mock_stack), t.result)
class TestFunctions(unittest.TestCase):
""" test module level functions """
def setUp(self):
self.ctx = Context({"namespace": "test"})
self.prov = mock.MagicMock()
self.bp = mock.MagicMock()
def test_resolve_parameters_unused_parameter(self):
self.bp.parameters = {
"a": {
"type": "String",
"description": "A"},
"b": {
"type": "String",
"description": "B"}
}
params = {"a": "Apple", "c": "Carrot"}
p = resolve_parameters(params, self.bp, self.ctx, self.prov)
self.assertNotIn("c", p)
self.assertIn("a", p)
def test_resolve_parameters_none_conversion(self):
self.bp.parameters = {
"a": {
"type": "String",
"description": "A"},
"b": {
"type": "String",
"description": "B"}
}
params = {"a": None, "c": "Carrot"}
p = resolve_parameters(params, self.bp, self.ctx, self.prov)
self.assertNotIn("a", p)
def test_resolve_parameters_resolve_outputs(self):
self.bp.parameters = {
"a": {
"type": "String",
"description": "A"},
"b": {
"type": "String",
"description": "B"}
}
params = {"a": "other-stack::a", "b": "Banana"}
self.prov.get_output.return_value = "Apple"
p = resolve_parameters(params, self.bp, self.ctx, self.prov)
kall = self.prov.get_output.call_args
args, kwargs = kall
self.assertEqual(args[0], "test-other-stack")
self.assertEqual(args[1], "a")
self.assertEqual(p["a"], "Apple")
self.assertEqual(p["b"], "Banana")
def test_resolve_parameters_multiple_outputs(self):
def get_output(stack, param):
d = {"a": "Apple", "c": "Carrot"}
return d[param]
self.bp.parameters = {
"a": {
"type": "String",
"description": "A"},
"b": {
"type": "String",
"description": "B"}
}
params = {"a": "other-stack::a,other-stack::c", "b": "Banana"}
self.prov.get_output.side_effect = get_output
p = resolve_parameters(params, self.bp, self.ctx, self.prov)
self.assertEqual(self.prov.get_output.call_count, 2)
self.assertEqual(p["a"], "Apple,Carrot")
self.assertEqual(p["b"], "Banana")
# Test multi-output with spaces
params = {"a": "other-stack::a, other-stack::c", "b": "Banana"}
self.prov.get_output.side_effect = get_output
p = resolve_parameters(params, self.bp, self.ctx, self.prov)
self.assertEqual(self.prov.get_output.call_count, 4)
self.assertEqual(p["a"], "Apple,Carrot")
self.assertEqual(p["b"], "Banana")
def test_resolve_parameters_output_does_not_exist(self):
def get_output(stack, param):
d = {"c": "Carrot"}
return d[param]
self.bp.parameters = {
"a": {
"type": "String",
"description": "A"
},
}
params = {"a": "other-stack::a"}
self.prov.get_output.side_effect = get_output
with self.assertRaises(exceptions.OutputDoesNotExist) as cm:
resolve_parameters(params, self.bp, self.ctx, self.prov)
exc = cm.exception
self.assertEqual(exc.stack_name, "test-other-stack")
# Not sure this is actually what we want - should probably change it
# so the output is just the output name, not the stack name + the
# output name
self.assertEqual(exc.output, "other-stack::a")
def test_resolve_parameters_booleans(self):
self.bp.parameters = {
"a": {
"type": "String",
"description": "A"},
"b": {
"type": "String",
"description": "B"},
}
params = {"a": True, "b": False}
p = resolve_parameters(params, self.bp, self.ctx, self.prov)
self.assertEqual("true", p["a"])
self.assertEqual("false", p["b"])
|
{
"content_hash": "c0aacff5d3a715358c1a3f316e9d7c4a",
"timestamp": "",
"source": "github",
"line_count": 365,
"max_line_length": 79,
"avg_line_length": 38.50684931506849,
"alnum_prop": 0.5657061543934543,
"repo_name": "mhahn/stacker",
"id": "6ca4d375ac6a5f7e6bbc8494801313d76497d698",
"size": "14055",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stacker/tests/actions/test_build.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "187"
},
{
"name": "Python",
"bytes": "250329"
},
{
"name": "Shell",
"bytes": "216"
}
],
"symlink_target": ""
}
|
"""
Internal implementation of the request body validation middleware.
"""
import base64
import re
import jsonschema
from jsonschema import exceptions as jsonschema_exc
import netaddr
from oslo_utils import timeutils
from oslo_utils import uuidutils
import rfc3986
import six
from nova.api.validation import parameter_types
from nova import exception
from nova.i18n import _
@jsonschema.FormatChecker.cls_checks('date-time')
def _validate_datetime_format(instance):
try:
timeutils.parse_isotime(instance)
except ValueError:
return False
else:
return True
@jsonschema.FormatChecker.cls_checks('base64')
def _validate_base64_format(instance):
try:
base64.decodestring(instance)
except base64.binascii.Error:
return False
return True
@jsonschema.FormatChecker.cls_checks('cidr')
def _validate_cidr_format(cidr):
try:
netaddr.IPNetwork(cidr)
except netaddr.AddrFormatError:
return False
if '/' not in cidr:
return False
if re.search('\s', cidr):
return False
return True
@jsonschema.FormatChecker.cls_checks('uuid')
def _validate_uuid_format(instance):
return uuidutils.is_uuid_like(instance)
@jsonschema.FormatChecker.cls_checks('uri')
def _validate_uri(instance):
return rfc3986.is_valid_uri(instance, require_scheme=True,
require_authority=True)
@jsonschema.FormatChecker.cls_checks('name_with_leading_trailing_spaces',
exception.InvalidName)
def _validate_name_with_leading_trailing_spaces(instance):
regex = parameter_types.valid_name_leading_trailing_spaces_regex
try:
if re.search(regex.regex, instance):
return True
except TypeError:
# The name must be a string. If instance isn't a string, the
# TypeError will be raised here.
pass
raise exception.InvalidName(reason=regex.reason)
@jsonschema.FormatChecker.cls_checks('name', exception.InvalidName)
def _validate_name(instance):
regex = parameter_types.valid_name_regex
try:
if re.search(regex.regex, instance):
return True
except TypeError:
# The name must be a string. If instance isn't a string, the
# TypeError will be raised here.
pass
raise exception.InvalidName(reason=regex.reason)
@jsonschema.FormatChecker.cls_checks('cell_name_with_leading_trailing_spaces',
exception.InvalidName)
def _validate_cell_name_with_leading_trailing_spaces(instance):
regex = parameter_types.valid_cell_name_leading_trailing_spaces_regex
try:
if re.search(regex.regex, instance):
return True
except TypeError:
# The name must be a string. If instance isn't a string, the
# TypeError will be raised here.
pass
raise exception.InvalidName(reason=regex.reason)
@jsonschema.FormatChecker.cls_checks('cell_name', exception.InvalidName)
def _validate_cell_name(instance):
regex = parameter_types.valid_cell_name_regex
try:
if re.search(regex.regex, instance):
return True
except TypeError:
# The name must be a string. If instance isn't a string, the
# TypeError will be raised here.
pass
raise exception.InvalidName(reason=regex.reason)
def _soft_validate_additional_properties(validator,
additional_properties_value,
instance,
schema):
"""This validator function is used for legacy v2 compatible mode in v2.1.
This skips the additional-properties checking but keeps checking
'patternProperties', which is used for the metadata API.
If every property on the instance is specified in the schema, this will
return without any effect. If there are any extra properties, they will be
handled as follows:
- if the instance passed to the method is not of type "object", this
method will return without any effect.
- if the 'additional_properties_value' parameter is True, this method will
return without any effect.
- if the schema has an additionalProperties value of True, the extra
properties on the instance will not be touched.
- if the schema has an additionalProperties value of False and there
aren't patternProperties specified, the extra properties will be stripped
from the instance.
- if the schema has an additionalProperties value of False and there
are patternProperties specified, the extra properties will not be
touched and raise validation error if pattern doesn't match.
"""
if (not validator.is_type(instance, "object") or
additional_properties_value):
return
properties = schema.get("properties", {})
patterns = "|".join(schema.get("patternProperties", {}))
extra_properties = set()
for prop in instance:
if prop not in properties:
if patterns:
if not re.search(patterns, prop):
extra_properties.add(prop)
else:
extra_properties.add(prop)
if not extra_properties:
return
if patterns:
error = "Additional properties are not allowed (%s %s unexpected)"
if len(extra_properties) == 1:
verb = "was"
else:
verb = "were"
yield jsonschema_exc.ValidationError(
error % (", ".join(repr(extra) for extra in extra_properties),
verb))
else:
for prop in extra_properties:
del instance[prop]
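# Illustrative trace of the rules above (not part of the original module):
# with a schema such as
#
#   {"type": "object",
#    "properties": {"name": {"type": "string"}},
#    "additionalProperties": False}
#
# and an instance {"name": "demo", "bogus": 1}, iterating this generator
# strips the unknown "bogus" key, leaving {"name": "demo"}.  If the schema
# also declared patternProperties and "bogus" matched no pattern, a
# ValidationError would be yielded instead of stripping.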
class FormatChecker(jsonschema.FormatChecker):
"""A FormatChecker that can output the message from the cause exception.
We need understandable validation error messages for users. When a
custom checker raises an exception, the FormatChecker outputs a
readable message provided by the checker.
"""
def check(self, instance, format):
"""Check whether the instance conforms to the given format.
:argument instance: the instance to check
:type: any primitive type (str, number, bool)
:argument str format: the format that instance should conform to
:raises: :exc:`FormatError` if instance does not conform to format
"""
if format not in self.checkers:
return
# For safety reasons custom checkers can be registered with
# allowed exception types. Anything else will fall into the
# default formatter.
func, raises = self.checkers[format]
result, cause = None, None
try:
result = func(instance)
except raises as e:
cause = e
if not result:
msg = "%r is not a %r" % (instance, format)
raise jsonschema_exc.FormatError(msg, cause=cause)
class _SchemaValidator(object):
"""A validator class
This class extends Draft4Validator to validate the minimum/maximum
value of a string number (e.g. '10'). This change can be removed when
we tighten up the API definition and the XML conversion.
Also FormatCheckers are added for checking data formats which would be
passed through nova api commonly.
"""
validator = None
validator_org = jsonschema.Draft4Validator
def __init__(self, schema, relax_additional_properties=False):
validators = {
'minimum': self._validate_minimum,
'maximum': self._validate_maximum,
}
if relax_additional_properties:
validators[
'additionalProperties'] = _soft_validate_additional_properties
validator_cls = jsonschema.validators.extend(self.validator_org,
validators)
format_checker = FormatChecker()
self.validator = validator_cls(schema, format_checker=format_checker)
def validate(self, *args, **kwargs):
try:
self.validator.validate(*args, **kwargs)
except jsonschema.ValidationError as ex:
if isinstance(ex.cause, exception.InvalidName):
detail = ex.cause.format_message()
elif len(ex.path) > 0:
# NOTE: For OpenStack-wide message consistency, this error
# message is written in a format similar to WSME.
detail = _("Invalid input for field/attribute %(path)s."
" Value: %(value)s. %(message)s") % {
'path': ex.path.pop(), 'value': ex.instance,
'message': ex.message
}
else:
detail = ex.message
raise exception.ValidationError(detail=detail)
except TypeError as ex:
# NOTE: If a non-string value is passed to the patternProperties
# parameter, a TypeError is raised; it is caught here.
detail = six.text_type(ex)
raise exception.ValidationError(detail=detail)
def _number_from_str(self, instance):
try:
value = int(instance)
except (ValueError, TypeError):
try:
value = float(instance)
except (ValueError, TypeError):
return None
return value
def _validate_minimum(self, validator, minimum, instance, schema):
instance = self._number_from_str(instance)
if instance is None:
return
return self.validator_org.VALIDATORS['minimum'](validator, minimum,
instance, schema)
def _validate_maximum(self, validator, maximum, instance, schema):
instance = self._number_from_str(instance)
if instance is None:
return
return self.validator_org.VALIDATORS['maximum'](validator, maximum,
instance, schema)
|
{
"content_hash": "8c0f3b19aa8753cdb54162425ded7733",
"timestamp": "",
"source": "github",
"line_count": 284,
"max_line_length": 79,
"avg_line_length": 35.74295774647887,
"alnum_prop": 0.6301842183036154,
"repo_name": "BeyondTheClouds/nova",
"id": "392689d114900df3d43ed81a06799e59fc9bb859",
"size": "10781",
"binary": false,
"copies": "4",
"ref": "refs/heads/disco/mitaka",
"path": "nova/api/validation/validators.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1612"
},
{
"name": "JavaScript",
"bytes": "3159"
},
{
"name": "Python",
"bytes": "17483458"
},
{
"name": "Shell",
"bytes": "36658"
},
{
"name": "Smarty",
"bytes": "295884"
}
],
"symlink_target": ""
}
|
import sys
import pyeapi
import ssl
import re
def re_in_list(regex,list):
for l in list:
m = re.match('(%s)' % regex, l)
if m:
return l
return None
def main():
opcodes = [
'--name',
'--remove'
]
opcode=sys.argv.pop(1)
if opcode not in opcodes:
print "INVALID OPERATION: USE --name or --remove"
sys.exit(1)
if opcode == '--name':
vlan_num=sys.argv.pop(1)
vlan_name=sys.argv.pop(1)
elif opcode == '--remove':
vlan_num = sys.argv.pop(1)
vlan_name = None
vlan_string = "vlan %s" %vlan_num
name_string = "name %s" %vlan_name
ssl._create_default_https_context = ssl._create_unverified_context
remote_connect=pyeapi.connect_to("pynet-sw4")
response=remote_connect.enable("show running-config")
config=response[0]['result']['cmds']
if vlan_string in config.keys():
is_configured = True
vlan_config = config[vlan_string]
else:
is_configured = False
if opcode == '--name':
if is_configured:
print "vlan %s IS CONFIGURED" %vlan_num
print " NAME IS %s" %name_string
if not name_string in vlan_config.keys():
print " BUT NAME IS INCORRECT"
print "RECONFIGURE VLAN NAME"
commands = [vlan_string,name_string]
print "COMMANDS=%s" %commands
remote_connect.config(commands)
else:
print "vlan %s IS NOT CONFIGURED" %vlan_num
commands = [vlan_string,name_string]
print "COMMANDS=%s" %commands
remote_connect.config(commands)
print "vlan %s CONFIGURED" %vlan_num
elif opcode=='--remove':
if is_configured:
commands = ['no %s' %vlan_string]
remote_connect.config(commands)
print "vlan %s REMOVED" %vlan_num
else:
print "vlan %s IS NOT CONFIGURED" %vlan_num
if __name__ == "__main__":
main()
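# Illustrative invocations based on the argument handling above (the VLAN
# number and name are arbitrary examples; the switch alias "pynet-sw4" is
# hard-coded in main()):
#
#   python ex2.py --name 100 blue    # ensure vlan 100 exists and is named "blue"
#   python ex2.py --remove 100       # remove vlan 100 if it is configured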
|
{
"content_hash": "2160e3d860a6c834621dfaf6c768e71d",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 70,
"avg_line_length": 28.380281690140844,
"alnum_prop": 0.5508684863523573,
"repo_name": "patrebert/pynet_cert",
"id": "9b58e39c2fa330c321486644745bd8ac9f6d379b",
"size": "2135",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "class7/ex2/ex2.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "144"
},
{
"name": "Python",
"bytes": "69997"
},
{
"name": "Shell",
"bytes": "597"
}
],
"symlink_target": ""
}
|
"""Utilities for probability distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import hashlib
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
def assert_close(
x, y, data=None, summarize=None, message=None, name="assert_close"):
"""Assert that x and y are within machine epsilon of each other.
Args:
x: Floating-point `Tensor`
y: Floating-point `Tensor`
data: The tensors to print out if the condition is `False`. Defaults to
error message and first few entries of `x` and `y`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
name: A name for this operation (optional).
Returns:
Op raising `InvalidArgumentError` if |x - y| > machine epsilon.
"""
message = message or ""
x = ops.convert_to_tensor(x, name="x")
y = ops.convert_to_tensor(y, name="y")
if data is None:
data = [
message,
"Condition x ~= y did not hold element-wise: x = ", x.name, x, "y = ",
y.name, y
]
if x.dtype.is_integer:
return check_ops.assert_equal(
x, y, data=data, summarize=summarize, message=message, name=name)
with ops.name_scope(name, "assert_close", [x, y, data]):
tol = np.finfo(x.dtype.as_numpy_dtype).eps
condition = math_ops.reduce_all(math_ops.less_equal(math_ops.abs(x-y), tol))
return control_flow_ops.Assert(
condition, data, summarize=summarize)
def assert_integer_form(
x, data=None, summarize=None, message=None,
int_dtype=None, name="assert_integer_form"):
"""Assert that x has integer components (or floats equal to integers).
Args:
x: Floating-point `Tensor`
data: The tensors to print out if the condition is `False`. Defaults to
error message and first few entries of `x` and `y`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
int_dtype: A `tf.dtype` used to cast the float to. The default (`None`)
implies the smallest possible signed int will be used for casting.
name: A name for this operation (optional).
Returns:
Op raising `InvalidArgumentError` if `cast(x, int_dtype) != x`.
"""
with ops.name_scope(name, values=[x, data]):
x = ops.convert_to_tensor(x, name="x")
if x.dtype.is_integer:
return control_flow_ops.no_op()
message = message or "{} has non-integer components".format(x.op.name)
if int_dtype is None:
try:
int_dtype = {
dtypes.float16: dtypes.int16,
dtypes.float32: dtypes.int32,
dtypes.float64: dtypes.int64,
}[x.dtype.base_dtype]
except KeyError:
raise TypeError("Unrecognized type {}".format(x.dtype.name))
return check_ops.assert_equal(
x, math_ops.cast(math_ops.cast(x, int_dtype), x.dtype),
data=data, summarize=summarize, message=message, name=name)
def assert_symmetric(matrix):
matrix_t = array_ops.matrix_transpose(matrix)
return control_flow_ops.with_dependencies(
[check_ops.assert_equal(matrix, matrix_t)], matrix)
def embed_check_nonnegative_integer_form(
x, name="embed_check_nonnegative_integer_form"):
"""Assert x is a non-negative tensor, and optionally of integers."""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
assertions = [
check_ops.assert_non_negative(
x, message="'{}' must be non-negative.".format(x.op.name)),
]
if not x.dtype.is_integer:
assertions += [
assert_integer_form(
x, message="'{}' cannot contain fractional components.".format(
x.op.name)),
]
return control_flow_ops.with_dependencies(assertions, x)
def same_dynamic_shape(a, b):
"""Returns whether a and b have the same dynamic shape.
Args:
a: `Tensor`
b: `Tensor`
Returns:
`bool` `Tensor` representing if both tensors have the same shape.
"""
a = ops.convert_to_tensor(a, name="a")
b = ops.convert_to_tensor(b, name="b")
# Here we can't just do math_ops.equal(a.shape, b.shape), since
# static shape inference may break the equality comparison between
# shape(a) and shape(b) in math_ops.equal.
def all_shapes_equal():
return math_ops.reduce_all(math_ops.equal(
array_ops.concat([array_ops.shape(a), array_ops.shape(b)], 0),
array_ops.concat([array_ops.shape(b), array_ops.shape(a)], 0)))
# One of the shapes isn't fully defined, so we need to use the dynamic
# shape.
return control_flow_ops.cond(
math_ops.equal(array_ops.rank(a), array_ops.rank(b)),
all_shapes_equal,
lambda: constant_op.constant(False))
def get_logits_and_probs(logits=None,
probs=None,
multidimensional=False,
validate_args=False,
name="get_logits_and_probs"):
"""Converts logit to probabilities (or vice-versa), and returns both.
Args:
logits: Floating-point `Tensor` representing log-odds.
probs: Floating-point `Tensor` representing probabilities.
multidimensional: Python `bool`, default `False`.
If `True`, represents whether the last dimension of `logits` or `probs`,
a `[N1, N2, ... k]` dimensional tensor, representing the
logit or probability of `shape[-1]` classes.
validate_args: Python `bool`, default `False`. When `True`, either assert
`0 <= probs <= 1` (if not `multidimensional`) or that the last dimension
of `probs` sums to one.
name: A name for this operation (optional).
Returns:
logits, probs: Tuple of `Tensor`s. If `probs` has an entry that is `0` or
`1`, then the corresponding entry in the returned logit will be `-Inf` and
`Inf` respectively.
Raises:
ValueError: if neither `probs` nor `logits` were passed in, or both were.
"""
with ops.name_scope(name, values=[probs, logits]):
if (probs is None) == (logits is None):
raise ValueError("Must pass probs or logits, but not both.")
if probs is None:
logits = ops.convert_to_tensor(logits, name="logits")
if not logits.dtype.is_floating:
raise TypeError("logits must have floating type.")
# We can early return since we constructed probs and therefore know
# they're valid.
if multidimensional:
if validate_args:
logits = embed_check_categorical_event_shape(logits)
return logits, nn.softmax(logits, name="probs")
return logits, math_ops.sigmoid(logits, name="probs")
probs = ops.convert_to_tensor(probs, name="probs")
if not probs.dtype.is_floating:
raise TypeError("probs must have floating type.")
if validate_args:
with ops.name_scope("validate_probs"):
one = constant_op.constant(1., probs.dtype)
dependencies = [check_ops.assert_non_negative(probs)]
if multidimensional:
probs = embed_check_categorical_event_shape(probs)
dependencies += [assert_close(math_ops.reduce_sum(probs, -1), one,
message="probs does not sum to 1.")]
else:
dependencies += [check_ops.assert_less_equal(
probs, one, message="probs has components greater than 1.")]
probs = control_flow_ops.with_dependencies(dependencies, probs)
with ops.name_scope("logits"):
if multidimensional:
# Here we don't compute the multidimensional case in a manner consistent
# with the unidimensional case; instead we follow the TF convention.
# Typically, you might expect to see
# logits = log(probs) - log(probs[pivot]). A side-effect of
# being consistent with the TF approach is that the unidimensional case
# implicitly handles the second dimension but the multidimensional case
# explicitly keeps the pivot dimension.
return math_ops.log(probs), probs
return math_ops.log(probs) - math_ops.log1p(-1. * probs), probs
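# Worked example (illustrative, not part of the original module): in the
# unidimensional case the returned logit is the usual log-odds,
# logits = log(probs) - log1p(-probs).
#
#   logits, probs = get_logits_and_probs(probs=[0.25, 0.5, 0.75])
#   # logits ~= [-1.0986, 0., 1.0986], and sigmoid(logits) recovers probs.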
def _is_known_unsigned_by_dtype(dt):
"""Helper returning True if dtype is known to be unsigned."""
return {
dtypes.bool: True,
dtypes.uint8: True,
dtypes.uint16: True,
}.get(dt.base_dtype, False)
def _is_known_signed_by_dtype(dt):
"""Helper returning True if dtype is known to be signed."""
return {
dtypes.float16: True,
dtypes.float32: True,
dtypes.float64: True,
dtypes.int8: True,
dtypes.int16: True,
dtypes.int32: True,
dtypes.int64: True,
}.get(dt.base_dtype, False)
def _is_known_dtype(dt):
"""Helper returning True if dtype is known."""
return _is_known_unsigned_by_dtype(dt) or _is_known_signed_by_dtype(dt)
def _largest_integer_by_dtype(dt):
"""Helper returning the largest integer exactly representable by dtype."""
if not _is_known_dtype(dt):
raise TypeError("Unrecognized dtype: {}".format(dt.name))
if dt.is_floating:
return int(2**(np.finfo(dt.as_numpy_dtype).nmant + 1))
if dt.is_integer:
return np.iinfo(dt.as_numpy_dtype).max
if dt.base_dtype == dtypes.bool:
return int(1)
# We actually can't land here but keep the case for completeness.
raise TypeError("Unrecognized dtype: {}".format(dt.name))
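# Worked example (illustrative, not part of the original module): float32 has
# 23 mantissa bits, so the largest exactly representable integer is
# 2**(23 + 1) = 16777216; for int32 it is np.iinfo(np.int32).max = 2**31 - 1.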
def _smallest_integer_by_dtype(dt):
"""Helper returning the smallest integer exactly representable by dtype."""
if not _is_known_dtype(dt):
raise TypeError("Unrecognized dtype: {}".format(dt.name))
if _is_known_unsigned_by_dtype(dt):
return 0
return -1 * _largest_integer_by_dtype(dt)
def _is_integer_like_by_dtype(dt):
"""Helper returning True if dtype.is_integer or is `bool`."""
if not _is_known_dtype(dt):
raise TypeError("Unrecognized dtype: {}".format(dt.name))
return dt.is_integer or dt.base_dtype == dtypes.bool
def embed_check_categorical_event_shape(
categorical_param,
name="embed_check_categorical_event_shape"):
"""Embeds checks that categorical distributions don't have too many classes.
A categorical-type distribution is one which, e.g., returns the class label
rather than a one-hot encoding. E.g., `Categorical(probs)`.
Since distributions output samples in the same dtype as the parameters, we
must ensure that casting doesn't lose precision. That is, the
`parameter.dtype` implies a maximum number of classes. However, since shape is
`int32` and categorical variables are presumed to be indexes into a `Tensor`,
we must also ensure that the number of classes is no larger than the largest
possible `int32` index, i.e., `2**31-1`.
In other words the number of classes, `K`, must satisfy the following
condition:
```python
K <= min(
int(2**31 - 1), # Largest possible int32 index.
{
dtypes.float16: int(2**11), # Largest int as a float16.
dtypes.float32: int(2**24),
dtypes.float64: int(2**53),
}.get(categorical_param.dtype.base_dtype, 0))
```
Args:
categorical_param: Floating-point `Tensor` representing parameters of
distribution over categories. The rightmost shape is presumed to be the
number of categories.
name: A name for this operation (optional).
Returns:
categorical_param: Input `Tensor` with appropriate assertions embedded.
Raises:
TypeError: if `categorical_param` has an unknown `dtype`.
ValueError: if we can statically identify `categorical_param` as being too
large (for being closed under int32/float casting).
"""
with ops.name_scope(name, values=[categorical_param]):
x = ops.convert_to_tensor(categorical_param, name="categorical_param")
# The size must not exceed both of:
# - The largest possible int32 (since categorical values are presumed to be
# indexes into a Tensor).
# - The largest possible integer exactly representable under the given
# floating-point dtype (since we need to cast to/from).
#
# The chosen floating-point thresholds are 2**(1 + mantissa_bits).
# For more details, see:
# https://en.wikipedia.org/wiki/Floating-point_arithmetic#Internal_representation
x_dtype = x.dtype.base_dtype
max_event_size = (_largest_integer_by_dtype(x_dtype)
if x_dtype.is_floating else 0)
if max_event_size == 0:
raise TypeError("Unable to validate size of unrecognized dtype "
"({}).".format(x_dtype.name))
try:
x_shape_static = x.get_shape().with_rank_at_least(1)
except ValueError:
raise ValueError("A categorical-distribution parameter must have "
"at least 1 dimension.")
if x_shape_static[-1].value is not None:
event_size = x_shape_static[-1].value
if event_size < 2:
raise ValueError("A categorical-distribution parameter must have at "
"least 2 events.")
if event_size > max_event_size:
raise ValueError(
"Number of classes exceeds `dtype` precision, i.e., "
"{} implies shape ({}) cannot exceed {}.".format(
x_dtype.name, event_size, max_event_size))
return x
else:
event_size = array_ops.shape(x, name="x_shape")[-1]
return control_flow_ops.with_dependencies([
check_ops.assert_rank_at_least(
x, 1, message=("A categorical-distribution parameter must have "
"at least 1 dimension.")),
check_ops.assert_greater_equal(
array_ops.shape(x)[-1], 2,
message=("A categorical-distribution parameter must have at "
"least 2 events.")),
check_ops.assert_less_equal(
event_size, max_event_size,
message="Number of classes exceeds `dtype` precision, "
"i.e., {} dtype cannot exceed {} shape.".format(
x_dtype.name, max_event_size)),
], x)
def embed_check_integer_casting_closed(
x,
target_dtype,
assert_nonnegative=True,
name="embed_check_casting_closed"):
"""Ensures integers remain unaffected despite casting to/from int/float types.
Example integer-types: `uint8`, `int32`, `bool`.
Example floating-types: `float32`, `float64`.
The largest possible integer representable by an IEEE754 floating-point is
`2**(1 + mantissa_bits)` yet the largest possible integer as an int-type is
`2**(bits - 1) - 1`. This function ensures that a `Tensor` purporting to have
integer-form values can be cast to some other type without loss of precision.
The smallest representable integer is the negative of the largest
representable integer, except for types: `uint8`, `uint16`, `bool`. For these
types, the smallest representable integer is `0`.
Args:
x: `Tensor` representing integer-form values.
target_dtype: TF `dtype` under which `x` should have identical values.
assert_nonnegative: `bool` indicating `x` should contain nonnegative values.
name: A name for this operation (optional).
Returns:
x: Input `Tensor` with appropriate assertions embedded.
Raises:
TypeError: if `x` is neither integer- nor floating-type.
TypeError: if `target_dtype` is neither integer- nor floating-type.
TypeError: if neither `x` nor `target_dtype` is integer-type.
"""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
if (not _is_integer_like_by_dtype(x.dtype)
and not x.dtype.is_floating):
raise TypeError("{}.dtype must be floating- or "
"integer-type.".format(x.dtype.name))
if (not _is_integer_like_by_dtype(target_dtype)
and not target_dtype.is_floating):
raise TypeError("target_dtype ({}) must be floating- or "
"integer-type.".format(target_dtype.name))
if (not _is_integer_like_by_dtype(x.dtype)
and not _is_integer_like_by_dtype(target_dtype)):
raise TypeError("At least one of {}.dtype ({}) and target_dtype ({}) "
"must be integer-type.".format(
x.op.name, x.dtype.name, target_dtype.name))
assertions = []
if assert_nonnegative:
assertions += [
check_ops.assert_non_negative(
x, message="Elements must be non-negative."),
]
if x.dtype.is_floating:
# Being here means _is_integer_like_by_dtype(target_dtype) = True.
# Since this check implies the magnitude check below, we need only it.
assertions += [
assert_integer_form(
x, int_dtype=target_dtype,
message="Elements must be {}-equivalent.".format(
target_dtype.name)),
]
else:
if (_largest_integer_by_dtype(x.dtype)
> _largest_integer_by_dtype(target_dtype)):
# Cast may lose integer precision.
assertions += [
check_ops.assert_less_equal(
x, _largest_integer_by_dtype(target_dtype),
message=("Elements cannot exceed {}.".format(
_largest_integer_by_dtype(target_dtype)))),
]
if (not assert_nonnegative and
(_smallest_integer_by_dtype(x.dtype)
< _smallest_integer_by_dtype(target_dtype))):
assertions += [
check_ops.assert_greater_equal(
x, _smallest_integer_by_dtype(target_dtype),
message=("Elements cannot be smaller than {}.".format(
_smallest_integer_by_dtype(target_dtype)))),
]
if not assertions:
return x
return control_flow_ops.with_dependencies(assertions, x)
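# Illustrative consequence of the bounds above (not part of the original
# module): casting int32 data to float32 is only lossless while values stay
# at or below 2**24 = 16777216, so
#
#   x = constant_op.constant([2**24 + 1], dtype=dtypes.int32)
#   embed_check_integer_casting_closed(x, target_dtype=dtypes.float32)
#
# embeds an assert_less_equal that fails at graph run time, because
# 2**24 + 1 has no exact float32 representation.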
def log_combinations(n, counts, name="log_combinations"):
"""Multinomial coefficient.
Given `n` and `counts`, where `counts` has last dimension `k`, we compute
the multinomial coefficient as:
```n! / prod_i n_i!```
where `i` runs over all `k` classes.
Args:
n: Floating-point `Tensor` broadcastable with `counts`. This represents `n`
outcomes.
counts: Floating-point `Tensor` broadcastable with `n`. This represents
counts in `k` classes, where `k` is the last dimension of the tensor.
name: A name for this operation (optional).
Returns:
`Tensor` representing the multinomial coefficient between `n` and `counts`.
"""
# First a bit about the number of ways counts could have come in:
# E.g. if counts = [1, 2], then this is 3 choose 2.
# In general, this is (sum counts)! / prod(counts!)
# The sum should be along the last dimension of counts. This is the
# "distribution" dimension. Here n a priori represents the sum of counts.
with ops.name_scope(name, values=[n, counts]):
n = ops.convert_to_tensor(n, name="n")
counts = ops.convert_to_tensor(counts, name="counts")
total_permutations = math_ops.lgamma(n + 1)
counts_factorial = math_ops.lgamma(counts + 1)
redundant_permutations = math_ops.reduce_sum(counts_factorial, axis=[-1])
return total_permutations - redundant_permutations
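# Worked example (illustrative, not part of the original module): with
# n = 3 and counts = [1, 2] the multinomial coefficient is 3! / (1! * 2!) = 3,
# so
#
#   log_combinations(3., [1., 2.])
#   # ==> lgamma(4) - (lgamma(2) + lgamma(3)) = log(6) - log(2) = log(3)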
def matrix_diag_transform(matrix, transform=None, name=None):
"""Transform diagonal of [batch-]matrix, leave rest of matrix unchanged.
Create a trainable covariance defined by a Cholesky factor:
```python
# Transform network layer into 2 x 2 array.
matrix_values = tf.contrib.layers.fully_connected(activations, 4)
matrix = tf.reshape(matrix_values, (batch_size, 2, 2))
# Make the diagonal positive. If the upper triangle was zero, this would be a
# valid Cholesky factor.
chol = matrix_diag_transform(matrix, transform=tf.nn.softplus)
# LinearOperatorLowerTriangular ignores the upper triangle.
operator = LinearOperatorLowerTriangular(chol)
```
Example of heteroskedastic 2-D linear regression.
```python
# Get a trainable Cholesky factor.
matrix_values = tf.contrib.layers.fully_connected(activations, 4)
matrix = tf.reshape(matrix_values, (batch_size, 2, 2))
chol = matrix_diag_transform(matrix, transform=tf.nn.softplus)
# Get a trainable mean.
mu = tf.contrib.layers.fully_connected(activations, 2)
# This is a fully trainable multivariate normal!
dist = tf.contrib.distributions.MVNCholesky(mu, chol)
# Standard log loss. Minimizing this will "train" mu and chol, and then dist
# will be a distribution predicting labels as multivariate Gaussians.
loss = -1 * tf.reduce_mean(dist.log_prob(labels))
```
Args:
matrix: Rank `R` `Tensor`, `R >= 2`, where the last two dimensions are
equal.
transform: Element-wise function mapping `Tensors` to `Tensors`. To
be applied to the diagonal of `matrix`. If `None`, `matrix` is returned
unchanged. Defaults to `None`.
name: A name to give created ops.
Defaults to "matrix_diag_transform".
Returns:
A `Tensor` with same shape and `dtype` as `matrix`.
"""
with ops.name_scope(name, "matrix_diag_transform", [matrix]):
matrix = ops.convert_to_tensor(matrix, name="matrix")
if transform is None:
return matrix
# Replace the diag with transformed diag.
diag = array_ops.matrix_diag_part(matrix)
transformed_diag = transform(diag)
transformed_mat = array_ops.matrix_set_diag(matrix, transformed_diag)
return transformed_mat
def rotate_transpose(x, shift, name="rotate_transpose"):
"""Circularly moves dims left or right.
Effectively identical to:
```python
numpy.transpose(x, numpy.roll(numpy.arange(len(x.shape)), shift))
```
When `validate_args=False` additional graph-runtime checks are
performed. These checks entail moving data from the GPU to the CPU.
Example:
```python
x = tf.random_normal([1, 2, 3, 4]) # Tensor of shape [1, 2, 3, 4].
rotate_transpose(x, -1).shape == [2, 3, 4, 1]
rotate_transpose(x, -2).shape == [3, 4, 1, 2]
rotate_transpose(x, 1).shape == [4, 1, 2, 3]
rotate_transpose(x, 2).shape == [3, 4, 1, 2]
rotate_transpose(x, 7).shape == rotate_transpose(x, 3).shape # [2, 3, 4, 1]
rotate_transpose(x, -7).shape == rotate_transpose(x, -3).shape # [4, 1, 2, 3]
```
Args:
x: `Tensor`.
shift: `Tensor`. Number of dimensions to transpose left (shift<0) or
transpose right (shift>0).
name: Python `str`. The name to give this op.
Returns:
rotated_x: Input `Tensor` with dimensions circularly rotated by shift.
Raises:
TypeError: if shift is not integer type.
"""
with ops.name_scope(name, values=[x, shift]):
x = ops.convert_to_tensor(x, name="x")
shift = ops.convert_to_tensor(shift, name="shift")
# We do not assign back to preserve constant-ness.
check_ops.assert_integer(shift)
shift_value_static = tensor_util.constant_value(shift)
ndims = x.get_shape().ndims
if ndims is not None and shift_value_static is not None:
if ndims < 2: return x
shift_value_static = np.sign(shift_value_static) * (
abs(shift_value_static) % ndims)
if shift_value_static == 0: return x
perm = np.roll(np.arange(ndims), shift_value_static)
return array_ops.transpose(x, perm=perm)
else:
# Consider if we always had a positive shift, and some specified
# direction.
# When shifting left we want the new array:
# last(x, n-shift) + first(x, shift)
# and if shifting right then we want:
# last(x, shift) + first(x, n-shift)
# Observe that last(a) == slice(a, n) and first(a) == slice(0, a).
# Also, we can encode direction and shift as one: direction * shift.
# Combining these facts, we have:
# a = cond(shift<0, -shift, n-shift)
# last(x, n-a) + first(x, a) == x[a:n] + x[0:a]
# Finally, we transform shift by modulo length so it can be specified
# independently from the array upon which it operates (like python).
ndims = array_ops.rank(x)
shift = array_ops.where(math_ops.less(shift, 0),
math_ops.mod(-shift, ndims),
ndims - math_ops.mod(shift, ndims))
first = math_ops.range(0, shift)
last = math_ops.range(shift, ndims)
perm = array_ops.concat([last, first], 0)
return array_ops.transpose(x, perm=perm)
def pick_vector(cond,
true_vector,
false_vector,
name="pick_vector"):
"""Picks possibly different length row `Tensor`s based on condition.
Value `Tensor`s should have exactly one dimension.
If `cond` is a python Boolean or `tf.constant` then either `true_vector` or
`false_vector` is immediately returned. I.e., no graph nodes are created and
no validation happens.
Args:
cond: `Tensor`. Must have `dtype=tf.bool` and be scalar.
true_vector: `Tensor` of one dimension. Returned when cond is `True`.
false_vector: `Tensor` of one dimension. Returned when cond is `False`.
name: Python `str`. The name to give this op.
Example:
```python
pick_vector(tf.less(0, 5), tf.range(10, 12), tf.range(15, 18)) # [10, 11]
pick_vector(tf.less(5, 0), tf.range(10, 12), tf.range(15, 18)) # [15, 16, 17]
```
Returns:
true_or_false_vector: `Tensor`.
Raises:
TypeError: if `cond.dtype != tf.bool`
TypeError: if `cond` is not a constant and
`true_vector.dtype != false_vector.dtype`
"""
with ops.name_scope(name, values=(cond, true_vector, false_vector)):
cond = ops.convert_to_tensor(cond, name="cond")
if cond.dtype != dtypes.bool:
raise TypeError("%s.dtype=%s which is not %s" %
(cond.name, cond.dtype, dtypes.bool))
cond_value_static = tensor_util.constant_value(cond)
if cond_value_static is not None:
return true_vector if cond_value_static else false_vector
true_vector = ops.convert_to_tensor(true_vector, name="true_vector")
false_vector = ops.convert_to_tensor(false_vector, name="false_vector")
if true_vector.dtype != false_vector.dtype:
raise TypeError(
"%s.dtype=%s does not match %s.dtype=%s"
% (true_vector.name, true_vector.dtype,
false_vector.name, false_vector.dtype))
n = array_ops.shape(true_vector)[0]
return array_ops.slice(
array_ops.concat([true_vector, false_vector], 0),
[array_ops.where(cond, 0, n)], [array_ops.where(cond, n, -1)])
def gen_new_seed(seed, salt):
"""Generate a new seed, from the given seed and salt."""
if seed is None:
return None
string = (str(seed) + salt).encode("utf-8")
return int(hashlib.md5(string).hexdigest()[:8], 16) & 0x7FFFFFFF
def fill_triangular(x, upper=False, name=None):
"""Creates a (batch of) triangular matrix from a vector of inputs.
Created matrix can be lower- or upper-triangular. (It is more efficient to
create the matrix as upper or lower, rather than transpose.)
Triangular matrix elements are filled in a clockwise spiral. See example,
below.
If `x.get_shape()` is `[b1, b2, ..., bK, d]` then the output shape is `[b1,
b2, ..., bK, n, n]` where `n` is such that `d = n(n+1)/2`, i.e.,
`n = int(np.sqrt(0.25 + 2. * d) - 0.5)`.
Example:
```python
fill_triangular([1, 2, 3, 4, 5, 6])
# ==> [[4, 0, 0],
# [6, 5, 0],
# [3, 2, 1]]
fill_triangular([1, 2, 3, 4, 5, 6], upper=True)
# ==> [[1, 2, 3],
# [0, 5, 6],
# [0, 0, 4]]
```
For comparison, a pure numpy version of this function can be found in
`util_test.py`, function `_fill_triangular`.
Args:
x: `Tensor` representing lower (or upper) triangular elements.
upper: Python `bool` representing whether output matrix should be upper
triangular (`True`) or lower triangular (`False`, default).
name: Python `str`. The name to give this op.
Returns:
tril: `Tensor` with lower (or upper) triangular elements filled from `x`.
Raises:
ValueError: if `x` cannot be mapped to a triangular matrix.
"""
with ops.name_scope(name, "fill_triangular", values=[x]):
if x.shape.with_rank_at_least(1)[-1].value is not None:
# Formula derived by solving for n: m = n(n+1)/2.
m = np.int32(x.shape[-1].value)
n = np.sqrt(0.25 + 2. * m) - 0.5
if n != np.floor(n):
raise ValueError("Input right-most shape ({}) does not "
"correspond to a triangular matrix.".format(m))
n = np.int32(n)
static_final_shape = x.shape[:-1].concatenate([n, n])
else:
m = array_ops.shape(x)[-1]
# For derivation, see above. Casting automatically lops off the 0.5, so we
# omit it. We don't validate n is an integer because this has
# graph-execution cost; an error will be thrown from the reshape, below.
n = math_ops.cast(
math_ops.sqrt(0.25 + math_ops.cast(2 * m, dtype=dtypes.float32)),
dtype=dtypes.int32)
static_final_shape = x.shape.with_rank_at_least(1)[:-1].concatenate(
[None, None])
# We now concatenate the "tail" of `x` to `x` (and reverse one of them).
#
# We do this based on the insight that the input `x` provides `ceil(n/2)`
# rows of an `n x n` matrix, some of which will get zeroed out being on the
# wrong side of the diagonal. The first row will not get zeroed out at all,
# and we need `floor(n/2)` more rows, so the first is what we omit from
# `x_tail`. If we then stack those `ceil(n/2)` rows with the `floor(n/2)`
# rows provided by a reversed tail, it is exactly the other set of elements
# of the reversed tail which will be zeroed out for being on the wrong side
# of the diagonal further up/down the matrix. And, in doing so, we've filled
# the triangular matrix in a clock-wise spiral pattern. Neat!
#
# Try it out in numpy:
# n = 3
# x = np.arange(n * (n + 1) / 2)
# m = x.shape[0]
# n = np.int32(np.sqrt(.25 + 2 * m) - .5)
# x_tail = x[(m - (n**2 - m)):]
# np.concatenate([x_tail, x[::-1]], 0).reshape(n, n) # lower
# # ==> array([[3, 4, 5],
# [5, 4, 3],
# [2, 1, 0]])
# np.concatenate([x, x_tail[::-1]], 0).reshape(n, n) # upper
# # ==> array([[0, 1, 2],
# [3, 4, 5],
# [5, 4, 3]])
#
# Note that we can't simply do `x[..., -(n**2 - m):]` because this doesn't
# correctly handle `m == n == 1`. Hence, we do nonnegative indexing.
# Furthermore observe that:
# m - (n**2 - m)
# = n**2 / 2 + n / 2 - (n**2 - n**2 / 2 + n / 2)
# = 2 (n**2 / 2 + n / 2) - n**2
# = n**2 + n - n**2
# = n
if upper:
x_list = [x, array_ops.reverse(x[..., n:], axis=[-1])]
else:
x_list = [x[..., n:], array_ops.reverse(x, axis=[-1])]
new_shape = (
static_final_shape.as_list()
if static_final_shape.is_fully_defined()
else array_ops.concat([array_ops.shape(x)[:-1], [n, n]], axis=0))
x = array_ops.reshape(array_ops.concat(x_list, axis=-1), new_shape)
x = array_ops.matrix_band_part(
x,
num_lower=(0 if upper else -1),
num_upper=(-1 if upper else 0))
x.set_shape(static_final_shape)
return x
def tridiag(below=None, diag=None, above=None, name=None):
"""Creates a matrix with values set above, below, and on the diagonal.
Example:
```python
tridiag(below=[1., 2., 3.],
diag=[4., 5., 6., 7.],
above=[8., 9., 10.])
# ==> array([[ 4., 8., 0., 0.],
# [ 1., 5., 9., 0.],
# [ 0., 2., 6., 10.],
# [ 0., 0., 3., 7.]], dtype=float32)
```
Warning: This Op is intended for convenience, not efficiency.
Args:
below: `Tensor` of shape `[B1, ..., Bb, d-1]` corresponding to the below
diagonal part. `None` is logically equivalent to `below = 0`.
diag: `Tensor` of shape `[B1, ..., Bb, d]` corresponding to the diagonal
part. `None` is logically equivalent to `diag = 0`.
above: `Tensor` of shape `[B1, ..., Bb, d-1]` corresponding to the above
diagonal part. `None` is logically equivalent to `above = 0`.
name: Python `str`. The name to give this op.
Returns:
tridiag: `Tensor` with values set above, below and on the diagonal.
Raises:
ValueError: if all inputs are `None`.
"""
def _pad(x):
"""Prepends and appends a zero to every vector in a batch of vectors."""
shape = array_ops.concat([array_ops.shape(x)[:-1], [1]], axis=0)
z = array_ops.zeros(shape, dtype=x.dtype)
return array_ops.concat([z, x, z], axis=-1)
def _add(*x):
"""Adds list of Tensors, ignoring `None`."""
s = None
for y in x:
if y is None:
continue
elif s is None:
s = y
else:
s += y
if s is None:
raise ValueError("Must specify at least one of `below`, `diag`, `above`.")
return s
with ops.name_scope(name, "tridiag", [below, diag, above]):
if below is not None:
below = ops.convert_to_tensor(below, name="below")
below = array_ops.matrix_diag(_pad(below))[..., :-1, 1:]
if diag is not None:
diag = ops.convert_to_tensor(diag, name="diag")
diag = array_ops.matrix_diag(diag)
if above is not None:
above = ops.convert_to_tensor(above, name="above")
above = array_ops.matrix_diag(_pad(above))[..., 1:, :-1]
# TODO(jvdillon): Consider using scatter_nd instead of creating three full
# matrices.
return _add(below, diag, above)
def reduce_weighted_logsumexp(
logx,
w=None,
axis=None,
keep_dims=False,
return_sign=False,
name=None):
"""Computes `log(abs(sum(weight * exp(elements across tensor dimensions))))`.
If all weights `w` are known to be positive, it is more efficient to directly
use `reduce_logsumexp`, i.e., `tf.reduce_logsumexp(logx + tf.log(w))` is more
efficient than `du.reduce_weighted_logsumexp(logx, w)`.
Reduces `logx` along the dimensions given in `axis`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
This function is more numerically stable than log(sum(w * exp(input))). It
avoids overflows caused by taking the exp of large inputs and underflows
caused by taking the log of small inputs.
For example:
```python
x = tf.constant([[0., 0, 0],
[0, 0, 0]])
w = tf.constant([[-1., 1, 1],
[1, 1, 1]])
du.reduce_weighted_logsumexp(x, w)
# ==> log(-1*1 + 1*1 + 1*1 + 1*1 + 1*1 + 1*1) = log(4)
du.reduce_weighted_logsumexp(x, w, axis=0)
# ==> [log(-1+1), log(1+1), log(1+1)]
du.reduce_weighted_logsumexp(x, w, axis=1)
# ==> [log(-1+1+1), log(1+1+1)]
du.reduce_weighted_logsumexp(x, w, axis=1, keep_dims=True)
# ==> [[log(-1+1+1)], [log(1+1+1)]]
du.reduce_weighted_logsumexp(x, w, axis=[0, 1])
# ==> log(-1+5)
```
Args:
logx: The tensor to reduce. Should have numeric type.
w: The weight tensor. Should have numeric type identical to `logx`.
axis: The dimensions to reduce. If `None` (the default),
reduces all dimensions. Must be in the range
`[-rank(logx), rank(logx))`.
keep_dims: If true, retains reduced dimensions with length 1.
return_sign: If `True`, returns the sign of the result.
name: A name for the operation (optional).
Returns:
lswe: The `log(abs(sum(weight * exp(x))))` reduced tensor.
sign: (Optional) The sign of `sum(weight * exp(x))`.
"""
with ops.name_scope(name, "reduce_weighted_logsumexp", [logx, w]):
logx = ops.convert_to_tensor(logx, name="logx")
if w is None:
lswe = math_ops.reduce_logsumexp(logx, axis=axis, keep_dims=keep_dims)
if return_sign:
sgn = array_ops.ones_like(lswe)
return lswe, sgn
return lswe
w = ops.convert_to_tensor(w, dtype=logx.dtype, name="w")
log_absw_x = logx + math_ops.log(math_ops.abs(w))
max_log_absw_x = math_ops.reduce_max(log_absw_x, axis=axis, keep_dims=True)
# If the largest element is `-inf` or `inf` then we don't bother subtracting
# off the max. We do this because otherwise we'd get `inf - inf = NaN`. That
# this is ok follows from the fact that we're actually free to subtract any
# value we like, so long as we add it back after taking the `log(sum(...))`.
max_log_absw_x = array_ops.where(
math_ops.is_inf(max_log_absw_x),
array_ops.zeros_like(max_log_absw_x),
max_log_absw_x)
wx_over_max_absw_x = (
math_ops.sign(w) * math_ops.exp(log_absw_x - max_log_absw_x))
sum_wx_over_max_absw_x = math_ops.reduce_sum(
wx_over_max_absw_x,
axis=axis,
keep_dims=keep_dims)
if not keep_dims:
max_log_absw_x = array_ops.squeeze(max_log_absw_x, axis)
sgn = math_ops.sign(sum_wx_over_max_absw_x)
lswe = max_log_absw_x + math_ops.log(sgn * sum_wx_over_max_absw_x)
if return_sign:
return lswe, sgn
return lswe
# TODO(jvdillon): Merge this test back into:
# tensorflow/python/ops/softplus_op_test.py
# once TF core is accepting new ops.
def softplus_inverse(x, name=None):
"""Computes the inverse softplus, i.e., x = softplus_inverse(softplus(x)).
Mathematically this op is equivalent to:
```none
softplus_inverse = log(exp(x) - 1.)
```
Args:
x: `Tensor`. Non-negative (not enforced), floating-point.
name: A name for the operation (optional).
Returns:
`Tensor`. Has the same type/shape as input `x`.
"""
with ops.name_scope(name, "softplus_inverse", values=[x]):
x = ops.convert_to_tensor(x, name="x")
# We begin by deriving a more numerically stable softplus_inverse:
# x = softplus(y) = Log[1 + exp{y}], (which means x > 0).
# ==> exp{x} = 1 + exp{y} (1)
# ==> y = Log[exp{x} - 1] (2)
# = Log[(exp{x} - 1) / exp{x}] + Log[exp{x}]
# = Log[(1 - exp{-x}) / 1] + Log[exp{x}]
# = Log[1 - exp{-x}] + x (3)
# (2) is the "obvious" inverse, but (3) is more stable than (2) for large x.
# For small x (e.g. x = 1e-10), (3) will become -inf since 1 - exp{-x} will
# be zero. To fix this, we use 1 - exp{-x} approx x for small x > 0.
#
# In addition to the numerically stable derivation above, we clamp
# small/large values to be congruent with the logic in:
# tensorflow/core/kernels/softplus_op.h
#
# Finally, we set the input to one whenever the input is too large or too
# small. This ensures that no unchosen codepath is +/- inf. This is
# necessary to ensure the gradient doesn't get NaNs. Recall that the
# gradient of `where` behaves like `pred*pred_true + (1-pred)*pred_false`
# thus an `inf` in an unselected path results in `0*inf=nan`. We are careful
# to overwrite `x` with ones only when we will never actually use this
# value. Note that we use ones and not zeros since `log(expm1(0.)) = -inf`.
threshold = np.log(np.finfo(x.dtype.as_numpy_dtype).eps) + 2.
is_too_small = math_ops.less(x, np.exp(threshold))
is_too_large = math_ops.greater(x, -threshold)
too_small_value = math_ops.log(x)
too_large_value = x
# This `where` will ultimately be a NOP because we won't select this
# codepath whenever we used the surrogate `ones_like`.
x = array_ops.where(math_ops.logical_or(is_too_small, is_too_large),
array_ops.ones_like(x), x)
y = x + math_ops.log(-math_ops.expm1(-x)) # == log(expm1(x))
return array_ops.where(is_too_small, too_small_value,
array_ops.where(is_too_large, too_large_value, y))
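# Quick sanity check (illustrative, not part of the original module):
# softplus(0.) = log(1 + exp(0)) = log(2), so softplus_inverse(log(2)) == 0.
# For large x, softplus(x) ~= x and hence softplus_inverse(x) ~= x, which is
# exactly the `too_large_value = x` branch above.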
# TODO(b/35290280): Add unit-tests.
def dimension_size(x, axis):
"""Returns the size of a specific dimension."""
# Since tf.gather isn't "constant-in, constant-out", we must first check the
# static shape or fall back to the dynamic shape.
s = x.shape.with_rank_at_least(axis + 1)[axis].value
if axis > -1 and s is not None:
return s
return array_ops.shape(x)[axis]
def process_quadrature_grid_and_probs(
quadrature_grid_and_probs, dtype, validate_args, name=None):
"""Validates quadrature grid, probs or computes them as necessary.
Args:
quadrature_grid_and_probs: Python pair of `float`-like `Tensor`s
representing the sample points and the corresponding (possibly
normalized) weight. When `None`, defaults to:
`np.polynomial.hermite.hermgauss(deg=8)`.
dtype: The expected `dtype` of `grid` and `probs`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Returns:
quadrature_grid_and_probs: Python pair of `float`-like `Tensor`s
representing the sample points and the corresponding (possibly
normalized) weight.
Raises:
ValueError: if `quadrature_grid_and_probs is not None` and
`len(quadrature_grid_and_probs[0]) != len(quadrature_grid_and_probs[1])`
"""
with ops.name_scope(name, "process_quadrature_grid_and_probs",
[quadrature_grid_and_probs]):
if quadrature_grid_and_probs is None:
grid, probs = np.polynomial.hermite.hermgauss(deg=8)
grid = grid.astype(dtype.as_numpy_dtype)
probs = probs.astype(dtype.as_numpy_dtype)
probs /= np.linalg.norm(probs, ord=1, keepdims=True)
grid = ops.convert_to_tensor(grid, name="grid", dtype=dtype)
probs = ops.convert_to_tensor(probs, name="probs", dtype=dtype)
return grid, probs
grid, probs = tuple(quadrature_grid_and_probs)
grid = ops.convert_to_tensor(grid, name="grid", dtype=dtype)
probs = ops.convert_to_tensor(probs, name="unnormalized_probs",
dtype=dtype)
probs /= linalg_ops.norm(probs, ord=1, axis=-1, keep_dims=True,
name="probs")
def _static_dim_size(x, axis):
"""Returns the static size of a specific dimension or `None`."""
return x.shape.with_rank_at_least(axis + 1)[axis].value
m, n = _static_dim_size(probs, axis=0), _static_dim_size(grid, axis=0)
if m is not None and n is not None:
if m != n:
raise ValueError("`quadrature_grid_and_probs` must be a `tuple` of "
"same-length zero-th-dimension `Tensor`s "
"(saw lengths {}, {})".format(m, n))
elif validate_args:
grid = control_flow_ops.with_dependencies([
check_ops.assert_equal(
dimension_size(probs, axis=0),
dimension_size(grid, axis=0),
message=("`quadrature_grid_and_probs` must be a `tuple` of "
"same-length zero-th-dimension `Tensor`s")),
], grid)
return grid, probs
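# --- Illustrative sketch (not part of the original module) -------------------
# How the default grid/probs above are produced, as plain NumPy: Gauss-Hermite
# sample points with L1-normalized weights. Unused helper, for illustration.
def _numpy_default_quadrature_sketch(deg=8):
  import numpy as np
  grid, probs = np.polynomial.hermite.hermgauss(deg)
  probs = probs / np.linalg.norm(probs, ord=1, keepdims=True)
  return grid, probs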
class AppendDocstring(object):
"""Helper class to promote private subclass docstring to public counterpart.
Example:
```python
class TransformedDistribution(Distribution):
@distribution_util.AppendDocstring(
additional_note="A special note!",
kwargs_dict={"foo": "An extra arg."})
def _prob(self, y, foo=None):
pass
```
In this case, the `AppendDocstring` decorator appends the `additional_note` to
the docstring of `prob` (not `_prob`) and adds a new `kwargs`
section with each dictionary item as a bullet-point.
For a more detailed example, see `TransformedDistribution`.
"""
def __init__(self, additional_note="", kwargs_dict=None):
"""Initializes the AppendDocstring object.
Args:
additional_note: Python string added as additional docstring to public
version of function.
kwargs_dict: Python string/string dictionary representing
specific kwargs expanded from the **kwargs input.
Raises:
ValueError: if kwargs_dict.key contains whitespace.
ValueError: if kwargs_dict.value contains newlines.
"""
self._additional_note = additional_note
if kwargs_dict:
bullets = []
for key in sorted(kwargs_dict.keys()):
value = kwargs_dict[key]
if any(x.isspace() for x in key):
raise ValueError(
"Parameter name \"%s\" contains whitespace." % key)
value = value.lstrip()
if "\n" in value:
raise ValueError(
"Parameter description for \"%s\" contains newlines." % key)
bullets.append("* `%s`: %s" % (key, value))
self._additional_note += ("\n\n##### `kwargs`:\n\n" +
"\n".join(bullets))
def __call__(self, fn):
@functools.wraps(fn)
def _fn(*args, **kwargs):
return fn(*args, **kwargs)
if _fn.__doc__ is None:
_fn.__doc__ = self._additional_note
else:
_fn.__doc__ += "\n%s" % self._additional_note
return _fn
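# --- Illustrative sketch (not part of the original module) -------------------
# A hypothetical, unused demonstration of the decorator above: the note is
# appended to the wrapped function's docstring.
def _append_docstring_demo_sketch():
  @AppendDocstring(additional_note="A special note!")
  def _prob(y):
    """Base docstring."""
    return y
  return _prob.__doc__  # -> "Base docstring.\nA special note!"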
|
{
"content_hash": "b059481f31791339be308f734090d746",
"timestamp": "",
"source": "github",
"line_count": 1168,
"max_line_length": 85,
"avg_line_length": 38.87071917808219,
"alnum_prop": 0.6316821215391731,
"repo_name": "benoitsteiner/tensorflow-opencl",
"id": "41b86f79409aef76dbd710606d09b21f34cab7ba",
"size": "46090",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/distributions/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6963"
},
{
"name": "C",
"bytes": "171616"
},
{
"name": "C++",
"bytes": "20990085"
},
{
"name": "CMake",
"bytes": "120459"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "775176"
},
{
"name": "HTML",
"bytes": "560372"
},
{
"name": "Java",
"bytes": "271897"
},
{
"name": "JavaScript",
"bytes": "13406"
},
{
"name": "Jupyter Notebook",
"bytes": "1833840"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "32953"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64592"
},
{
"name": "Protocol Buffer",
"bytes": "189423"
},
{
"name": "Python",
"bytes": "17431186"
},
{
"name": "Shell",
"bytes": "311737"
},
{
"name": "TypeScript",
"bytes": "772082"
}
],
"symlink_target": ""
}
|
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetRegion(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetRegion Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GetRegion, self).__init__(temboo_session, '/Library/Google/ComputeEngine/Regions/GetRegion')
def new_input_set(self):
return GetRegionInputSet()
def _make_result_set(self, result, path):
return GetRegionResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetRegionChoreographyExecution(session, exec_id, path)
class GetRegionInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetRegion
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((optional, string) A valid access token retrieved during the OAuth process. This is required unless you provide the ClientID, ClientSecret, and RefreshToken to generate a new access token.)
"""
super(GetRegionInputSet, self)._set_input('AccessToken', value)
def set_ClientID(self, value):
"""
Set the value of the ClientID input for this Choreo. ((conditional, string) The Client ID provided by Google. Required unless providing a valid AccessToken.)
"""
super(GetRegionInputSet, self)._set_input('ClientID', value)
def set_ClientSecret(self, value):
"""
Set the value of the ClientSecret input for this Choreo. ((conditional, string) The Client Secret provided by Google. Required unless providing a valid AccessToken.)
"""
super(GetRegionInputSet, self)._set_input('ClientSecret', value)
def set_Fields(self, value):
"""
        Set the value of the Fields input for this Choreo. ((optional, string) Comma-separated list of fields you want to include in the response.)
"""
super(GetRegionInputSet, self)._set_input('Fields', value)
def set_Project(self, value):
"""
Set the value of the Project input for this Choreo. ((required, string) The ID of a Google Compute project.)
"""
super(GetRegionInputSet, self)._set_input('Project', value)
def set_RefreshToken(self, value):
"""
Set the value of the RefreshToken input for this Choreo. ((conditional, string) An OAuth refresh token used to generate a new access token when the original token is expired. Required unless providing a valid AccessToken.)
"""
super(GetRegionInputSet, self)._set_input('RefreshToken', value)
def set_Region(self, value):
"""
Set the value of the Region input for this Choreo. ((required, string) The name of the region to retrieve.)
"""
super(GetRegionInputSet, self)._set_input('Region', value)
class GetRegionResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetRegion Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Google.)
"""
return self._output.get('Response', None)
def get_NewAccessToken(self):
"""
Retrieve the value for the "NewAccessToken" output from this Choreo execution. ((string) Contains a new AccessToken when the RefreshToken is provided.)
"""
return self._output.get('NewAccessToken', None)
class GetRegionChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetRegionResultSet(response, path)
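# --- Illustrative sketch (not part of the original module) -------------------
# A hypothetical end-to-end call, assuming the usual Temboo Python SDK session
# API (TembooSession, Choreography.execute_with_results); the account, key and
# project values below are placeholders only. Unused helper, for illustration.
def _get_region_example():
    from temboo.core.session import TembooSession
    session = TembooSession("ACCOUNT_NAME", "APP_KEY_NAME", "APP_KEY_VALUE")
    choreo = GetRegion(session)
    inputs = choreo.new_input_set()
    inputs.set_Project("my-project-id")
    inputs.set_Region("us-central1")
    inputs.set_AccessToken("ya29.placeholder-token")
    results = choreo.execute_with_results(inputs)
    return results.get_Response()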
|
{
"content_hash": "e94541290049f6e34faca71285df1d28",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 254,
"avg_line_length": 45.582417582417584,
"alnum_prop": 0.6887656702025072,
"repo_name": "lupyuen/RaspberryPiImage",
"id": "d3a0c7c25c5f306f2aa22f5aba236e05b005b70f",
"size": "5012",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "home/pi/GrovePi/Software/Python/others/temboo/Library/Google/ComputeEngine/Regions/GetRegion.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "82308"
},
{
"name": "C",
"bytes": "3197439"
},
{
"name": "C#",
"bytes": "33056"
},
{
"name": "C++",
"bytes": "1020255"
},
{
"name": "CSS",
"bytes": "208338"
},
{
"name": "CoffeeScript",
"bytes": "87200"
},
{
"name": "Eagle",
"bytes": "1632170"
},
{
"name": "Go",
"bytes": "3646"
},
{
"name": "Groff",
"bytes": "286691"
},
{
"name": "HTML",
"bytes": "41527"
},
{
"name": "JavaScript",
"bytes": "403603"
},
{
"name": "Makefile",
"bytes": "33808"
},
{
"name": "Objective-C",
"bytes": "69457"
},
{
"name": "Perl",
"bytes": "96047"
},
{
"name": "Processing",
"bytes": "1304"
},
{
"name": "Python",
"bytes": "13358098"
},
{
"name": "Shell",
"bytes": "68795"
},
{
"name": "TeX",
"bytes": "4317"
}
],
"symlink_target": ""
}
|
from django.db import models
from shared_schema_tenants.settings import get_setting
from shared_schema_tenants.managers import SingleTenantModelManager, MultipleTenantModelManager
from shared_schema_tenants.helpers.tenants import get_current_tenant
from shared_schema_tenants.exceptions import TenantNotFoundError
def get_default_tenant():
from shared_schema_tenants.models import Tenant
return Tenant.objects.filter(slug=get_setting('DEFAULT_TENANT_SLUG')).first()
class SingleTenantModelMixin(models.Model):
tenant = models.ForeignKey(
'shared_schema_tenants.Tenant', default=get_default_tenant)
objects = SingleTenantModelManager()
original_manager = models.Manager()
tenant_objects = SingleTenantModelManager()
class Meta:
abstract = True
default_manager_name = 'objects'
base_manager_name = 'objects'
def save(self, *args, **kwargs):
if not hasattr(self, 'tenant'):
self.tenant = get_current_tenant()
if getattr(self, 'tenant', False):
return super(SingleTenantModelMixin, self).save(*args, **kwargs)
else:
raise TenantNotFoundError()
class MultipleTenantsModelMixin(models.Model):
tenants = models.ManyToManyField('shared_schema_tenants.Tenant')
objects = MultipleTenantModelManager()
tenant_objects = MultipleTenantModelManager()
original_manager = models.Manager()
class Meta:
abstract = True
default_manager_name = 'objects'
base_manager_name = 'objects'
def save(self, *args, **kwargs):
tenant = get_current_tenant()
if tenant:
instance = super(MultipleTenantsModelMixin, self).save(*args, **kwargs)
self.tenants.add(tenant)
return instance
else:
raise TenantNotFoundError()
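# --- Illustrative sketch (not part of the original module) -------------------
# A hypothetical model using the single-tenant mixin above; shown as a comment
# because defining a model here would register it with Django at import time.
#
#     class Article(SingleTenantModelMixin):
#         title = models.CharField(max_length=255)
#
#     Article.objects.all()           # scoped to the current tenant
#     Article.original_manager.all()  # unscoped access across all tenants
#
# On save(), `tenant` is filled from get_current_tenant() when not set
# explicitly; TenantNotFoundError is raised if no tenant can be resolved.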
|
{
"content_hash": "2c156007ca1e7abc9a6cd612148cdd12",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 95,
"avg_line_length": 31.810344827586206,
"alnum_prop": 0.6888888888888889,
"repo_name": "hugobessa/django-shared-schema-tenants",
"id": "e120536f08750128d327e579b5928842955be1e7",
"size": "1845",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "shared_schema_tenants/mixins.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1653"
},
{
"name": "Python",
"bytes": "234851"
}
],
"symlink_target": ""
}
|
"""
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
"""
import pytest
import salt.states.schedule as schedule
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
return {schedule: {}}
def test_present():
"""
Test to ensure a job is present in the schedule.
"""
name = "job1"
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
mock_dict = MagicMock(side_effect=[ret, []])
mock_mod = MagicMock(return_value=ret)
mock_lst = MagicMock(side_effect=[{name: {}}, {name: {}}, {}, {}])
with patch.dict(
schedule.__salt__,
{
"schedule.list": mock_lst,
"schedule.build_schedule_item": mock_dict,
"schedule.modify": mock_mod,
"schedule.add": mock_mod,
},
):
assert schedule.present(name) == ret
with patch.dict(schedule.__opts__, {"test": False}):
assert schedule.present(name) == ret
assert schedule.present(name) == ret
with patch.dict(schedule.__opts__, {"test": True}):
ret.update({"result": True})
assert schedule.present(name) == ret
def test_absent():
"""
Test to ensure a job is absent from the schedule.
"""
name = "job1"
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
mock_mod = MagicMock(return_value=ret)
mock_lst = MagicMock(side_effect=[{name: {}}, {}])
with patch.dict(
schedule.__salt__, {"schedule.list": mock_lst, "schedule.delete": mock_mod}
):
with patch.dict(schedule.__opts__, {"test": False}):
assert schedule.absent(name) == ret
with patch.dict(schedule.__opts__, {"test": True}):
comt = "Job job1 not present in schedule"
ret.update({"comment": comt, "result": True})
assert schedule.absent(name) == ret
|
{
"content_hash": "cea794ae807fefe66c1dad994a5244cb",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 83,
"avg_line_length": 28.62686567164179,
"alnum_prop": 0.5662148070907195,
"repo_name": "saltstack/salt",
"id": "f58a51a22a731b57ce187b0aa6f16495747bb6c8",
"size": "1918",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/pytests/unit/states/test_schedule.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14911"
},
{
"name": "C",
"bytes": "1571"
},
{
"name": "Cython",
"bytes": "1458"
},
{
"name": "Dockerfile",
"bytes": "184"
},
{
"name": "Groovy",
"bytes": "12318"
},
{
"name": "HCL",
"bytes": "257"
},
{
"name": "HTML",
"bytes": "8031"
},
{
"name": "Jinja",
"bytes": "45598"
},
{
"name": "Makefile",
"bytes": "713"
},
{
"name": "NSIS",
"bytes": "76572"
},
{
"name": "PowerShell",
"bytes": "75891"
},
{
"name": "Python",
"bytes": "41444811"
},
{
"name": "Rich Text Format",
"bytes": "6242"
},
{
"name": "Roff",
"bytes": "191"
},
{
"name": "Ruby",
"bytes": "961"
},
{
"name": "SaltStack",
"bytes": "35856"
},
{
"name": "Scheme",
"bytes": "895"
},
{
"name": "Scilab",
"bytes": "1147"
},
{
"name": "Shell",
"bytes": "524917"
}
],
"symlink_target": ""
}
|
import mock
import unittest
from nsq.sockets.base import SocketWrapper
class TestSocketWrapper(unittest.TestCase):
'''Test the SocketWrapper class'''
def setUp(self):
self.socket = mock.Mock()
self.wrapped = SocketWrapper.wrap_socket(self.socket)
def test_wrap_socket(self):
'''Passes through objects to the constructor'''
with mock.patch.object(SocketWrapper, '__init__') as mock_init:
mock_init.return_value = None
SocketWrapper.wrap_socket(5, hello='foo')
mock_init.assert_called_with(5, hello='foo')
def test_method_pass_through(self):
'''Passes through most methods directly to the underlying socket'''
self.assertEqual(self.wrapped.accept, self.socket.accept)
def test_send(self):
        '''SocketWrapper.send raises NotImplementedError'''
self.assertRaises(NotImplementedError, self.wrapped.send, 'foo')
def test_sendall(self):
'''Repeatedly calls send until everything has been sent'''
with mock.patch.object(self.wrapped, 'send') as mock_send:
# Only sends one byte at a time
mock_send.return_value = 1
self.wrapped.sendall('hello')
self.assertEqual(mock_send.call_count, 5)
def test_recv(self):
        '''SocketWrapper.recv raises NotImplementedError'''
self.assertRaises(NotImplementedError, self.wrapped.recv, 5)
def test_recv_into(self):
        '''SocketWrapper.recv_into raises NotImplementedError'''
self.assertRaises(NotImplementedError, self.wrapped.recv_into, 'foo', 5)
def test_inheritance_overrides(self):
'''Classes that inherit can override things like accept'''
class Foo(SocketWrapper):
def close(self):
pass
wrapped = Foo.wrap_socket(self.socket)
self.assertNotEqual(wrapped.close, self.socket.close)
|
{
"content_hash": "e6f172894f3b0a765465b7fb9220c195",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 80,
"avg_line_length": 37.411764705882355,
"alnum_prop": 0.6567085953878407,
"repo_name": "dlecocq/nsq-py",
"id": "9b2e8b3123973f4dfc540aaef290850dee9cd7c0",
"size": "1908",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_sockets/test_base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "386"
},
{
"name": "Python",
"bytes": "156556"
},
{
"name": "Shell",
"bytes": "1920"
}
],
"symlink_target": ""
}
|
from ..common.optim import SGD as optimizer
from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
from ..common.data.coco import dataloader
from ..common.models.mask_rcnn_fpn import model
from ..common.train import train
from detectron2.config import LazyCall as L
from detectron2.modeling.backbone import RegNet
from detectron2.modeling.backbone.regnet import SimpleStem, ResBottleneckBlock
# Replace default ResNet with RegNetY-4GF from the DDS paper. Config source:
# https://github.com/facebookresearch/pycls/blob/2c152a6e5d913e898cca4f0a758f41e6b976714d/configs/dds_baselines/regnety/RegNetY-4.0GF_dds_8gpu.yaml#L4-L10 # noqa
model.backbone.bottom_up = L(RegNet)(
stem_class=SimpleStem,
stem_width=32,
block_class=ResBottleneckBlock,
depth=22,
w_a=31.41,
w_0=96,
w_m=2.24,
group_width=64,
se_ratio=0.25,
freeze_at=2,
norm="FrozenBN",
out_features=["s1", "s2", "s3", "s4"],
)
model.pixel_std = [57.375, 57.120, 58.395]
optimizer.weight_decay = 5e-5
train.init_checkpoint = (
"https://dl.fbaipublicfiles.com/pycls/dds_baselines/160906838/RegNetY-4.0GF_dds_8gpu.pyth"
)
# RegNets benefit from enabling cudnn benchmark mode
train.cudnn_benchmark = True
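# --- Illustrative sketch (not part of the original config) -------------------
# Hypothetical usage: lazy configs such as this one are normally loaded with
# detectron2's LazyConfig tooling, roughly:
#
#     from detectron2.config import LazyConfig, instantiate
#     cfg = LazyConfig.load(
#         "configs/COCO-InstanceSegmentation/mask_rcnn_regnety_4gf_dds_fpn_1x.py")
#     model = instantiate(cfg.model)
#
# or trained via the lazy-config training script shipped with the repo
# (path assumed): ./tools/lazyconfig_train_net.py --config-file <this file>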
|
{
"content_hash": "ff80355d55fd98f8bfde6f20ac62c790",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 162,
"avg_line_length": 35.02857142857143,
"alnum_prop": 0.7463295269168027,
"repo_name": "facebookresearch/detectron2",
"id": "72c6b7a5c8939970bd0e1e4a3c1155695943b19a",
"size": "1226",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "configs/COCO-InstanceSegmentation/mask_rcnn_regnety_4gf_dds_fpn_1x.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "79417"
},
{
"name": "CMake",
"bytes": "616"
},
{
"name": "Cuda",
"bytes": "112955"
},
{
"name": "Dockerfile",
"bytes": "3209"
},
{
"name": "Python",
"bytes": "3261609"
},
{
"name": "Shell",
"bytes": "14448"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('company', '0008_auto_20170211_2001'),
]
operations = [
migrations.AlterField(
model_name='job',
name='category',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='job', to='company.PlacementCategory'),
),
migrations.AlterField(
model_name='job',
name='ctc',
field=models.DecimalField(blank=True, decimal_places=2, max_digits=4, null=True),
),
migrations.AlterField(
model_name='job',
name='job_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='job', to='company.JobType'),
),
]
|
{
"content_hash": "a0e405a234606537768d31c50cdf250e",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 156,
"avg_line_length": 32.89655172413793,
"alnum_prop": 0.6132075471698113,
"repo_name": "aakashrana1995/svnit-tnp",
"id": "d3d06a3ae48f05eb3ef64ccb6530e5bdea3c3b70",
"size": "1027",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tnp/company/migrations/0009_auto_20170211_2017.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "45609"
},
{
"name": "HTML",
"bytes": "111453"
},
{
"name": "JavaScript",
"bytes": "68394"
},
{
"name": "Python",
"bytes": "112993"
}
],
"symlink_target": ""
}
|
"""
See https://zulip.readthedocs.io/en/latest/translating/internationalization.html
for background.
The contents of this file are taken from
https://github.com/niwinz/django-jinja/blob/master/django_jinja/management/commands/makemessages.py
Jinja2's i18n functionality is not exactly the same as Django's.
In particular, the tags names and their syntax are different:
1. The Django ``trans`` tag is replaced by a _() global.
2. The Django ``blocktrans`` tag is called ``trans``.
(1) isn't an issue, since the whole ``makemessages`` process is based on
converting the template tags to ``_()`` calls. However, (2) means that
those Jinja2 ``trans`` tags will not be picked up by Django's
``makemessages`` command.
There aren't any nice solutions here. While Jinja2's i18n extension does
come with extraction capabilities built in, the code behind ``makemessages``
unfortunately isn't extensible, so we can:
* Duplicate the command + code behind it.
* Offer a separate command for Jinja2 extraction.
* Try to get Django to offer hooks into makemessages().
* Monkey-patch.
We are currently doing that last thing. It turns out that, for once, we are
lucky: it's simply a matter of extending two regular expressions.
Credit for the approach goes to:
https://stackoverflow.com/questions/2090717
"""
import glob
import itertools
import json
import os
import re
from argparse import ArgumentParser
from typing import Any, Dict, Iterable, Iterator, List, Mapping
from django.core.management.commands import makemessages
from django.template.base import BLOCK_TAG_END, BLOCK_TAG_START
from django.utils.translation import template
strip_whitespace_right = re.compile(f"({BLOCK_TAG_START}-?\\s*(trans|pluralize).*?-{BLOCK_TAG_END})\\s+", re.U)
strip_whitespace_left = re.compile(f"\\s+({BLOCK_TAG_START}-\\s*(endtrans|pluralize).*?-?{BLOCK_TAG_END})", re.U)
regexes = [r'{{#tr .*?}}([\s\S]*?){{/tr}}', # '.' doesn't match '\n' by default
r'{{\s*t "(.*?)"\W*}}',
r"{{\s*t '(.*?)'\W*}}",
r'\(t "(.*?)"\)',
r'=\(t "(.*?)"\)(?=[^{]*}})',
r"=\(t '(.*?)'\)(?=[^{]*}})",
r"i18n\.t\('([^']*?)'\)",
r"i18n\.t\('(.*?)',\s*.*?[^,]\)",
r'i18n\.t\("([^"]*?)"\)',
r'i18n\.t\("(.*?)",\s*.*?[^,]\)',
]
tags = [('err_', "error"),
]
frontend_compiled_regexes = [re.compile(regex) for regex in regexes]
multiline_js_comment = re.compile(r"/\*.*?\*/", re.DOTALL)
singleline_js_comment = re.compile("//.*?\n")
def strip_whitespaces(src: str) -> str:
src = strip_whitespace_left.sub('\\1', src)
src = strip_whitespace_right.sub('\\1', src)
return src
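# --- Illustrative sketch (not part of the original module) -------------------
# What strip_whitespaces() does to a whitespace-controlled Jinja2 trans block;
# an unused helper kept only for illustration (the template snippet is made up).
def _strip_whitespaces_example():
    src = "{%- trans -%}   Hello world   {%- endtrans -%}"
    # -> "{%- trans -%}Hello world{%- endtrans -%}"
    return strip_whitespaces(src)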
class Command(makemessages.Command):
xgettext_options = makemessages.Command.xgettext_options
for func, tag in tags:
xgettext_options += [f'--keyword={func}:1,"{tag}"']
def add_arguments(self, parser: ArgumentParser) -> None:
super().add_arguments(parser)
parser.add_argument('--frontend-source',
default='static/templates',
help='Name of the Handlebars template directory')
parser.add_argument('--frontend-output',
default='locale',
help='Name of the frontend messages output directory')
parser.add_argument('--frontend-namespace',
default='translations.json',
help='Namespace of the frontend locale file')
def handle(self, *args: Any, **options: Any) -> None:
self.handle_django_locales(*args, **options)
self.handle_frontend_locales(**options)
def handle_frontend_locales(self, *,
frontend_source: str,
frontend_output: str,
frontend_namespace: str,
locale: List[str],
exclude: List[str],
all: bool,
**options: Any) -> None:
self.frontend_source = frontend_source
self.frontend_output = frontend_output
self.frontend_namespace = frontend_namespace
self.frontend_locale = locale
self.frontend_exclude = exclude
self.frontend_all = all
translation_strings = self.get_translation_strings()
self.write_translation_strings(translation_strings)
def handle_django_locales(self, *args: Any, **options: Any) -> None:
old_endblock_re = template.endblock_re
old_block_re = template.block_re
old_constant_re = template.constant_re
old_templatize = template.templatize
# Extend the regular expressions that are used to detect
# translation blocks with an "OR jinja-syntax" clause.
template.endblock_re = re.compile(
template.endblock_re.pattern + '|' + r"""^-?\s*endtrans\s*-?$""")
template.block_re = re.compile(
template.block_re.pattern + '|' + r"""^-?\s*trans(?:\s+(?!'|")(?=.*?=.*?)|\s*-?$)""")
template.plural_re = re.compile(
template.plural_re.pattern + '|' + r"""^-?\s*pluralize(?:\s+.+|-?$)""")
template.constant_re = re.compile(r"""_\(((?:".*?")|(?:'.*?')).*\)""")
def my_templatize(src: str, *args: Any, **kwargs: Any) -> str:
new_src = strip_whitespaces(src)
return old_templatize(new_src, *args, **kwargs)
template.templatize = my_templatize
try:
ignore_patterns = options.get('ignore_patterns', [])
ignore_patterns.append('docs/*')
ignore_patterns.append('var/*')
options['ignore_patterns'] = ignore_patterns
super().handle(*args, **options)
finally:
template.endblock_re = old_endblock_re
template.block_re = old_block_re
template.templatize = old_templatize
template.constant_re = old_constant_re
def extract_strings(self, data: str) -> List[str]:
translation_strings: List[str] = []
for regex in frontend_compiled_regexes:
for match in regex.findall(data):
match = match.strip()
match = ' '.join(line.strip() for line in match.splitlines())
match = match.replace('\n', '\\n')
translation_strings.append(match)
return translation_strings
def ignore_javascript_comments(self, data: str) -> str:
# Removes multi line comments.
data = multiline_js_comment.sub('', data)
# Removes single line (//) comments.
data = singleline_js_comment.sub('', data)
return data
def get_translation_strings(self) -> List[str]:
translation_strings: List[str] = []
dirname = self.get_template_dir()
for dirpath, dirnames, filenames in os.walk(dirname):
for filename in [f for f in filenames if f.endswith(".hbs")]:
if filename.startswith('.'):
continue
with open(os.path.join(dirpath, filename)) as reader:
data = reader.read()
translation_strings.extend(self.extract_strings(data))
for dirpath, dirnames, filenames in itertools.chain(os.walk("static/js"),
os.walk("static/shared/js")):
for filename in [f for f in filenames if f.endswith(".js") or f.endswith(".ts")]:
if filename.startswith('.'):
continue
with open(os.path.join(dirpath, filename)) as reader:
data = reader.read()
data = self.ignore_javascript_comments(data)
translation_strings.extend(self.extract_strings(data))
return list(set(translation_strings))
def get_template_dir(self) -> str:
return self.frontend_source
def get_namespace(self) -> str:
return self.frontend_namespace
def get_locales(self) -> Iterable[str]:
locale = self.frontend_locale
exclude = self.frontend_exclude
process_all = self.frontend_all
paths = glob.glob(f'{self.default_locale_path}/*')
all_locales = [os.path.basename(path) for path in paths if os.path.isdir(path)]
# Account for excluded locales
if process_all:
return all_locales
else:
locales = locale or all_locales
return set(locales) - set(exclude)
def get_base_path(self) -> str:
return self.frontend_output
def get_output_paths(self) -> Iterator[str]:
base_path = self.get_base_path()
locales = self.get_locales()
for path in [os.path.join(base_path, locale) for locale in locales]:
if not os.path.exists(path):
os.makedirs(path)
yield os.path.join(path, self.get_namespace())
def get_new_strings(self, old_strings: Mapping[str, str],
translation_strings: List[str], locale: str) -> Dict[str, str]:
"""
Missing strings are removed, new strings are added and already
translated strings are not touched.
"""
new_strings = {} # Dict[str, str]
for k in translation_strings:
k = k.replace('\\n', '\n')
if locale == 'en':
# For English language, translation is equal to the key.
new_strings[k] = old_strings.get(k, k)
else:
new_strings[k] = old_strings.get(k, "")
plurals = {k: v for k, v in old_strings.items() if k.endswith('_plural')}
for plural_key, value in plurals.items():
components = plural_key.split('_')
singular_key = '_'.join(components[:-1])
if singular_key in new_strings:
new_strings[plural_key] = value
return new_strings
def write_translation_strings(self, translation_strings: List[str]) -> None:
for locale, output_path in zip(self.get_locales(), self.get_output_paths()):
self.stdout.write(f"[frontend] processing locale {locale}")
try:
with open(output_path) as reader:
old_strings = json.load(reader)
except (OSError, ValueError):
old_strings = {}
new_strings = {
k: v
for k, v in self.get_new_strings(old_strings,
translation_strings,
locale).items()
}
with open(output_path, 'w') as writer:
json.dump(new_strings, writer, indent=2, sort_keys=True)
|
{
"content_hash": "d5ce5dbcaafbbdcf5b1fb8740a27b2cc",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 113,
"avg_line_length": 41.46153846153846,
"alnum_prop": 0.5687384044526902,
"repo_name": "showell/zulip",
"id": "fb0aea8d2287f7c7f6215626c6802b5a01919be8",
"size": "10780",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "zerver/management/commands/makemessages.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "433235"
},
{
"name": "Dockerfile",
"bytes": "2941"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "634357"
},
{
"name": "Handlebars",
"bytes": "235334"
},
{
"name": "JavaScript",
"bytes": "3341135"
},
{
"name": "Perl",
"bytes": "8594"
},
{
"name": "Puppet",
"bytes": "79720"
},
{
"name": "Python",
"bytes": "8120030"
},
{
"name": "Ruby",
"bytes": "8480"
},
{
"name": "Shell",
"bytes": "133132"
},
{
"name": "TypeScript",
"bytes": "20603"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, print_function, with_statement
__author__ = "Microsoft Corporation <ptvshelp@microsoft.com>"
__version__ = "3.0.0.0"
import ctypes
import datetime
import os
import re
import struct
import sys
import traceback
from xml.dom import minidom
try:
from cStringIO import StringIO
BytesIO = StringIO
except ImportError:
from io import StringIO, BytesIO
try:
from thread import start_new_thread
except ImportError:
from _thread import start_new_thread
__version__ = '3.0.0'
if sys.version_info[0] == 3:
def to_str(value):
return value.decode(sys.getfilesystemencoding())
else:
def to_str(value):
return value.encode(sys.getfilesystemencoding())
# http://www.fastcgi.com/devkit/doc/fcgi-spec.html#S3
FCGI_VERSION_1 = 1
FCGI_HEADER_LEN = 8
FCGI_BEGIN_REQUEST = 1
FCGI_ABORT_REQUEST = 2
FCGI_END_REQUEST = 3
FCGI_PARAMS = 4
FCGI_STDIN = 5
FCGI_STDOUT = 6
FCGI_STDERR = 7
FCGI_DATA = 8
FCGI_GET_VALUES = 9
FCGI_GET_VALUES_RESULT = 10
FCGI_UNKNOWN_TYPE = 11
FCGI_MAXTYPE = FCGI_UNKNOWN_TYPE
FCGI_NULL_REQUEST_ID = 0
FCGI_KEEP_CONN = 1
FCGI_RESPONDER = 1
FCGI_AUTHORIZER = 2
FCGI_FILTER = 3
FCGI_REQUEST_COMPLETE = 0
FCGI_CANT_MPX_CONN = 1
FCGI_OVERLOADED = 2
FCGI_UNKNOWN_ROLE = 3
FCGI_MAX_CONNS = "FCGI_MAX_CONNS"
FCGI_MAX_REQS = "FCGI_MAX_REQS"
FCGI_MPXS_CONNS = "FCGI_MPXS_CONNS"
class FastCgiRecord(object):
"""Represents a FastCgiRecord. Encapulates the type, role, flags. Holds
onto the params which we will receive and update later."""
def __init__(self, type, req_id, role, flags):
self.type = type
self.req_id = req_id
self.role = role
self.flags = flags
self.params = {}
def __repr__(self):
return '<FastCgiRecord(%d, %d, %d, %d)>' % (self.type,
self.req_id,
self.role,
self.flags)
#typedef struct {
# unsigned char version;
# unsigned char type;
# unsigned char requestIdB1;
# unsigned char requestIdB0;
# unsigned char contentLengthB1;
# unsigned char contentLengthB0;
# unsigned char paddingLength;
# unsigned char reserved;
# unsigned char contentData[contentLength];
# unsigned char paddingData[paddingLength];
#} FCGI_Record;
class _ExitException(Exception):
pass
if sys.version_info[0] >= 3:
# indexing into byte strings gives us an int, so
# ord is unnecessary on Python 3
def ord(x):
return x
def chr(x):
return bytes((x, ))
def wsgi_decode(x):
return x.decode('iso-8859-1')
def wsgi_encode(x):
return x.encode('iso-8859-1')
def fs_encode(x):
return x
def exception_with_traceback(exc_value, exc_tb):
return exc_value.with_traceback(exc_tb)
zero_bytes = bytes
else:
# Replace the builtin open with one that supports an encoding parameter
from codecs import open
def wsgi_decode(x):
return x
def wsgi_encode(x):
return x
def fs_encode(x):
return x if isinstance(x, str) else x.encode(sys.getfilesystemencoding())
def exception_with_traceback(exc_value, exc_tb):
# x.with_traceback() is not supported on 2.x
return exc_value
bytes = str
def zero_bytes(length):
return '\x00' * length
def read_fastcgi_record(stream):
"""reads the main fast cgi record"""
data = stream.read(8) # read record
if not data:
# no more data, our other process must have died...
raise _ExitException()
fcgi_ver, reqtype, req_id, content_size, padding_len, _ = struct.unpack('>BBHHBB', data)
content = stream.read(content_size) # read content
stream.read(padding_len)
if fcgi_ver != FCGI_VERSION_1:
raise Exception('Unknown fastcgi version %s' % fcgi_ver)
processor = REQUEST_PROCESSORS.get(reqtype)
if processor is not None:
return processor(stream, req_id, content)
# unknown type requested, send response
log('Unknown request type %s' % reqtype)
send_response(stream, req_id, FCGI_UNKNOWN_TYPE, chr(reqtype) + zero_bytes(7))
return None
def read_fastcgi_begin_request(stream, req_id, content):
"""reads the begin request body and updates our _REQUESTS table to include
the new request"""
# typedef struct {
# unsigned char roleB1;
# unsigned char roleB0;
# unsigned char flags;
# unsigned char reserved[5];
# } FCGI_BeginRequestBody;
# TODO: Ignore request if it exists
res = FastCgiRecord(
FCGI_BEGIN_REQUEST,
req_id,
(ord(content[0]) << 8) | ord(content[1]), # role
ord(content[2]), # flags
)
_REQUESTS[req_id] = res
def read_encoded_int(content, offset):
i = struct.unpack_from('>B', content, offset)[0]
if i < 0x80:
return offset + 1, i
return offset + 4, struct.unpack_from('>I', content, offset)[0] & ~0x80000000
def read_fastcgi_keyvalue_pairs(content, offset):
"""Reads a FastCGI key/value pair stream"""
offset, name_len = read_encoded_int(content, offset)
offset, value_len = read_encoded_int(content, offset)
name = content[offset:(offset + name_len)]
offset += name_len
value = content[offset:(offset + value_len)]
offset += value_len
return offset, name, value
def get_encoded_int(i):
"""Writes the length of a single name for a key or value in a key/value
stream"""
if i <= 0x7f:
return struct.pack('>B', i)
elif i < 0x80000000:
return struct.pack('>I', i | 0x80000000)
else:
raise ValueError('cannot encode value %s (%x) because it is too large' % (i, i))
def write_fastcgi_keyvalue_pairs(pairs):
"""Creates a FastCGI key/value stream and returns it as a byte string"""
parts = []
for raw_key, raw_value in pairs.items():
key = wsgi_encode(raw_key)
value = wsgi_encode(raw_value)
parts.append(get_encoded_int(len(key)))
parts.append(get_encoded_int(len(value)))
parts.append(key)
parts.append(value)
return bytes().join(parts)
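# --- Illustrative sketch (not part of the original module) -------------------
# Round-tripping a single name/value pair through the helpers above; an unused
# function kept purely for illustration (the example pair is hypothetical).
def _fastcgi_keyvalue_roundtrip_sketch():
    encoded = write_fastcgi_keyvalue_pairs({'SCRIPT_NAME': '/app'})
    offset, name, value = read_fastcgi_keyvalue_pairs(encoded, 0)
    # On Python 3: offset == 17, name == b'SCRIPT_NAME', value == b'/app'
    return offset, name, value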
# Keys in this set will be stored in the record without modification but with a
# 'wsgi.' prefix. The original key will have the decoded version.
# (Following mod_wsgi from http://wsgi.readthedocs.org/en/latest/python3.html)
RAW_VALUE_NAMES = {
'SCRIPT_NAME' : 'wsgi.script_name',
'PATH_INFO' : 'wsgi.path_info',
'QUERY_STRING' : 'wsgi.query_string',
'HTTP_X_ORIGINAL_URL' : 'wfastcgi.http_x_original_url',
}
def read_fastcgi_params(stream, req_id, content):
if not content:
return None
offset = 0
res = _REQUESTS[req_id].params
while offset < len(content):
offset, name, value = read_fastcgi_keyvalue_pairs(content, offset)
name = wsgi_decode(name)
raw_name = RAW_VALUE_NAMES.get(name)
if raw_name:
res[raw_name] = value
res[name] = wsgi_decode(value)
def read_fastcgi_input(stream, req_id, content):
"""reads FastCGI std-in and stores it in wsgi.input passed in the
wsgi environment array"""
res = _REQUESTS[req_id].params
if 'wsgi.input' not in res:
res['wsgi.input'] = content
else:
res['wsgi.input'] += content
if not content:
# we've hit the end of the input stream, time to process input...
return _REQUESTS[req_id]
def read_fastcgi_data(stream, req_id, content):
"""reads FastCGI data stream and publishes it as wsgi.data"""
res = _REQUESTS[req_id].params
if 'wsgi.data' not in res:
res['wsgi.data'] = content
else:
res['wsgi.data'] += content
def read_fastcgi_abort_request(stream, req_id, content):
"""reads the wsgi abort request, which we ignore, we'll send the
finish execution request anyway..."""
pass
def read_fastcgi_get_values(stream, req_id, content):
"""reads the fastcgi request to get parameter values, and immediately
responds"""
offset = 0
request = {}
while offset < len(content):
offset, name, value = read_fastcgi_keyvalue_pairs(content, offset)
request[name] = value
response = {}
if FCGI_MAX_CONNS in request:
response[FCGI_MAX_CONNS] = '1'
if FCGI_MAX_REQS in request:
response[FCGI_MAX_REQS] = '1'
if FCGI_MPXS_CONNS in request:
response[FCGI_MPXS_CONNS] = '0'
send_response(
stream,
req_id,
FCGI_GET_VALUES_RESULT,
write_fastcgi_keyvalue_pairs(response)
)
# Our request processors for different FastCGI protocol requests. Only those
# requests that we receive are defined here.
REQUEST_PROCESSORS = {
FCGI_BEGIN_REQUEST : read_fastcgi_begin_request,
FCGI_ABORT_REQUEST : read_fastcgi_abort_request,
FCGI_PARAMS : read_fastcgi_params,
FCGI_STDIN : read_fastcgi_input,
FCGI_DATA : read_fastcgi_data,
FCGI_GET_VALUES : read_fastcgi_get_values
}
APPINSIGHT_CLIENT = None
def log(txt):
"""Logs messages to a log file if WSGI_LOG env var is defined."""
if APPINSIGHT_CLIENT:
try:
APPINSIGHT_CLIENT.track_event(txt)
except:
pass
log_file = os.environ.get('WSGI_LOG')
if log_file:
with open(log_file, 'a+', encoding='utf-8') as f:
txt = txt.replace('\r\n', '\n')
f.write('%s: %s%s' % (datetime.datetime.now(), txt, '' if txt.endswith('\n') else '\n'))
def maybe_log(txt):
"""Logs messages to a log file if WSGI_LOG env var is defined, and does not
raise exceptions if logging fails."""
try:
log(txt)
except:
pass
def send_response(stream, req_id, resp_type, content, streaming=True):
"""sends a response w/ the given id, type, and content to the server.
If the content is streaming then an empty record is sent at the end to
terminate the stream"""
if not isinstance(content, bytes):
raise TypeError("content must be encoded before sending: %r" % content)
offset = 0
while True:
len_remaining = max(min(len(content) - offset, 0xFFFF), 0)
data = struct.pack(
'>BBHHBB',
FCGI_VERSION_1, # version
resp_type, # type
req_id, # requestIdB1:B0
len_remaining, # contentLengthB1:B0
0, # paddingLength
0, # reserved
) + content[offset:(offset + len_remaining)]
offset += len_remaining
os.write(stream.fileno(), data)
if len_remaining == 0 or not streaming:
break
stream.flush()
def get_environment(dir):
web_config = os.path.join(dir, 'Web.config')
if not os.path.exists(web_config):
return {}
d = {}
doc = minidom.parse(web_config)
config = doc.getElementsByTagName('configuration')
for configSection in config:
appSettings = configSection.getElementsByTagName('appSettings')
for appSettingsSection in appSettings:
values = appSettingsSection.getElementsByTagName('add')
for curAdd in values:
key = curAdd.getAttribute('key')
value = curAdd.getAttribute('value')
if key and value is not None:
d[key.strip()] = value
return d
ReadDirectoryChangesW = ctypes.windll.kernel32.ReadDirectoryChangesW
ReadDirectoryChangesW.restype = ctypes.c_uint32
ReadDirectoryChangesW.argtypes = [
ctypes.c_void_p, # HANDLE hDirectory
ctypes.c_void_p, # LPVOID lpBuffer
ctypes.c_uint32, # DWORD nBufferLength
ctypes.c_uint32, # BOOL bWatchSubtree
ctypes.c_uint32, # DWORD dwNotifyFilter
ctypes.POINTER(ctypes.c_uint32), # LPDWORD lpBytesReturned
ctypes.c_void_p, # LPOVERLAPPED lpOverlapped
ctypes.c_void_p # LPOVERLAPPED_COMPLETION_ROUTINE lpCompletionRoutine
]
try:
from _winapi import (CreateFile, CloseHandle, GetLastError, ExitProcess,
WaitForSingleObject, INFINITE, OPEN_EXISTING)
except ImportError:
CreateFile = ctypes.windll.kernel32.CreateFileW
CreateFile.restype = ctypes.c_void_p
CreateFile.argtypes = [
ctypes.c_wchar_p, # lpFilename
ctypes.c_uint32, # dwDesiredAccess
ctypes.c_uint32, # dwShareMode
ctypes.c_void_p, # LPSECURITY_ATTRIBUTES,
ctypes.c_uint32, # dwCreationDisposition,
ctypes.c_uint32, # dwFlagsAndAttributes,
ctypes.c_void_p # hTemplateFile
]
CloseHandle = ctypes.windll.kernel32.CloseHandle
CloseHandle.argtypes = [ctypes.c_void_p]
GetLastError = ctypes.windll.kernel32.GetLastError
GetLastError.restype = ctypes.c_uint32
ExitProcess = ctypes.windll.kernel32.ExitProcess
ExitProcess.restype = ctypes.c_void_p
ExitProcess.argtypes = [ctypes.c_uint32]
WaitForSingleObject = ctypes.windll.kernel32.WaitForSingleObject
WaitForSingleObject.argtypes = [ctypes.c_void_p, ctypes.c_uint32]
WaitForSingleObject.restype = ctypes.c_uint32
OPEN_EXISTING = 3
INFINITE = -1
FILE_LIST_DIRECTORY = 1
FILE_SHARE_READ = 0x00000001
FILE_SHARE_WRITE = 0x00000002
FILE_SHARE_DELETE = 0x00000004
FILE_FLAG_BACKUP_SEMANTICS = 0x02000000
MAX_PATH = 260
FILE_NOTIFY_CHANGE_LAST_WRITE = 0x10
ERROR_NOTIFY_ENUM_DIR = 1022
INVALID_HANDLE_VALUE = 0xFFFFFFFF
class FILE_NOTIFY_INFORMATION(ctypes.Structure):
_fields_ = [('NextEntryOffset', ctypes.c_uint32),
('Action', ctypes.c_uint32),
('FileNameLength', ctypes.c_uint32),
('Filename', ctypes.c_wchar)]
_ON_EXIT_TASKS = None
def run_exit_tasks():
global _ON_EXIT_TASKS
maybe_log("Running on_exit tasks")
while _ON_EXIT_TASKS:
tasks, _ON_EXIT_TASKS = _ON_EXIT_TASKS, []
for t in tasks:
try:
t()
except Exception:
maybe_log("Error in exit task: " + traceback.format_exc())
def on_exit(task):
global _ON_EXIT_TASKS
if _ON_EXIT_TASKS is None:
_ON_EXIT_TASKS = tasks = []
try:
evt = int(os.getenv('_FCGI_SHUTDOWN_EVENT_'))
except (TypeError, ValueError):
maybe_log("Could not wait on event %s" % os.getenv('_FCGI_SHUTDOWN_EVENT_'))
else:
def _wait_for_exit():
WaitForSingleObject(evt, INFINITE)
run_exit_tasks()
ExitProcess(0)
start_new_thread(_wait_for_exit, ())
_ON_EXIT_TASKS.append(task)
def start_file_watcher(path, restart_regex):
if restart_regex is None:
restart_regex = ".*((\\.py)|(\\.config))$"
elif not restart_regex:
# restart regex set to empty string, no restart behavior
return
def enum_changes(path):
"""Returns a generator that blocks until a change occurs, then yields
the filename of the changed file.
Yields an empty string and stops if the buffer overruns, indicating that
too many files were changed."""
buffer = ctypes.create_string_buffer(32 * 1024)
bytes_ret = ctypes.c_uint32()
try:
the_dir = CreateFile(
path,
FILE_LIST_DIRECTORY,
FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
0,
OPEN_EXISTING,
FILE_FLAG_BACKUP_SEMANTICS,
0,
)
except OSError:
maybe_log("Unable to create watcher")
return
if not the_dir or the_dir == INVALID_HANDLE_VALUE:
maybe_log("Unable to create watcher")
return
while True:
ret_code = ReadDirectoryChangesW(
the_dir,
buffer,
ctypes.sizeof(buffer),
True,
FILE_NOTIFY_CHANGE_LAST_WRITE,
ctypes.byref(bytes_ret),
None,
None,
)
if ret_code:
cur_pointer = ctypes.addressof(buffer)
while True:
fni = ctypes.cast(cur_pointer, ctypes.POINTER(FILE_NOTIFY_INFORMATION))
# FileName is not null-terminated, so specifying length is mandatory.
filename = ctypes.wstring_at(cur_pointer + 12, fni.contents.FileNameLength // 2)
yield filename
if fni.contents.NextEntryOffset == 0:
break
cur_pointer = cur_pointer + fni.contents.NextEntryOffset
elif GetLastError() == ERROR_NOTIFY_ENUM_DIR:
CloseHandle(the_dir)
yield ''
return
else:
CloseHandle(the_dir)
return
log('wfastcgi.py will restart when files in %s are changed: %s' % (path, restart_regex))
def watcher(path, restart):
for filename in enum_changes(path):
if not filename:
log('wfastcgi.py exiting because the buffer was full')
run_exit_tasks()
ExitProcess(0)
elif restart.match(filename):
log('wfastcgi.py exiting because %s has changed, matching %s' % (filename, restart_regex))
# we call ExitProcess directly to quickly shutdown the whole process
# because sys.exit(0) won't have an effect on the main thread.
run_exit_tasks()
ExitProcess(0)
restart = re.compile(restart_regex)
start_new_thread(watcher, (path, restart))
def get_wsgi_handler(handler_name):
if not handler_name:
raise Exception('WSGI_HANDLER env var must be set')
if not isinstance(handler_name, str):
handler_name = to_str(handler_name)
module_name, _, callable_name = handler_name.rpartition('.')
should_call = callable_name.endswith('()')
callable_name = callable_name[:-2] if should_call else callable_name
name_list = [(callable_name, should_call)]
handler = None
last_tb = ''
while module_name:
try:
handler = __import__(module_name, fromlist=[name_list[0][0]])
last_tb = ''
for name, should_call in name_list:
handler = getattr(handler, name)
if should_call:
handler = handler()
break
except ImportError:
module_name, _, callable_name = module_name.rpartition('.')
should_call = callable_name.endswith('()')
callable_name = callable_name[:-2] if should_call else callable_name
name_list.insert(0, (callable_name, should_call))
handler = None
last_tb = ': ' + traceback.format_exc()
if handler is None:
raise ValueError('"%s" could not be imported%s' % (handler_name, last_tb))
return handler
def read_wsgi_handler(physical_path):
global APPINSIGHT_CLIENT
env = get_environment(physical_path)
os.environ.update(env)
for path in (v for k, v in env.items() if k.lower() == 'pythonpath'):
# Expand environment variables manually.
expanded_path = re.sub(
'%(\\w+?)%',
lambda m: os.getenv(m.group(1), ''),
path
)
sys.path.extend(fs_encode(p) for p in expanded_path.split(';') if p)
handler = get_wsgi_handler(os.getenv("WSGI_HANDLER"))
instr_key = os.getenv("APPINSIGHTS_INSTRUMENTATIONKEY")
if instr_key:
try:
# Attempt the import after updating sys.path - sites must
# include applicationinsights themselves.
from applicationinsights.requests import WSGIApplication
except ImportError:
maybe_log("Failed to import applicationinsights: " + traceback.format_exc())
else:
handler = WSGIApplication(instr_key, handler)
APPINSIGHT_CLIENT = handler.client
# Ensure we will flush any remaining events when we exit
on_exit(handler.client.flush)
return env, handler
class handle_response(object):
"""A context manager for handling the response. This will ensure that
exceptions in the handler are correctly reported, and the FastCGI request is
properly terminated.
"""
def __init__(self, stream, record, get_output, get_errors):
self.stream = stream
self.record = record
self._get_output = get_output
self._get_errors = get_errors
self.error_message = ''
self.fatal_errors = False
self.physical_path = ''
self.header_bytes = None
self.sent_headers = False
def __enter__(self):
record = self.record
record.params['wsgi.input'] = BytesIO(record.params['wsgi.input'])
record.params['wsgi.version'] = (1, 0)
record.params['wsgi.url_scheme'] = 'https' if record.params.get('HTTPS', '').lower() == 'on' else 'http'
record.params['wsgi.multiprocess'] = True
record.params['wsgi.multithread'] = False
record.params['wsgi.run_once'] = False
self.physical_path = record.params.get('APPL_PHYSICAL_PATH', os.path.dirname(__file__))
if 'HTTP_X_ORIGINAL_URL' in record.params:
# We've been re-written for shared FastCGI hosting, so send the
# original URL as PATH_INFO.
record.params['PATH_INFO'] = record.params['HTTP_X_ORIGINAL_URL']
record.params['wsgi.path_info'] = record.params['wfastcgi.http_x_original_url']
# PATH_INFO is not supposed to include the query parameters, so remove them
record.params['PATH_INFO'] = record.params['PATH_INFO'].partition('?')[0]
record.params['wsgi.path_info'] = record.params['wsgi.path_info'].partition(wsgi_encode('?'))[0]
return self
def __exit__(self, exc_type, exc_value, exc_tb):
# Send any error message on FCGI_STDERR.
if exc_type and exc_type is not _ExitException:
error_msg = "%s:\n\n%s\n\nStdOut: %s\n\nStdErr: %s" % (
self.error_message or 'Error occurred',
''.join(traceback.format_exception(exc_type, exc_value, exc_tb)),
self._get_output(),
self._get_errors(),
)
if not self.header_bytes or not self.sent_headers:
self.header_bytes = wsgi_encode('Status: 500 Internal Server Error\r\n')
self.send(FCGI_STDERR, wsgi_encode(error_msg))
# Best effort at writing to the log. It's more important to
# finish the response or the user will only see a generic 500
# error.
maybe_log(error_msg)
# End the request. This has to run in both success and failure cases.
self.send(FCGI_END_REQUEST, zero_bytes(8), streaming=False)
# Remove the request from our global dict
del _REQUESTS[self.record.req_id]
# Suppress all exceptions unless requested
return not self.fatal_errors
@staticmethod
def _decode_header(key, value):
if not isinstance(key, str):
key = wsgi_decode(key)
if not isinstance(value, str):
value = wsgi_decode(value)
return key, value
def start(self, status, headers, exc_info=None):
"""Starts sending the response. The response is ended when the context
manager exits."""
if exc_info:
try:
if self.sent_headers:
# We have to re-raise if we've already started sending data.
raise exception_with_traceback(exc_info[1], exc_info[2])
finally:
exc_info = None
elif self.header_bytes:
raise Exception('start_response has already been called')
if not isinstance(status, str):
status = wsgi_decode(status)
header_text = 'Status: %s\r\n' % status
if headers:
header_text += ''.join('%s: %s\r\n' % handle_response._decode_header(*i) for i in headers)
self.header_bytes = wsgi_encode(header_text + '\r\n')
return lambda content: self.send(FCGI_STDOUT, content)
def send(self, resp_type, content, streaming=True):
'''Sends part of the response.'''
if not self.sent_headers:
if not self.header_bytes:
raise Exception("start_response has not yet been called")
self.sent_headers = True
send_response(self.stream, self.record.req_id, FCGI_STDOUT, self.header_bytes)
self.header_bytes = None
return send_response(self.stream, self.record.req_id, resp_type, content, streaming)
_REQUESTS = {}
def main():
initialized = False
log('wfastcgi.py %s started' % __version__)
log('Python version: %s' % sys.version)
try:
fcgi_stream = sys.stdin.detach() if sys.version_info[0] >= 3 else sys.stdin
try:
import msvcrt
msvcrt.setmode(fcgi_stream.fileno(), os.O_BINARY)
except ImportError:
pass
while True:
record = read_fastcgi_record(fcgi_stream)
if not record:
continue
errors = sys.stderr = sys.__stderr__ = record.params['wsgi.errors'] = StringIO()
output = sys.stdout = sys.__stdout__ = StringIO()
with handle_response(fcgi_stream, record, output.getvalue, errors.getvalue) as response:
if not initialized:
log('wfastcgi.py %s initializing' % __version__)
os.chdir(response.physical_path)
sys.path[0] = '.'
# Initialization errors should be treated as fatal.
response.fatal_errors = True
response.error_message = 'Error occurred while reading WSGI handler'
env, handler = read_wsgi_handler(response.physical_path)
response.error_message = 'Error occurred starting file watcher'
start_file_watcher(response.physical_path, env.get('WSGI_RESTART_FILE_REGEX'))
response.error_message = ''
response.fatal_errors = False
log('wfastcgi.py %s initialized' % __version__)
initialized = True
os.environ.update(env)
# SCRIPT_NAME + PATH_INFO is supposed to be the full path
# (http://www.python.org/dev/peps/pep-0333/) but by default
# (http://msdn.microsoft.com/en-us/library/ms525840(v=vs.90).aspx)
# IIS is sending us the full URL in PATH_INFO, so we need to
# clear the script name here
if 'AllowPathInfoForScriptMappings' not in os.environ:
record.params['SCRIPT_NAME'] = ''
record.params['wsgi.script_name'] = wsgi_encode('')
# correct SCRIPT_NAME and PATH_INFO if we are told what our SCRIPT_NAME should be
if 'SCRIPT_NAME' in os.environ and record.params['PATH_INFO'].lower().startswith(os.environ['SCRIPT_NAME'].lower()):
record.params['SCRIPT_NAME'] = os.environ['SCRIPT_NAME']
record.params['PATH_INFO'] = record.params['PATH_INFO'][len(record.params['SCRIPT_NAME']):]
record.params['wsgi.script_name'] = wsgi_encode(record.params['SCRIPT_NAME'])
record.params['wsgi.path_info'] = wsgi_encode(record.params['PATH_INFO'])
# Send each part of the response to FCGI_STDOUT.
# Exceptions raised in the handler will be logged by the context
# manager and we will then wait for the next record.
result = handler(record.params, response.start)
try:
for part in result:
if part:
response.send(FCGI_STDOUT, part)
finally:
if hasattr(result, 'close'):
result.close()
except _ExitException:
pass
except Exception:
maybe_log('Unhandled exception in wfastcgi.py: ' + traceback.format_exc())
except BaseException:
maybe_log('Unhandled exception in wfastcgi.py: ' + traceback.format_exc())
raise
finally:
run_exit_tasks()
maybe_log('wfastcgi.py %s closed' % __version__)
def _run_appcmd(args):
from subprocess import check_call, CalledProcessError
if len(sys.argv) > 1 and os.path.isfile(sys.argv[1]):
appcmd = sys.argv[1:]
else:
appcmd = [os.path.join(os.getenv('SystemRoot'), 'system32', 'inetsrv', 'appcmd.exe')]
if not os.path.isfile(appcmd[0]):
print('IIS configuration tool appcmd.exe was not found at', appcmd, file=sys.stderr)
return -1
args = appcmd + args
try:
return check_call(args)
except CalledProcessError as ex:
print('''An error occurred running the command:
%r
Ensure your user has sufficient privileges and try again.''' % args, file=sys.stderr)
return ex.returncode
def enable():
res = _run_appcmd([
"set", "config", "/section:system.webServer/fastCGI",
"/+[fullPath='" + sys.executable + "', arguments='" + __file__ + "', signalBeforeTerminateSeconds='30']"
])
if res == 0:
print('"%s|%s" can now be used as a FastCGI script processor' % (sys.executable, __file__))
return res
def disable():
res = _run_appcmd([
"set", "config", "/section:system.webServer/fastCGI",
"/-[fullPath='" + sys.executable + "', arguments='" + __file__ + "', signalBeforeTerminateSeconds='30']"
])
if res == 0:
print('"%s|%s" is no longer registered for use with FastCGI' % (sys.executable, __file__))
return res
if __name__ == '__main__':
main()
|
{
"content_hash": "9bc4ab34094161d638846b7d92653e57",
"timestamp": "",
"source": "github",
"line_count": 874,
"max_line_length": 132,
"avg_line_length": 34.62929061784897,
"alnum_prop": 0.5951562809753519,
"repo_name": "DinoV/PTVS",
"id": "ac537c99e4bc6699b094f195f717ed2b91f784b6",
"size": "30973",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/Product/WFastCgi/wfastcgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "109"
},
{
"name": "Batchfile",
"bytes": "4035"
},
{
"name": "C",
"bytes": "4974"
},
{
"name": "C#",
"bytes": "13192050"
},
{
"name": "C++",
"bytes": "187194"
},
{
"name": "CSS",
"bytes": "7024"
},
{
"name": "HTML",
"bytes": "45289"
},
{
"name": "JavaScript",
"bytes": "85712"
},
{
"name": "Objective-C",
"bytes": "4201"
},
{
"name": "PowerShell",
"bytes": "135280"
},
{
"name": "Python",
"bytes": "943244"
},
{
"name": "Smarty",
"bytes": "8356"
},
{
"name": "Tcl",
"bytes": "24968"
}
],
"symlink_target": ""
}
|
from spaceconfig import *
from mesh import *
from operator import *
from oplib import *
from generator import *
#__all__=['mesh','spaceconfig','operator','oplib','generator']
|
{
"content_hash": "db2a1c6861e6de07ccdf6c7d2522d11b",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 62,
"avg_line_length": 29.166666666666668,
"alnum_prop": 0.7257142857142858,
"repo_name": "Lynn-015/NJU_DMRG",
"id": "f6c7b6e199a36511c6b1c255894ff0d38643adba",
"size": "175",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "giggleliu/tba/hgen/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5106"
},
{
"name": "Makefile",
"bytes": "13010"
},
{
"name": "Python",
"bytes": "314933"
}
],
"symlink_target": ""
}
|
import os
import sys
from pkg_resources import get_distribution
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'friend'
copyright = u'2017, Joseph Wright'
author = u'Joseph Wright'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = get_distribution('friend').version
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'frienddoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'friend.tex', u'Friend Documentation',
u'Joseph Wright', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'friend', u'Friend Documentation', [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'friend', u'Friend Documentation', author,
'friend', 'A Python utility library.', 'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
|
{
"content_hash": "925a362e1a5dd0c60a21e5652b399983",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 78,
"avg_line_length": 29.266666666666666,
"alnum_prop": 0.6655622282045972,
"repo_name": "cloudboss/friend",
"id": "803e2788f54dae819dfae52e888beca9195ebb77",
"size": "5488",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44795"
}
],
"symlink_target": ""
}
|
"""
AppShell provides a GUI application framework.
This is a streamlined adaptation of GuiAppD.py, originally
created by Doug Hellmann (doughellmann@mindspring.com).
"""
from Tkinter import *
import Pmw
import sys, string
import ProgressBar
class AppShell(Pmw.MegaWidget):
appversion = '1.0'
appname = 'Generic Application Frame'
copyright = 'ANL-APS-AOD-BCDA, All Rights Reserved.'
contactname = 'Ben-chin K Cha'
contactphone = '(630) 252-8653'
contactemail = 'cha@aps.anl.gov'
frameWidth = 450
frameHeight = 320
padx = 5
pady = 5
usecommandarea = 0
balloonhelp = 1
busyCursor = 'watch'
def __init__(self, **kw):
optiondefs = (
('padx', 1, Pmw.INITOPT),
('pady', 1, Pmw.INITOPT),
('framewidth', 1, Pmw.INITOPT),
('frameheight', 1, Pmw.INITOPT),
('usecommandarea', self.usecommandarea, Pmw.INITOPT))
self.defineoptions(kw, optiondefs)
self.root = Tk()
self.initializeTk(self.root)
Pmw.initialise(self.root)
self.root.title(self.appname)
self.root.geometry('%dx%d' % (self.frameWidth, self.frameHeight))
# Initialize the base class
Pmw.MegaWidget.__init__(self, parent=self.root)
# initialize the application
self.appInit()
# create the interface
self.__createInterface()
# create a table to hold the cursors for
# widgets which get changed when we go busy
self.preBusyCursors = None
# pack the container and set focus
# to ourselves
self._hull.pack(side=TOP, fill=BOTH, expand=YES)
self.focus_set()
# initialize our options
self.initialiseoptions(AppShell)
def appInit(self):
# Called before interface is created (should be overridden).
pass
def initializeTk(self, root):
# Initialize platform-specific options
if sys.platform == 'mac':
self.__initializeTk_mac(root)
elif sys.platform == 'win32':
self.__initializeTk_win32(root)
else:
self.__initializeTk_unix(root)
def __initializeTk_colors_common(self, root):
root.option_add('*background', 'grey')
root.option_add('*foreground', 'black')
root.option_add('*EntryField.Entry.background', 'white')
root.option_add('*Entry.background', 'white')
root.option_add('*MessageBar.Entry.background', 'gray85')
root.option_add('*Listbox*background', 'white')
root.option_add('*Listbox*selectBackground', 'dark slate blue')
root.option_add('*Listbox*selectForeground', 'white')
def __initializeTk_win32(self, root):
self.__initializeTk_colors_common(root)
root.option_add('*Font', 'Verdana 10 bold')
root.option_add('*EntryField.Entry.Font', 'Courier 10')
root.option_add('*Listbox*Font', 'Courier 10')
def __initializeTk_mac(self, root):
self.__initializeTk_colors_common(root)
def __initializeTk_unix(self, root):
self.__initializeTk_colors_common(root)
def busyStart(self, newcursor=None):
if not newcursor:
newcursor = self.busyCursor
newPreBusyCursors = {}
for component in self.busyWidgets:
newPreBusyCursors[component] = component['cursor']
component.configure(cursor=newcursor)
component.update_idletasks()
self.preBusyCursors = (newPreBusyCursors, self.preBusyCursors)
def busyEnd(self):
if not self.preBusyCursors:
return
oldPreBusyCursors = self.preBusyCursors[0]
self.preBusyCursors = self.preBusyCursors[1]
for component in self.busyWidgets:
try:
component.configure(cursor=oldPreBusyCursors[component])
except KeyError:
pass
component.update_idletasks()
def __createAboutBox(self):
Pmw.aboutversion(self.appversion)
Pmw.aboutcopyright(self.copyright)
Pmw.aboutcontact(
'For more information, contact:\n %s\n Phone: %s\n Email: %s' %\
(self.contactname, self.contactphone,
self.contactemail))
self.about = Pmw.AboutDialog(self._hull,
applicationname=self.appname)
self.about.withdraw()
return None
def showAbout(self):
# Create the dialog to display about and contact information.
self.about.show()
self.about.focus_set()
def toggleBalloon(self):
if self.toggleBalloonVar.get():
self.__balloon.configure(state = 'both')
else:
self.__balloon.configure(state = 'status')
def __createMenuBar(self):
self.menuBar = self.createcomponent('menubar', (), None,
Pmw.MenuBar,
(self._hull,),
hull_relief=RAISED,
hull_borderwidth=1,
balloon=self.balloon())
self.menuBar.pack(fill=X)
self.menuBar.addmenu('Help', 'About %s' % self.appname, side='right')
self.menuBar.addmenu('File', 'File commands and Quit')
self.menuBar.addmenu('Setup', 'Setup legends for multiline plot ')
def createMenuBar(self):
self.menuBar.addmenuitem('Help', 'command',
'Get information on application',
label='About...', command=self.showAbout)
self.toggleBalloonVar = IntVar()
self.toggleBalloonVar.set(1)
self.menuBar.addmenuitem('Help', 'checkbutton',
'Toggle balloon help',
label='Balloon help',
variable = self.toggleBalloonVar,
command=self.toggleBalloon)
# self.menuBar.addmenuitem('File', 'command', 'Quit this application',
# label='Quit',
# command=self.quit)
def __createBalloon(self):
# Create the balloon help manager for the frame.
# Create the manager for the balloon help
self.__balloon = self.createcomponent('balloon', (), None,
Pmw.Balloon, (self._hull,))
def balloon(self):
return self.__balloon
def __createDataArea(self):
# Create data area where data entry widgets are placed.
self.dataArea = self.createcomponent('dataarea',
(), None,
Frame, (self._hull,),
relief=GROOVE,
bd=1)
self.dataArea.pack(side=TOP, fill=BOTH, expand=YES,
padx=self['padx'], pady=self['pady'])
def __createCommandArea(self):
# Create a command area for application-wide buttons.
self.__commandFrame = self.createcomponent('commandframe', (), None,
Frame,
(self._hull,),
relief=SUNKEN,
bd=1)
self.__buttonBox = self.createcomponent('buttonbox', (), None,
Pmw.ButtonBox,
(self.__commandFrame,),
padx=0, pady=0)
self.__buttonBox.pack(side=TOP, expand=NO, fill=X)
if self['usecommandarea']:
self.__commandFrame.pack(side=TOP,
expand=NO,
fill=X,
padx=self['padx'],
pady=self['pady'])
def __createMessageBar(self):
# Create the message bar area for help and status messages.
frame = self.createcomponent('bottomtray', (), None,
Frame,(self._hull,), relief=SUNKEN)
self.__messageBar = self.createcomponent('messagebar',
(), None,
Pmw.MessageBar,
(frame,),
#entry_width = 40,
entry_relief=SUNKEN,
entry_bd=1,
labelpos=None)
self.__messageBar.pack(side=LEFT, expand=YES, fill=X)
self.__progressBar = ProgressBar.ProgressBar(frame,
fillColor='slateblue',
doLabel=1,
width=150)
self.__progressBar.frame.pack(side=LEFT, expand=NO, fill=NONE)
self.updateProgress(0)
frame.pack(side=BOTTOM, expand=NO, fill=X)
self.__balloon.configure(statuscommand = \
self.__messageBar.helpmessage)
def messageBar(self):
return self.__messageBar
def updateProgress(self, newValue=0, newMax=0):
self.__progressBar.updateProgress(newValue, newMax)
def bind(self, child, balloonHelpMsg, statusHelpMsg=None):
# Bind a help message and/or status message to a widget.
self.__balloon.bind(child, balloonHelpMsg, statusHelpMsg)
def interior(self):
# Retrieve the interior site where widgets should go.
return self.dataArea
def buttonBox(self):
# Retrieve the button box.
return self.__buttonBox
def buttonAdd(self, buttonName, helpMessage=None,
statusMessage=None, **kw):
# Add a button to the button box.
newBtn = self.__buttonBox.add(buttonName)
newBtn.configure(kw)
if helpMessage:
self.bind(newBtn, helpMessage, statusMessage)
return newBtn
def __createInterface(self):
self.__createBalloon()
self.__createMenuBar()
self.__createDataArea()
self.__createCommandArea()
self.__createMessageBar()
self.__createAboutBox()
#
# Create the parts of the interface
# which can be modified by subclasses
#
self.busyWidgets = ( self.root, )
self.createMenuBar()
self.createInterface()
def createInterface(self):
# Override this method to create the interface for the app.
pass
def main(self):
# This method should be left intact!
self.pack()
self.mainloop()
def run(self):
self.main()
class TestAppShell(AppShell):
usecommandarea=1
def createButtons(self):
self.buttonAdd('Ok',
helpMessage='Exit',
statusMessage='Exit',
command=self.quit)
def createMain(self):
self.label = self.createcomponent('label', (), None,
Label,
(self.interior(),),
text='Data Area')
self.label.pack()
self.bind(self.label, 'Space taker')
def createInterface(self):
AppShell.createInterface(self)
self.createButtons()
self.createMain()
if __name__ == '__main__':
test = TestAppShell(balloon_state='both')
test.run()
|
{
"content_hash": "de26805a674ab7aca965a36447de7c01",
"timestamp": "",
"source": "github",
"line_count": 319,
"max_line_length": 77,
"avg_line_length": 38.6833855799373,
"alnum_prop": 0.5,
"repo_name": "sniemi/SamPy",
"id": "db2d937465706d294a8bac8013fa9623e452d6c1",
"size": "12360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sandbox/src1/pviewer/AppShell.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "296"
},
{
"name": "C",
"bytes": "68436"
},
{
"name": "C++",
"bytes": "45956"
},
{
"name": "CSS",
"bytes": "35570"
},
{
"name": "Fortran",
"bytes": "45191"
},
{
"name": "HTML",
"bytes": "107435"
},
{
"name": "IDL",
"bytes": "13651"
},
{
"name": "JavaScript",
"bytes": "25435"
},
{
"name": "Makefile",
"bytes": "26035"
},
{
"name": "Matlab",
"bytes": "1508"
},
{
"name": "Perl",
"bytes": "59198"
},
{
"name": "PostScript",
"bytes": "1403536"
},
{
"name": "Prolog",
"bytes": "16061"
},
{
"name": "Python",
"bytes": "5763358"
},
{
"name": "R",
"bytes": "208346"
},
{
"name": "Rebol",
"bytes": "161"
},
{
"name": "Roff",
"bytes": "73616"
},
{
"name": "Ruby",
"bytes": "2032"
},
{
"name": "Shell",
"bytes": "41512"
},
{
"name": "Tcl",
"bytes": "44150"
},
{
"name": "TeX",
"bytes": "107783"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/item/shared_item_substance_analyzer.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "969060a7603a9411fe2da85e8447f06b",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 74,
"avg_line_length": 23.307692307692307,
"alnum_prop": 0.693069306930693,
"repo_name": "anhstudios/swganh",
"id": "aa763ead94fa17dff91d3cd22e10acbd48f34c7b",
"size": "448",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/static/item/shared_item_substance_analyzer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
import re
import sys
from pathlib import Path
from rich.console import Console
if __name__ not in ("__main__", "__mp_main__"):
raise SystemExit(
"This file is intended to be executed as an executable program. You cannot use it as a module."
f"To run this script, run the ./{__file__} command [FILE] ..."
)
console = Console(color_system="standard", width=200)
def _check_file(file: Path) -> list:
content = file.read_text()
return re.findall(r' +\:type .+?\:', content)
def _join_with_newline(list_):
return '\n'.join(list_)
if __name__ == '__main__':
error_list = []
for file in sys.argv[1:]:
matches = _check_file(Path(file))
if matches:
error_list.append((file, matches))
if error_list:
error_message = '\n'.join([f"{f}: \n{_join_with_newline(m)}" for f, m in error_list])
console.print(
f"""
[red]Found files with types specified in docstring.
This is no longer needed since sphinx can now infer types from type annotations.[/]
{error_message}
"""
)
sys.exit(1)
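# A minimal sketch (hypothetical function, not from this repository) of the kind of
# docstring line the regex above flags, and how the same information is expressed
# with type annotations instead:
#
#   Flagged (type duplicated in the docstring):
#
#       def fetch(url, retries):
#           """Fetch a URL.
#           :param retries: number of attempts
#           :type retries: int
#           """
#
#   Preferred (Sphinx infers the type from the annotation):
#
#       def fetch(url: str, retries: int):
#           """Fetch a URL.
#           :param retries: number of attempts
#           """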
|
{
"content_hash": "d47786c5dca8e27bf980792ae08f9072",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 103,
"avg_line_length": 26.73170731707317,
"alnum_prop": 0.6021897810218978,
"repo_name": "bolkedebruin/airflow",
"id": "38f7d4081c2d6178308c36cf14786fc273862dd2",
"size": "1904",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/ci/pre_commit/pre_commit_docstring_param_type.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25286"
},
{
"name": "Dockerfile",
"bytes": "40459"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "157840"
},
{
"name": "JavaScript",
"bytes": "167972"
},
{
"name": "Jinja",
"bytes": "33382"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "19287942"
},
{
"name": "Shell",
"bytes": "645244"
},
{
"name": "TypeScript",
"bytes": "173854"
}
],
"symlink_target": ""
}
|
"""
tipfy.appengine.db
~~~~~~~~~~~~~~~~~~
Datastore utilities extension.
:copyright: 2011 by tipfy.org.
:license: BSD, see LICENSE.txt for more details.
"""
import logging
import time
from google.appengine.api import datastore_errors
from google.appengine.api.namespace_manager import namespace_manager
from google.appengine.ext import db
from werkzeug import abort
def get_protobuf_from_entity(entities):
"""Converts one or more ``db.Model`` instances to encoded Protocol Buffers.
    This is useful to store entities in memcache, and is preferable to storing
    the entities directly as it has slightly better performance and avoids
crashes when unpickling (when, for example, the entity class is moved to a
different module).
Cached protobufs can be de-serialized using
:func:`get_entity_from_protobuf`. Example::
from google.appengine.api import memcache
from tipfy.appengine.db import get_protobuf_from_entity
# Inside a handler, given that a MyModel model is defined.
entity = MyModel(key_name='foo')
entity.put()
# Cache the protobuf.
memcache.set('my-cache-key', get_protobuf_from_entity(entity))
This function derives from `Nick's Blog <http://blog.notdot.net/2009/9/Efficient-model-memcaching>`_.
:param entities:
A single or a list of ``db.Model`` instances to be serialized.
:returns:
One or more entities serialized to Protocol Buffer (a string or a
list).
"""
if not entities:
return None
elif isinstance(entities, db.Model):
return db.model_to_protobuf(entities).Encode()
elif isinstance(entities, dict):
return dict((k, db.model_to_protobuf(v).Encode()) for k, v in \
entities.iteritems())
else:
return [db.model_to_protobuf(x).Encode() for x in entities]
def get_entity_from_protobuf(data):
"""Converts one or more encoded Protocol Buffers to ``db.Model`` instances.
This is used to de-serialize entities previously serialized using
:func:`get_protobuf_from_entity`. After retrieving an entity protobuf
from memcache, this converts it back to a ``db.Model`` instance.
Example::
from google.appengine.api import memcache
from tipfy.appengine.db import get_entity_from_protobuf
# Get the protobuf from cache and de-serialize it.
protobuf = memcache.get('my-cache-key')
if protobuf:
entity = get_entity_from_protobuf(protobuf)
This function derives from `Nick's Blog <http://blog.notdot.net/2009/9/Efficient-model-memcaching>`_.
:param data:
One or more entities serialized to Protocol Buffer (a string or a
list).
:returns:
One or more entities de-serialized from Protocol Buffers (a
        ``db.Model`` instance or a list of ``db.Model`` instances).
"""
if not data:
return None
elif isinstance(data, str):
return db.model_from_protobuf(data)
elif isinstance(data, dict):
return dict((k, db.model_from_protobuf(v)) for k, v in data.iteritems())
else:
return [db.model_from_protobuf(x) for x in data]
def get_reference_key(entity, prop_name):
"""Returns a encoded key from a ``db.ReferenceProperty`` without fetching
the referenced entity. Example::
from google.appengine.ext import db
from tipfy.appengine.db import get_reference_key
# Set a book entity with an author reference.
class Author(db.Model):
name = db.StringProperty()
class Book(db.Model):
title = db.StringProperty()
author = db.ReferenceProperty(Author)
author = Author(name='Stephen King')
author.put()
book = Book(key_name='the-shining', title='The Shining', author=author)
book.put()
# Now let's fetch the book and get the author key without fetching it.
fetched_book = Book.get_by_key_name('the-shining')
assert str(author.key()) == str(get_reference_key(fetched_book,
'author'))
:param entity:
A ``db.Model`` instance.
:param prop_name:
The name of the ``db.ReferenceProperty`` property.
:returns:
An entity Key, as a string.
"""
return getattr(entity.__class__, prop_name).get_value_for_datastore(entity)
def populate_entity(entity, **kwargs):
"""Sets a batch of property values in an entity. This is useful to set
multiple properties coming from a form or set in a dictionary. Example::
from google.appengine.ext import db
from tipfy.appengine.db import populate_entity
class Author(db.Model):
name = db.StringProperty(required=True)
city = db.StringProperty()
state = db.StringProperty()
country = db.StringProperty()
# Save an author entity.
author = Author(key_name='stephen-king', name='Stephen King')
author.put()
# Now let's update the record.
author = Author.get_by_key_name('stephen-king')
populate_entity(author, city='Lovell', state='Maine', country='USA')
author.put()
:param entity:
A ``db.Model`` instance.
:param kwargs:
Keyword arguments for each entity property value.
:returns:
None.
"""
properties = get_entity_properties(entity)
for key, value in kwargs.iteritems():
if key in properties:
setattr(entity, key, value)
def get_entity_properties(entity):
"""Returns a list with all property names in an entity.
:param entity:
A ``db.Model`` instance.
:returns:
A list with all property names in the entity.
"""
return entity.properties().keys() + entity.dynamic_properties()
def get_entity_dict(entities):
"""Returns a dictionary with all the properties and values in an entity.
:param entities:
One or more ``db.Model`` instances.
:returns:
A dictionary or a list of dictionaries mapping property names to
values.
"""
if isinstance(entities, db.Model):
return _get_entity_dict(entities)
return [_get_entity_dict(e) for e in entities]
def _get_entity_dict(entity):
"""See :func:`get_entity_dict`."""
return dict((k, getattr(entity, k)) for k in get_entity_properties(entity))
def get_or_insert_with_flag(model, key_name, **kwargs):
"""Transactionally retrieve or create an instance of ``db.Model`` class.
This is the same as ``db.Model.get_or_insert()``, but it returns a tuple
``(entity, flag)`` to indicate if the entity was inserted. If the entity
is inserted, the flag is True, otherwise it is False. Example::
from google.appengine.ext import db
from tipfy.appengine.db import get_or_insert_with_flag
class Author(db.Model):
name = db.StringProperty()
author, is_new = get_or_insert_with_flag(Author, 'stephen-king',
name='Stephen King')
:param model:
A ``db.Model`` class to fetch or create an entity.
:param key_name:
The entity's key name.
:param kwargs:
        Keyword arguments to create an entity, if it doesn't exist yet.
:returns:
A tuple ``(entity, flag)``, where entity is the fetched or inserted
entity and flag is a boolean True if the entity was inserted or
False if it existed already.
"""
def txn():
entity = model.get_by_key_name(key_name, parent=kwargs.get('parent'))
if entity:
return (entity, False)
entity = model(key_name=key_name, **kwargs)
entity.put()
return (entity, True)
return db.run_in_transaction(txn)
def get_or_404(*args, **kwargs):
"""Returns a model instance fetched by key or raises a 404 Not Found error.
    Example::
from tipfy import RequestHandler
from tipfy.appengine.db import get_or_404
from mymodels import Contact
class EditContactHandler(RequestHandler):
def get(self, **kwargs):
contact = get_or_404(kwargs['contact_key'])
# ... continue processing contact ...
This function derives from `Kay <http://code.google.com/p/kay-framework/>`_.
:param args:
Positional arguments to construct a key using ``db.Key.from_path()``
or a ``db.Key`` instance or encoded key.
:param kwargs:
Keyword arguments to construct a key using ``db.Key.from_path()``.
:returns:
A ``db.Model`` instance.
"""
try:
if len(args) == 1:
# A Key or encoded Key is the single argument.
obj = db.get(args[0])
else:
# Build a key using all arguments.
obj = db.get(db.Key.from_path(*args, **kwargs))
if obj:
return obj
except (db.BadArgumentError, db.BadKeyError):
# Falling through to raise the NotFound.
pass
abort(404)
def get_by_id_or_404(model, id, parent=None):
"""Returns a model instance fetched by id or raises a 404 Not Found error.
Example::
from tipfy import RequestHandler
from tipfy.appengine.db import get_by_id_or_404
from mymodels import Contact
class EditContactHandler(RequestHandler):
def get(self, **kwargs):
contact = get_by_id_or_404(Contact, kwargs['contact_id'])
# ... continue processing contact ...
This function derives from `Kay <http://code.google.com/p/kay-framework/>`_.
:param model:
A ``db.Model`` class to load an entity.
:param id:
An id from a ``db.Key`` (an integer).
:param parent:
The parent entity for the requested entities, as a Model
instance or Key instance, or None (the default) if the requested
entities do not have a parent.
:returns:
A ``db.Model`` instance.
"""
obj = model.get_by_id(id, parent=parent)
if obj:
return obj
abort(404)
def get_by_key_name_or_404(model, key_name, parent=None):
"""Returns a model instance fetched by key name or raises a 404 Not Found
error. Example::
from tipfy import RequestHandler
from tipfy.appengine.db import get_by_key_name_or_404
from mymodels import Contact
class EditContactHandler(RequestHandler):
def get(self, **kwargs):
contact = get_by_key_name_or_404(Contact,
kwargs['contact_key_name'])
# ... continue processing contact ...
This function derives from `Kay <http://code.google.com/p/kay-framework/>`_.
:param model:
A ``db.Model`` class to load an entity.
:param key_name:
A key name from a ``db.Key`` (a string).
:param parent:
The parent entity for the requested entities, as a Model
instance or Key instance, or None (the default) if the requested
entities do not have a parent.
:returns:
A ``db.Model`` instance.
"""
obj = model.get_by_key_name(key_name, parent=parent)
if obj:
return obj
abort(404)
def run_in_namespace(namespace, function, *args, **kwargs):
"""Executes a function in a given namespace, then returns back to the
current namescape.
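    A minimal sketch (``Counter`` is a placeholder model, not part of tipfy)::
        from tipfy.appengine.db import run_in_namespace
        def count_entities():
            return Counter.all().count()
        # Runs the count in the 'tenant-a' namespace; the previous namespace is
        # restored afterwards, even if the function raises.
        total = run_in_namespace('tenant-a', count_entities)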
:param namespace:
Name of the namespace to run the function.
:param function:
Function to be executed in the given namespace.
:param args:
Arguments to be passed to the function.
:param kwargs:
Keyword arguments to be passed to the function.
"""
current_namespace = namespace_manager.get_namespace()
try:
namespace_manager.set_namespace(namespace)
return function(*args, **kwargs)
finally:
# Restore previous namespace.
namespace_manager.set_namespace(current_namespace)
# Decorators.
def retry_on_timeout(retries=3, interval=1.0, exponent=2.0):
"""A decorator to retry a function that performs db operations in case a
``db.Timeout`` exception is raised. Example::
from tipfy import RequestHandler
from tipfy.appengine.db import retry_on_timeout
from mymodels import Contact
class EditContactHandler(RequestHandler):
def get(self, **kwargs):
# ... do the get stuff ...
# ...
pass
@retry_on_timeout()
def post(self, **kwargs):
# ... load entity and process form data ...
# ...
# Save the entity. This will be retried in case of timeouts.
entity.put()
This function derives from `Kay <http://code.google.com/p/kay-framework/>`_.
:param retries:
An integer value for the number of retries in case ``db.Timeout`` is
raised.
:param interval:
A float value for the number of seconds between each interval.
:param exponent:
A float exponent to be applied to each retry interval.
For example, if ``interval`` is set to 0.2 and exponent is 2.0,
retries intervals will be in seconds: 0.2, 0.4, 0.8, etc.
:returns:
A decorator wrapping the target function.
"""
def decorator(func):
def decorated(*args, **kwargs):
count = 0
while True:
try:
return func(*args, **kwargs)
except db.Timeout, e:
logging.debug(e)
if count >= retries:
raise e
else:
sleep_time = (exponent ** count) * interval
logging.warning("Retrying function %r in %d secs" %
(func, sleep_time))
time.sleep(sleep_time)
count += 1
return decorated
return decorator
def load_entity(model, kwarg_old, kwarg_new=None, fetch_mode=None):
"""A decorator that takes an entity key, key name or id from the request
    handler keyword arguments, loads an entity and adds it to the arguments.
If not found, a ``NotFound`` exception is raised. Example::
from tipfy import RequestHandler
from tipfy.appengine.db import load_entity
from mymodels import Contact
class EditContactHandler(RequestHandler):
@load_entity(Contact, 'contact_id', 'contact', 'id')
def get(self, **kwargs):
# kwargs['contact_id'] is used to load a Contact entity using
# get_by_id(). The entity is then added to kwargs['contact'].
pass
@load_entity(Contact, 'contact_id', 'contact', 'id')
def post(self, **kwargs):
# kwargs['contact_id'] is used to load a Contact entity using
# get_by_id(). The entity is then added to kwargs['contact'].
pass
:param model:
A ``db.Model`` class to fetch an entity from.
:param kwarg_old:
The keyword argument, passed by the routing system to the
request handler, that contains the key, id or key_name of the entity
to be loaded. For example, ``contact_key``, ``contact_id`` or
``contact_key_name``.
:param kwarg_new:
The new keyword argument to be passed to the request handler.
This keyword is *added* to the arguments. If not set, uses kwarg_old
        as base, removing the fetch mode suffix. For example, ``contact``.
:param fetch_mode:
The fetch mode. Can be either ``key``, ``id`` or
``key_name``, to fetch using ``db.Model.get()``,
``db.Model.get_by_id()`` or ``db.Model.get_by_key_name()``,
        respectively. If not set, it will check if ``kwarg_old`` ends with
``_key``, ``_id`` or ``_key_name`` to guess the fetch mode.
:returns:
A decorator wrapping the target ``tipfy.RequestHandler`` method.
"""
if fetch_mode is None or kwarg_new is None:
        for suffix in ('_key', '_id', '_key_name'):
            if kwarg_old.endswith(suffix):
                if kwarg_new is None:
                    kwarg_new = kwarg_old[:-len(suffix)]
                if fetch_mode is None:
                    fetch_mode = suffix[1:]
break
else:
raise NotImplementedError('Invalid fetch_mode.')
def decorator(func):
def decorated(*args, **kwargs):
entity = None
key = kwargs.get(kwarg_old, None)
if key is not None:
if fetch_mode == 'key':
entity = get_or_404(key)
elif fetch_mode == 'id':
entity = get_by_id_or_404(model, key)
elif fetch_mode == 'key_name':
entity = get_by_key_name_or_404(model, key)
kwargs[kwarg_new] = entity
return func(*args, **kwargs)
return decorated
return decorator
def to_key(values):
"""Coerces a value or list of values to `db.Key` instances.
    :param values:
        A datastore key as a string, a `db.Model` or `db.Key` instance, or a
        list of them. None values, and model instances that do not yet have a
        key, are appended to the result as None.
:returns:
A `db.Key` or a list of `db.Key` instances.
"""
if values is None:
return None
if not isinstance(values, list):
multiple = False
values = [values]
else:
multiple = True
res = []
for value in values:
if value is None:
res.append(None)
elif isinstance(value, db.Model):
if value.has_key():
res.append(value.key())
else:
res.append(None)
elif isinstance(value, basestring):
res.append(db.Key(value))
elif isinstance(value, db.Key):
res.append(value)
else:
raise datastore_errors.BadArgumentError('Expected model, key or '
'string.')
if multiple:
return res
return res[0]
class ModelMixin(object):
"""A base class for db.Model mixins. This allows to mix db properties
from several base classes in a single model. For example::
from google.appengine.ext import db
from tipfy.appengine.db import ModelMixin
class DateMixin(ModelMixin):
created = db.DateTimeProperty(auto_now_add=True)
updated = db.DateTimeProperty(auto_now=True)
class AuditMixin(ModelMixin):
created_by = db.UserProperty()
updated_by = db.UserProperty()
class Account(db.Model, DateMixin, AuditMixin):
name = db.StringProperty()
class SupportTicket(db.Model, DateMixin, AuditMixin):
title = db.StringProperty()
class Item(db.Model, DateMixin):
name = db.StringProperty()
description = db.StringProperty()
Read more about it in the
`tutorial <http://www.tipfy.org/wiki/cookbook/reusing-models-with-modelmixin/>`_.
"""
__metaclass__ = db.PropertiedClass
@classmethod
def kind(self):
"""Need to implement this because it is called by PropertiedClass
to register the kind name in _kind_map. We just return a dummy name.
"""
return '__model_mixin__'
from tipfy.appengine.db.properties import *
# Old name
get_property_dict = get_entity_dict
|
{
"content_hash": "ccca314f50d09afe00d45cf5af99eea5",
"timestamp": "",
"source": "github",
"line_count": 586,
"max_line_length": 105,
"avg_line_length": 33.2542662116041,
"alnum_prop": 0.6067121670857495,
"repo_name": "pombreda/tipfy",
"id": "1138810c8ed68dad92280e2d572c38da584689e0",
"size": "19511",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "tipfy/appengine/db/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "9758"
},
{
"name": "Gettext Catalog",
"bytes": "1314"
},
{
"name": "HTML",
"bytes": "15377"
},
{
"name": "JavaScript",
"bytes": "2954"
},
{
"name": "Python",
"bytes": "799413"
}
],
"symlink_target": ""
}
|
import argparse
import sys
import os
import time
import traceback
import sys
import ctypes
import subprocess
from subprocess import Popen, PIPE
import shutil
from optparse import OptionParser
from biokbase.workspace.client import Workspace
import urllib
import urllib2
import json
from biokbase import log
from biokbase.userandjobstate.client import UserAndJobState
from biokbase.Transform.util import Uploader
import datetime
desc1 = '''
NAME
trns_upload_Transform.Dummy -- not doing any action... dummy loader
SYNOPSIS
'''
desc2 = '''
DESCRIPTION
trns_upload_Transform.Dummy dummy uploader
'''
desc3 = '''
EXAMPLES
> trns_upload_Transform.Dummy --ws_url 'https://kbase.us/services/ws' --ws_id kbasetest:home --in_id '' --out_id 'my_tst_out'
SEE ALSO
trns_validate_hndlr, trns_import_hndlr
AUTHORS
First Last.
'''
if __name__ == "__main__":
# Parse options.
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, prog='trns_upload_Transform.Dummy', epilog=desc3)
#parser.add_argument('-u', '--ws_url', help='Workspace url', action='store', dest='ws_url', default='https://kbase.us/services/ws', required=True)
#parser.add_argument('-w', '--dst_ws_name', help='Destination workspace name', action='store', dest='ws_id', default=None, required=True)
#parser.add_argument('-o', '--out_id', help='Output workspace object name', action='store', dest='outobj_id', default=None, required=True)
    parser.add_argument('-l', '--input_directory', help='Input directory', action='store', dest='sdir', default='lib', required=False)
    parser.add_argument('-w', '--working_directory', help='Working directory', action='store', dest='wdir', default='lib', required=False)
parser.add_argument('-g', '--output_filename', help='Output prefix or file name', action='store', dest='otmp', default='outfile', required=False)
# for meta data
#parser.add_argument('-i', '--in_id', help='Input Shock node id for meta', action='store', dest='inobj_id', default='NotProvided', required=True)
#parser.add_argument('-e', '--ext_type', help='External object type', action='store', dest='etype', default=None, required=True)
#parser.add_argument('-j', '--job_id', help='UJS job id', action='store', dest='jid', default='NoJodID', required=False)
usage = parser.format_usage()
parser.description = desc1 + ' ' + usage + desc2
parser.usage = argparse.SUPPRESS
args = parser.parse_args()
#kb_token = os.environ.get('KB_AUTH_TOKEN')
## main loop
#jif = open("{}/{}".format(args.sdir,args.otmp, 'r'))
#data = json.loads(jif.read())
#jif.close()
#wsd = Workspace(url=args.ws_url, token=kb_token)
#wsd.save_objects({'workspace':args.ws_id, 'objects' : [ {
# 'type' : 'Transform.Dummy', 'data' : data, 'name' : args.outobj_id,
# 'meta' : { 'source_id' : args.inobj_id, 'source_type' : args.etype,
# 'ujs_job_id' : args.jid} } ]})
    sys.exit(0)
|
{
"content_hash": "32817291b387073da45573c88019862e",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 150,
"avg_line_length": 36.166666666666664,
"alnum_prop": 0.6672152732060567,
"repo_name": "mlhenderson/transform",
"id": "1c2a5813b94bb804470bc2c43b2196672d217e16",
"size": "3061",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "plugins/scripts/validate/trns_validate_Dummy.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1713"
},
{
"name": "Java",
"bytes": "202592"
},
{
"name": "JavaScript",
"bytes": "4946"
},
{
"name": "Makefile",
"bytes": "8147"
},
{
"name": "Perl",
"bytes": "222229"
},
{
"name": "Perl6",
"bytes": "18136"
},
{
"name": "Python",
"bytes": "381086"
},
{
"name": "Shell",
"bytes": "6782"
}
],
"symlink_target": ""
}
|
import unittest
import numpy as np
import pandas as pd
from nowtrade import criteria
from testing_data import msft_data, msft_close_name
from nowtrade.symbol_list import Symbol
from nowtrade.action import Long, Short, LongExit, ShortExit
from nowtrade.strategy import LONG, SHORT, LONG_EXIT, SHORT_EXIT, NO_ACTION
class TestCriteria(unittest.TestCase):
def setUp(self):
self.data = pd.DataFrame([[0, 5, 10.0, 0, NO_ACTION, 0, 10, -0.10, 0.01],
[5, 4, 12.0, 5, LONG, 1, 20, -0.20, 0.02],
[10, 3, 8.0, 10, NO_ACTION, 1, 30, -0.30, 0.01],
[15, 2, 6.0, 15, LONG_EXIT, 0, 0, -0.20, 0.00],
[20, 1, 9.0, 20, NO_ACTION, 0, -10, -0.10, -0.01],
[25, 0, 10.0, 25, SHORT, -1, -20, 0.0, -0.02],
[30, -1, 11.0, 30, SHORT_EXIT, 0, 0, 0.10, -0.03]],
columns=['ONE', 'TWO', 'THREE', 'ONE_CLONE', 'ACTIONS_ONE', 'STATUS_ONE', 'PL_ONE', 'CHANGE_VALUE_ONE', 'CHANGE_PERCENT_ONE'],
index=pd.date_range('20100601', periods=7))
self.one = Symbol('ONE')
self.two = Symbol('TWO')
self.three = Symbol('THREE')
self.one_clone = Symbol('ONE_CLONE')
class TestBarsSinceAction(TestCriteria):
def test_bars_since_action(self):
crit = criteria.BarsSinceAction(self.one, Long(), 2)
self.assertEquals(str(crit), 'BarsSinceAction(symbol=ONE, action=1, periods=2, condition=NONE)')
crit = criteria.BarsSinceLongExit(self.one, 3)
self.assertTrue(crit.apply(self.data))
crit = criteria.BarsSinceShortExit(self.one, 2)
self.assertFalse(crit.apply(self.data))
crit = criteria.BarsSinceShortExit(self.one, 1)
self.assertFalse(crit.apply(self.data))
crit = criteria.BarsSinceShortExit(self.one, 0)
self.assertTrue(crit.apply(self.data))
crit = criteria.BarsSinceLong(self.one, 4)
self.assertFalse(crit.apply(self.data))
crit = criteria.BarsSinceLong(self.one, 5)
self.assertTrue(crit.apply(self.data))
crit = criteria.BarsSinceLong(self.one, 6)
self.assertFalse(crit.apply(self.data))
crit = criteria.BarsSinceShortExit(self.one, 0, 'under')
self.assertFalse(crit.apply(self.data))
crit = criteria.BarsSinceShortExit(self.one, 1, 'under')
self.assertTrue(crit.apply(self.data))
crit = criteria.BarsSinceShortExit(self.one, 2, 'under')
self.assertTrue(crit.apply(self.data))
crit = criteria.BarsSinceLong(self.one, 5, 'under')
self.assertFalse(crit.apply(self.data))
crit = criteria.BarsSinceLong(self.one, 6, 'under')
self.assertTrue(crit.apply(self.data))
crit = criteria.BarsSinceLong(self.one, 7, 'under')
self.assertTrue(crit.apply(self.data))
crit = criteria.BarsSinceShortExit(self.one, 1, 'over')
self.assertFalse(crit.apply(self.data))
crit = criteria.BarsSinceShortExit(self.one, 0, 'over')
self.assertFalse(crit.apply(self.data))
crit = criteria.BarsSinceShort(self.one, 0, 'over')
self.assertTrue(crit.apply(self.data))
crit = criteria.BarsSinceLong(self.one, 4, 'over')
self.assertTrue(crit.apply(self.data))
crit = criteria.BarsSinceLong(self.one, 5, 'over')
self.assertFalse(crit.apply(self.data))
crit = criteria.BarsSinceLong(self.one, 6, 'over')
self.assertFalse(crit.apply(self.data))
class TestInMarket(TestCriteria):
def test_in_market(self):
crit = criteria.InMarket(self.one)
self.assertEqual(crit.__repr__(), crit.label)
self.assertFalse(crit.apply(self.data))
self.assertTrue(crit.apply(self.data[:-1]))
self.assertTrue(crit.apply(self.data[:2]))
self.assertTrue(crit.apply(self.data[:3]))
class TestIsLong(TestCriteria):
def test_is_long(self):
crit = criteria.IsLong(self.one)
self.assertFalse(crit.apply(self.data))
self.assertFalse(crit.apply(self.data[:-1]))
self.assertTrue(crit.apply(self.data[:3]))
self.assertFalse(crit.apply(pd.DataFrame()))
class TestIsShort(TestCriteria):
def test_is_short(self):
crit = criteria.IsShort(self.one)
self.assertFalse(crit.apply(self.data))
self.assertTrue(crit.apply(self.data[:-1]))
self.assertFalse(crit.apply(self.data[:2]))
self.assertFalse(crit.apply(pd.DataFrame()))
class TestIsYear(TestCriteria):
def test_is_year(self):
crit = criteria.IsYear(2012)
self.assertEqual(crit.__repr__(), 'IsYear_2012')
self.assertFalse(crit.apply(self.data).any())
crit = criteria.IsYear(2010)
self.assertTrue(crit.apply(self.data).all())
class TestIsMonth(TestCriteria):
def test_is_month(self):
crit = criteria.IsMonth(6)
self.assertEqual(crit.__repr__(), 'IsMonth_6')
self.assertTrue(crit.apply(self.data).all())
crit = criteria.IsMonth(1)
self.assertFalse(crit.apply(self.data).any())
class TestIsDay(TestCriteria):
def test_is_day(self):
crit = criteria.IsDay(7)
self.assertEqual(crit.__repr__(), 'IsDay_7')
self.assertTrue(crit.apply(self.data)[-1])
crit = criteria.IsDay(8)
self.assertFalse(crit.apply(self.data)[-1])
class TestIsWeekDay(TestCriteria):
def test_is_week_day(self):
crit = criteria.IsWeekDay(0)
self.assertEqual(crit.__repr__(), 'IsWeekDay_0')
self.assertTrue(crit.apply(self.data)[-1])
crit = criteria.IsWeekDay(4)
self.assertTrue(crit.apply(self.data)[-4])
crit = criteria.IsWeekDay(3)
self.assertFalse(crit.apply(self.data)[-1])
class TestPositions(TestCriteria):
def test_position(self):
crit = criteria.Above('ONE', 5)
value = crit.apply(pd.DataFrame())
self.assertEqual(value, False)
value = crit.apply(self.data.head(2))
self.assertEqual(value, False)
value = crit.apply(self.data.head(3))
self.assertEqual(value, True)
crit = criteria.Above('TWO', 4, 3)
value = crit.apply(self.data.head(3))
self.assertEqual(value, True)
crit = criteria.Below('TWO', 6, 2)
value = crit.apply(pd.DataFrame())
self.assertFalse(value)
value = crit.apply(self.data)
self.assertEqual(value, True)
crit = criteria.Below('ONE', 5)
value = crit.apply(self.data.head(1))
self.assertEqual(value, True)
value = crit.apply(self.data.head(2))
self.assertEqual(value, False)
value = crit.apply(self.data.head(3))
self.assertEqual(value, False)
crit = criteria.Above('ONE', 'TWO')
value = crit.apply(self.data.head(1))
self.assertEqual(value, False)
value = crit.apply(self.data.head(2))
self.assertEqual(value, True)
crit = criteria.Below('ONE', 'TWO')
value = crit.apply(self.data.head(1))
self.assertEqual(value, True)
value = crit.apply(self.data.head(2))
self.assertEqual(value, False)
crit = criteria.Equals('ONE', 10)
value = crit.apply(pd.DataFrame())
self.assertEqual(value, False)
value = crit.apply(self.data.head(2))
self.assertEqual(value, False)
crit = criteria.Equal('ONE', 10)
value = crit.apply(self.data.head(2))
self.assertEqual(value, False)
value = crit.apply(self.data.head(3))
self.assertEqual(value, True)
value = crit.apply(self.data.head(4))
self.assertEqual(value, False)
crit = criteria.Above('ONE', 10, 1)
value = crit.apply(self.data.head(4))
self.assertEqual(value, True)
crit = criteria.Equals('ONE', 12, 3)
self.assertEqual(value, True)
crit = criteria.Equals('ONE', 'ONE_CLONE', 2)
value = crit.apply(self.data)
self.assertEqual(value, True)
class TestInRange(TestCriteria):
def test_in_range(self):
crit = criteria.InRange(str(self.one), 10, 20)
ret = crit.apply(self.data.head(2))
self.assertFalse(ret)
ret = crit.apply(self.data.head(3))
self.assertTrue(ret)
ret = crit.apply(self.data.head(4))
self.assertTrue(ret)
ret = crit.apply(self.data.head(5))
self.assertTrue(ret)
ret = crit.apply(self.data.head(6))
self.assertFalse(ret)
crit = criteria.InRange(str(self.one), str(self.two), str(self.three))
ret = crit.apply(self.data.head(1))
self.assertFalse(ret)
ret = crit.apply(self.data.head(2))
self.assertTrue(ret)
ret = crit.apply(self.data.head(3))
self.assertFalse(ret)
crit = criteria.InRange(str(self.one), -1, 1)
ret = crit.apply(self.data.head(1))
self.assertTrue(ret)
crit = criteria.InRange(str(self.two), 4, str(self.three))
ret = crit.apply(self.data.head(1))
self.assertTrue(ret)
crit = criteria.InRange(str(self.two), str(self.one), 6)
ret = crit.apply(self.data.head(1))
self.assertTrue(ret)
class TestCrossing(TestCriteria):
def test_crossing(self):
crit = criteria.CrossingAbove(str(self.one), str(self.two))
self.assertEquals(crit.__repr__(), 'CrossingAbove(param1=ONE, param2=TWO)')
self.assertTrue(crit.apply(self.data.head(2)))
crit = criteria.CrossingBelow(str(self.one), str(self.two))
self.assertEquals(crit.__repr__(), 'CrossingBelow(param1=ONE, param2=TWO)')
self.assertFalse(crit.apply(self.data.head(2)))
crit = criteria.CrossingBelow(str(self.three), str(self.one))
self.assertFalse(crit.apply(self.data.head(2)))
self.assertTrue(crit.apply(self.data.head(3)))
crit = criteria.CrossingAbove(str(self.one), 7)
self.assertFalse(crit.apply(self.data.head(2)))
crit = criteria.CrossingBelow(str(self.two), 2)
self.assertFalse(crit.apply(self.data.head(4)))
self.assertTrue(crit.apply(self.data.head(5)))
class TestNot(TestCriteria):
def test_not(self):
inRangeCriteria = criteria.InRange(str(self.one), 10, 20)
crit = criteria.Not(inRangeCriteria)
ret = crit.apply(self.data.head(2))
self.assertTrue(ret)
ret = crit.apply(self.data.head(3))
self.assertFalse(ret)
ret = crit.apply(self.data.head(4))
self.assertFalse(ret)
ret = crit.apply(self.data.head(5))
self.assertFalse(ret)
ret = crit.apply(self.data.head(6))
self.assertTrue(ret)
class TestStopLoss(TestCriteria):
def test_stop_loss(self):
crit = criteria.StopLoss(self.one, -0.2)
self.assertFalse(crit.apply(self.data))
self.assertFalse(crit.apply(self.data[:-2]))
self.assertTrue(crit.apply(self.data[:-3]))
crit = criteria.StopLoss(self.one, -0.02, percent=True)
self.assertTrue(crit.apply(self.data))
self.assertTrue(crit.apply(self.data[:-1]))
self.assertFalse(crit.apply(self.data[:-2]))
crit = criteria.StopLoss(self.one, -0.1, short=True)
self.assertTrue(crit.apply(self.data))
self.assertFalse(crit.apply(self.data[:-1]))
crit = criteria.StopLoss(self.one, -0.02, short=True, percent=True)
self.assertFalse(crit.apply(self.data))
self.assertFalse(crit.apply(self.data[:-1]))
self.assertTrue(crit.apply(self.data[:2]))
class TestTakeProfit(TestCriteria):
def test_take_profit(self):
crit = criteria.TakeProfit(self.one, 20)
self.assertEqual(crit.__repr__(), crit.label)
self.assertTrue(crit.apply(self.data[:2]))
self.assertFalse(crit.apply(self.data[:-1]))
crit_short = criteria.TakeProfit(self.one, 20, short=True)
self.assertFalse(crit_short.apply(self.data[:2]))
self.assertTrue(crit_short.apply(self.data[:-1]))
self.data['PL_ONE'] = np.nan
self.assertFalse(crit.apply(self.data))
class TestTrailingStop(TestCriteria):
def test_trailing_stop(self):
crit = criteria.TrailingStop(self.one, 0.009, short=False, percent=True)
self.assertFalse(crit.apply(self.data[:1]))
self.assertFalse(crit.apply(self.data[:2]))
self.assertTrue(crit.apply(self.data[:3]))
crit = criteria.TrailingStop(self.one, 0.019, short=False, percent=True)
self.assertFalse(crit.apply(self.data[:1]))
self.assertFalse(crit.apply(self.data[:2]))
self.assertFalse(crit.apply(self.data[:3]))
self.assertTrue(crit.apply(self.data[:4]))
crit = criteria.TrailingStop(self.one, 0.09, short=False, percent=False)
self.assertTrue(crit.apply(self.data[:1]))
crit = criteria.TrailingStop(self.one, 0.19, short=False, percent=False)
self.assertFalse(crit.apply(self.data[:1]))
self.assertTrue(crit.apply(self.data[:2]))
crit = criteria.TrailingStop(self.one, 0.009, short=True, percent=True)
self.assertTrue(crit.apply(self.data[:1]))
crit = criteria.TrailingStop(self.one, 0.019, short=True, percent=True)
self.assertFalse(crit.apply(self.data[:1]))
self.assertTrue(crit.apply(self.data[:2]))
crit = criteria.TrailingStop(self.one, 0.09, short=True, percent=False)
self.assertFalse(crit.apply(self.data[:1]))
self.assertFalse(crit.apply(self.data[:2]))
self.assertFalse(crit.apply(self.data[:3]))
self.assertTrue(crit.apply(self.data[:4]))
crit = criteria.TrailingStop(self.one, 0.19, short=True, percent=False)
self.assertFalse(crit.apply(self.data[:1]))
self.assertFalse(crit.apply(self.data[:2]))
self.assertFalse(crit.apply(self.data[:3]))
self.assertFalse(crit.apply(self.data[:4]))
self.assertTrue(crit.apply(self.data[:5]))
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "bec4a1f1e64d2204758fc30750910951",
"timestamp": "",
"source": "github",
"line_count": 309,
"max_line_length": 159,
"avg_line_length": 45.52750809061489,
"alnum_prop": 0.621907876030708,
"repo_name": "edouardpoitras/NowTrade",
"id": "d44f4cd9f597c78d6a9248b4d5047525fdf2586b",
"size": "14068",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_criteria.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "223131"
},
{
"name": "Shell",
"bytes": "386"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class MetaValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(self, plotly_name="meta", parent_name="scattergl", **kwargs):
super(MetaValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
{
"content_hash": "bf630b9897701f0d3628892313554baa",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 78,
"avg_line_length": 37.07692307692308,
"alnum_prop": 0.5912863070539419,
"repo_name": "plotly/python-api",
"id": "821ffd572d1102606f6cdc98f3a5b997ae4e5f47",
"size": "482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scattergl/_meta.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
from google.appengine.ext import ndb
class MobileUser(ndb.Model):
'''
Used in workaround for null user id (helper/push_helper.py)
We can restructure this once regular oauth logins happen (https://github.com/the-blue-alliance/the-blue-alliance/issues/1069)
    This is probably not a good long-term solution.
'''
_use_memcache = False
_use_cache = False
user = ndb.UserProperty(required=True)
|
{
"content_hash": "d3b6eb0624c304c87e3f98cbc2738f64",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 129,
"avg_line_length": 30.642857142857142,
"alnum_prop": 0.7062937062937062,
"repo_name": "fangeugene/the-blue-alliance",
"id": "efc080fa94278008a0d4ff2d58d822085c1d2ac1",
"size": "429",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "models/mobile_user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "299313"
},
{
"name": "Dockerfile",
"bytes": "1815"
},
{
"name": "HTML",
"bytes": "5829913"
},
{
"name": "JavaScript",
"bytes": "516241"
},
{
"name": "Less",
"bytes": "42810"
},
{
"name": "PHP",
"bytes": "10727"
},
{
"name": "Pug",
"bytes": "1857"
},
{
"name": "Python",
"bytes": "2857775"
},
{
"name": "Ruby",
"bytes": "4677"
},
{
"name": "Shell",
"bytes": "17229"
}
],
"symlink_target": ""
}
|
from django import forms
from hacks.models import Hackathon, CodeMania
class HackathonForm(forms.ModelForm):
class Meta:
model = Hackathon
fields = '__all__'
class CodeManiaForm(forms.ModelForm):
class Meta:
model = CodeMania
fields = '__all__'
|
{
"content_hash": "70253bfae6c59a267f0bc9418ccdf430",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 45,
"avg_line_length": 21.416666666666668,
"alnum_prop": 0.7315175097276264,
"repo_name": "hackjss/hackathon-2015",
"id": "45d169e78cac207ae7aacab55c1a271ee896418b",
"size": "257",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "hacks/forms.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "65346"
},
{
"name": "HTML",
"bytes": "107884"
},
{
"name": "JavaScript",
"bytes": "82462"
},
{
"name": "Python",
"bytes": "14703"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0005_remove_information_photos'),
]
operations = [
migrations.AddField(
model_name='information',
name='photos',
field=django.contrib.postgres.fields.ArrayField(base_field=models.FileField(upload_to=b''), null=True, size=None),
),
]
|
{
"content_hash": "b2206b19e21e1b6d9f31d4023553e1b4",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 126,
"avg_line_length": 26.473684210526315,
"alnum_prop": 0.6500994035785288,
"repo_name": "ayushjain19/Technopedia-BPHC-website",
"id": "4fa847a7e80159185ea78be7b22ed510671d13a0",
"size": "574",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "projects/migrations/0006_information_photos.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "367"
},
{
"name": "HTML",
"bytes": "10667"
},
{
"name": "Python",
"bytes": "18290"
}
],
"symlink_target": ""
}
|
import os,sys
def main():
dir = os.getcwd()
#print dir
for root, dirs, files in os.walk(dir):
for fname in files:
#fullName = os.path.join(root,fname)
if fname.endswith('.md') or fname.endswith('.py') or fname.endswith('.csv'):
continue
if fname.endswith('.timbuk'):
fname = os.path.join(root, fname)
os.rename(fname, fname.replace('.timbuk','.fast'))
main()
|
{
"content_hash": "daf86b3f1a0a420555f878761fee9cc0",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 88,
"avg_line_length": 26.666666666666668,
"alnum_prop": 0.5208333333333334,
"repo_name": "lorisdanto/automatark",
"id": "abd173c566108e57f21f4c36fa2e508a987e6e9c",
"size": "480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "symbolic-tree-automata/fastProgramsComp/uniformFormat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "149632"
},
{
"name": "Java",
"bytes": "306917"
},
{
"name": "OCaml",
"bytes": "294926"
},
{
"name": "Python",
"bytes": "10553"
}
],
"symlink_target": ""
}
|
import os
import pytest
from azure.keyvault.secrets import ApiVersion
from azure.keyvault.secrets._shared.client_base import DEFAULT_VERSION
from devtools_testutils import AzureRecordedTestCase, is_live
def get_decorator(**kwargs):
"""returns a test decorator for test parameterization"""
return [(api_version) for api_version in ApiVersion]
class SecretsClientPreparer(AzureRecordedTestCase):
def __init__(self, **kwargs) -> None:
self.azure_keyvault_url = "https://vaultname.vault.azure.net"
if is_live():
self.azure_keyvault_url = os.environ["AZURE_KEYVAULT_URL"]
self.is_logging_enabled = kwargs.pop("logging_enable", True)
if is_live():
os.environ["AZURE_TENANT_ID"] = os.environ["KEYVAULT_TENANT_ID"]
os.environ["AZURE_CLIENT_ID"] = os.environ["KEYVAULT_CLIENT_ID"]
os.environ["AZURE_CLIENT_SECRET"] = os.environ["KEYVAULT_CLIENT_SECRET"]
def __call__(self, fn):
def _preparer(test_class, api_version, **kwargs):
self._skip_if_not_configured(api_version)
if not self.is_logging_enabled:
kwargs.update({"logging_enable": False})
client = self.create_client(self.azure_keyvault_url, **kwargs, api_version=api_version)
with client:
fn(test_class, client)
return _preparer
def create_client(self, vault_uri, **kwargs):
from azure.keyvault.secrets import SecretClient
credential = self.get_credential(SecretClient)
return self.create_client_from_credential(SecretClient, credential=credential, vault_url=vault_uri, **kwargs)
def _skip_if_not_configured(self, api_version, **kwargs):
if is_live() and api_version != DEFAULT_VERSION:
pytest.skip("This test only uses the default API version for live tests")
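# A rough usage sketch (test and secret names are hypothetical, not from this
# package): the preparer is stacked under pytest's parametrize so each test runs
# once per API version and receives a ready-made SecretClient.
#
#   all_api_versions = get_decorator()
#
#   class TestSecretCrud(AzureRecordedTestCase):
#       @pytest.mark.parametrize("api_version", all_api_versions)
#       @SecretsClientPreparer()
#       def test_set_secret(self, client, **kwargs):
#           secret = client.set_secret("sketch-secret", "sketch-value")
#           assert secret.name == "sketch-secret"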
|
{
"content_hash": "7a07488624b1045d805ff9a26b83dc4c",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 117,
"avg_line_length": 40.5,
"alnum_prop": 0.6634460547504025,
"repo_name": "Azure/azure-sdk-for-python",
"id": "66220cefc8ce72e433d556843cfca7488b79c54b",
"size": "2014",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/keyvault/azure-keyvault-secrets/tests/_test_case.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
from zope.interface import implements
from twisted.cred import portal, checkers, credentials, error
from twisted.web import resource
class HttpRpcRealm(object):
implements(portal.IRealm)
def __init__(self, resource):
self._resource = resource
def requestAvatar(self, user, mind, *interfaces):
if resource.IResource in interfaces:
return (resource.IResource, self._resource, lambda: None)
raise NotImplementedError()
class StaticAuthChecker(object):
"""Checks that a username and password matches given static values.
"""
implements(checkers.ICredentialsChecker)
credentialInterfaces = (credentials.IUsernamePassword,)
def __init__(self, username, password):
self._username = username
self._password = password
def requestAvatarId(self, credentials):
authorized = all((credentials.username == self._username,
credentials.password == self._password))
if not authorized:
raise error.UnauthorizedLogin()
return self._username
|
{
"content_hash": "48a3e40f087c88c6de28e15bf750ff69",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 71,
"avg_line_length": 30.914285714285715,
"alnum_prop": 0.6802218114602587,
"repo_name": "TouK/vumi",
"id": "59f382c5031865188fea769bc0f5d85bda671c59",
"size": "1173",
"binary": false,
"copies": "4",
"ref": "refs/heads/touk-develop",
"path": "vumi/transports/httprpc/auth.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Erlang",
"bytes": "29735"
},
{
"name": "JavaScript",
"bytes": "5556"
},
{
"name": "Puppet",
"bytes": "2557"
},
{
"name": "Python",
"bytes": "2989989"
},
{
"name": "Shell",
"bytes": "3435"
}
],
"symlink_target": ""
}
|
'''Trains a stacked what-where autoencoder built on residual blocks on the
MNIST dataset. It exemplifies two influential methods that have been developed
in the past few years.
The first is the idea of properly 'unpooling.' During any max pool, the
exact location (the 'where') of the maximal value in a pooled receptive field
is lost; however, it can be very useful in the overall reconstruction of an
input image. Therefore, if the 'where' is handed from the encoder
to the corresponding decoder layer, features being decoded can be 'placed' in
the right location, allowing for reconstructions of much higher fidelity.
References:
[1]
'Visualizing and Understanding Convolutional Networks'
Matthew D Zeiler, Rob Fergus
https://arxiv.org/abs/1311.2901v3
[2]
'Stacked What-Where Auto-encoders'
Junbo Zhao, Michael Mathieu, Ross Goroshin, Yann LeCun
https://arxiv.org/abs/1506.02351v8
The second idea exploited here is that of residual learning. Residual blocks
ease the training process by allowing skip connections that give the network
the ability to be as linear (or non-linear) as the data sees fit. This allows
for much deeper networks to be easily trained. The residual element seems to
be advantageous in the context of this example as it allows a nice symmetry
between the encoder and decoder. Normally, in the decoder, the final
projection to the space where the image is reconstructed is linear, however
this does not have to be the case for a residual block as the degree to which
its output is linear or non-linear is determined by the data it is fed.
However, in order to cap the reconstruction in this example, a hard sigmoid is
applied as a bias because we know the MNIST digits are mapped to [0,1].
References:
[3]
'Deep Residual Learning for Image Recognition'
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
https://arxiv.org/abs/1512.03385v1
[4]
'Identity Mappings in Deep Residual Networks'
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
https://arxiv.org/abs/1603.05027v3
'''
from __future__ import print_function
import numpy as np
from keras.datasets import mnist
from keras.models import Model
from keras.layers import Activation
from keras.layers import UpSampling2D, Conv2D, MaxPooling2D
from keras.layers import Input, BatchNormalization, ELU
import matplotlib.pyplot as plt
import keras.backend as K
from keras import layers
def convresblock(x, nfeats=8, ksize=3, nskipped=2, elu=True):
"""The proposed residual block from [4].
Running with elu=True will use ELU nonlinearity and running with
elu=False will use BatchNorm + RELU nonlinearity. While ELUs are fast
because they avoid the BatchNorm overhead, they may overfit because they
lack the stochastic element of BatchNorm's batch-statistics computation,
which acts as a good regularizer.
# Arguments
x: 4D tensor, the tensor to feed through the block
nfeats: Integer, number of feature maps for conv layers.
ksize: Integer, width and height of conv kernels in first convolution.
nskipped: Integer, number of conv layers for the residual function.
elu: Boolean, whether to use ELU or BN+RELU.
# Input shape
4D tensor with shape:
`(batch, channels, rows, cols)`
# Output shape
4D tensor with shape:
`(batch, filters, rows, cols)`
"""
y0 = Conv2D(nfeats, ksize, padding='same')(x)
y = y0
for i in range(nskipped):
if elu:
y = ELU()(y)
else:
y = BatchNormalization(axis=1)(y)
y = Activation('relu')(y)
y = Conv2D(nfeats, 1, padding='same')(y)
return layers.add([y0, y])
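# In short: y0 = Conv(x), then `nskipped` [activation -> 1x1 Conv] stages form the
# residual branch F, and the block returns y0 + F(y0) via layers.add.
# Illustrative call (parameter values chosen only as an example):
#   out = convresblock(inp, nfeats=32, ksize=3, elu=False)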
def getwhere(x):
''' Calculate the 'where' mask that contains switches indicating which
index contained the max value when MaxPool2D was applied. Using the
gradient of the sum is a nice trick to keep everything high level.'''
y_prepool, y_postpool = x
return K.gradients(K.sum(y_postpool), y_prepool)
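# Worked example of the gradient trick: for a single 2x2 pooled receptive field
#   x = [[1, 3],
#        [2, 0]]
# MaxPooling2D returns 3, and d(sum(maxpool(x)))/dx is 1 only at the argmax:
#   where = [[0, 1],
#            [0, 0]]
# so the gradient itself is the 'where' switch mask handed to the decoder.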
if K.backend() == 'tensorflow':
raise RuntimeError('This example can only run with the '
'Theano backend for the time being, '
'because it requires taking the gradient '
'of a gradient, which isn\'t '
'supported for all TF ops.')
# This example assumes 'channels_first' data format.
K.set_image_data_format('channels_first')
# input image dimensions
img_rows, img_cols = 28, 28
# the data, shuffled and split between train and test sets
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# The size of the kernel used for the MaxPooling2D
pool_size = 2
# The total number of feature maps at each layer
nfeats = [8, 16, 32, 64, 128]
# The sizes of the pooling kernel at each layer
pool_sizes = np.array([1, 1, 1, 1, 1]) * pool_size
# The convolution kernel size
ksize = 3
# Number of epochs to train for
epochs = 5
# Batch size during training
batch_size = 128
if pool_size == 2:
# if using a 5 layer net of pool_size = 2
x_train = np.pad(x_train, [[0, 0], [0, 0], [2, 2], [2, 2]],
mode='constant')
x_test = np.pad(x_test, [[0, 0], [0, 0], [2, 2], [2, 2]], mode='constant')
nlayers = 5
elif pool_size == 3:
# if using a 3 layer net of pool_size = 3
x_train = x_train[:, :, :-1, :-1]
x_test = x_test[:, :, :-1, :-1]
nlayers = 3
else:
import sys
sys.exit('Script supports pool_size of 2 and 3.')
# Shape of input to train on (note that model is fully convolutional however)
input_shape = x_train.shape[1:]
# The final list of the size of axis=1 for all layers, including input
nfeats_all = [input_shape[0]] + nfeats
# First build the encoder, all the while keeping track of the 'where' masks
img_input = Input(shape=input_shape)
# We push the 'where' masks to the following list
wheres = [None] * nlayers
y = img_input
for i in range(nlayers):
y_prepool = convresblock(y, nfeats=nfeats_all[i + 1], ksize=ksize)
y = MaxPooling2D(pool_size=(pool_sizes[i], pool_sizes[i]))(y_prepool)
wheres[i] = layers.Lambda(
getwhere, output_shape=lambda x: x[0])([y_prepool, y])
# Now build the decoder, and use the stored 'where' masks to place the features
for i in range(nlayers):
ind = nlayers - 1 - i
y = UpSampling2D(size=(pool_sizes[ind], pool_sizes[ind]))(y)
y = layers.multiply([y, wheres[ind]])
y = convresblock(y, nfeats=nfeats_all[ind], ksize=ksize)
# Use hard_sigmoid to clip the range of the reconstruction
y = Activation('hard_sigmoid')(y)
# Define the model and its mean squared error loss, and compile it with Adam
model = Model(img_input, y)
model.compile('adam', 'mse')
# Fit the model
model.fit(x_train, x_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, x_test))
# Plot
x_recon = model.predict(x_test[:25])
x_plot = np.concatenate((x_test[:25], x_recon), axis=1)
x_plot = x_plot.reshape((5, 10, input_shape[-2], input_shape[-1]))
x_plot = np.vstack([np.hstack(x) for x in x_plot])
plt.figure()
plt.axis('off')
plt.title('Test Samples: Originals/Reconstructions')
plt.imshow(x_plot, interpolation='none', cmap='gray')
plt.savefig('reconstructions.png')
|
{
"content_hash": "50d4f595e2f83def8ef7ebef917ab9d0",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 79,
"avg_line_length": 38.23152709359606,
"alnum_prop": 0.6755572735472233,
"repo_name": "baojianzhou/DLReadingGroup",
"id": "80b34e21658f72ce7c44a00a65733335066add7a",
"size": "7761",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "keras/examples/mnist_swwae.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "697"
},
{
"name": "Python",
"bytes": "1681423"
}
],
"symlink_target": ""
}
|
from buildbot.schedulers.basic import SingleBranchScheduler
from master import master_config
from master.factory import annotator_factory
m_annotator = annotator_factory.AnnotatorFactory()
defaults = {}
helper = master_config.Helper(defaults)
B = helper.Builder
F = helper.Factory
S = helper.Scheduler
T = helper.Triggerable
defaults['category'] = '5android'
android_dbg_archive = master_config.GetGSUtilUrl(
'chromium-android', 'android_main_dbg')
android_rel_archive = master_config.GetGSUtilUrl(
'chromium-android', 'android_main_rel')
#
# Main release scheduler for src/
#
S('android', branch='master', treeStableTimer=60)
#
# Triggerable scheduler for the builder
#
T('android_trigger_dbg')
T('android_trigger_rel')
#
# Android Builder
#
B('Android Arm64 Builder (dbg)', 'f_android_arm64_dbg', 'android', 'android',
auto_reboot=False, notify_on_missing=True)
F('f_android_arm64_dbg',
m_annotator.BaseFactory('chromium'))
B('Android Builder (dbg)', 'f_android_dbg', 'android', 'android',
auto_reboot=False, notify_on_missing=True)
F('f_android_dbg',
m_annotator.BaseFactory('chromium'))
B('Android Tests (dbg)', 'f_android_dbg_tests', 'android',
'android_trigger_dbg', notify_on_missing=True)
F('f_android_dbg_tests',
m_annotator.BaseFactory('chromium'))
B('Android Builder', 'f_android_rel', 'android', 'android',
notify_on_missing=True)
F('f_android_rel',
m_annotator.BaseFactory('chromium'))
B('Android Tests', 'f_android_rel_tests', 'android', 'android_trigger_rel',
notify_on_missing=True)
F('f_android_rel_tests',
m_annotator.BaseFactory('chromium'))
B('Android Clang Builder (dbg)', 'f_android_clang_dbg', 'android', 'android',
notify_on_missing=True)
F('f_android_clang_dbg',
m_annotator.BaseFactory('chromium'))
def Update(_config_arg, _active_master, c):
helper.Update(c)
specs = [
{'name': 'Android GN'},
{'name': 'Android GN (dbg)'},
{'name': 'Cast Android (dbg)'},
]
c['schedulers'].extend([
SingleBranchScheduler(name='android_gn',
branch='master',
treeStableTimer=60,
builderNames=[s['name'] for s in specs]),
])
c['builders'].extend([
{
'name': spec['name'],
'factory': m_annotator.BaseFactory(
spec.get('recipe', 'chromium'),
factory_properties=spec.get('factory_properties')),
'notify_on_missing': True,
'category': '5android',
} for spec in specs
])
|
{
"content_hash": "81dae1f2565caad7058c3b109726ae3a",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 77,
"avg_line_length": 27.293478260869566,
"alnum_prop": 0.6583034647550776,
"repo_name": "eunchong/build",
"id": "fb44c4222aa4d645298b25838936900d0854ecb0",
"size": "2678",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "masters/master.chromium.linux/master_android_cfg.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3128"
},
{
"name": "CSS",
"bytes": "211818"
},
{
"name": "HTML",
"bytes": "429981"
},
{
"name": "JavaScript",
"bytes": "75624"
},
{
"name": "Makefile",
"bytes": "21204"
},
{
"name": "Python",
"bytes": "6143109"
},
{
"name": "Shell",
"bytes": "23512"
}
],
"symlink_target": ""
}
|
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
from abc import abstractmethod
from twitter.common.lang import AbstractClass
from pants.base.exceptions import TaskError
class Engine(AbstractClass):
"""An engine for running a pants command line."""
@staticmethod
def execution_order(goals):
"""Yields all goals needed to attempt the given goals in proper goal execution order."""
# It's key that we process goal dependencies depth-first to maintain the initial goal
# ordering as passed in when goal graphs are dependency-disjoint. A breadth-first sort
# could mix next-order executions and violate the implied intent of the passed-in goal
# ordering.
processed = set()
def order(_goals):
for goal in _goals:
if goal not in processed:
processed.add(goal)
for dep in order(goal.dependencies):
yield dep
yield goal
for ordered in order(goals):
yield ordered
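# Illustrative ordering (hypothetical goals): if 'test' depends on ['gen', 'compile']
# and 'compile' depends on ['gen'], then execution_order(['test']) yields
# gen, compile, test -- each goal once, with all of its dependencies ahead of it.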
def execute(self, context, goals):
"""Executes the supplied goals and their dependencies against the given context.
:param context: The pants run context.
:param list goals: A list of ``Goal`` objects representing the command line goals explicitly
requested.
:returns int: An exit code of 0 upon success and non-zero otherwise.
"""
try:
self.attempt(context, goals)
return 0
except TaskError as e:
message = '%s' % e
if message:
print('\nFAILURE: %s\n' % e)
else:
print('\nFAILURE\n')
return e.exit_code if isinstance(e, TaskError) else 1
@abstractmethod
def attempt(self, context, goals):
"""Given the target context and command line goals, attempt to achieve all goals.
:param context: The pants run context.
:param list goals: A list of ``Goal`` objects representing the command line goals explicitly
requested.
"""
|
{
"content_hash": "e8190858292bdd7ec109733fe243c40f",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 96,
"avg_line_length": 33.278688524590166,
"alnum_prop": 0.6669950738916256,
"repo_name": "Ervii/garage-time",
"id": "f33badbd6f8cf7ea19f0b27600edf66353d833a6",
"size": "2177",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "garage/src/python/pants/engine/engine.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "9347"
},
{
"name": "GAP",
"bytes": "4684"
},
{
"name": "HTML",
"bytes": "64603"
},
{
"name": "Java",
"bytes": "43275"
},
{
"name": "JavaScript",
"bytes": "9523"
},
{
"name": "Protocol Buffer",
"bytes": "4664"
},
{
"name": "Python",
"bytes": "2200035"
},
{
"name": "Scala",
"bytes": "6693"
},
{
"name": "Shell",
"bytes": "29352"
},
{
"name": "Thrift",
"bytes": "1946"
}
],
"symlink_target": ""
}
|
__author__ = 'sdk'
from time import strftime, gmtime, mktime
import datetime
from xml.dom import minidom
import numpy as np
import cx_Oracle
from password import databaseSCO as database
import pandas as pd
pd.options.mode.chained_assignment = None
tables = {"ASDM": "XML_ASDM_ENTITIES", "Main": "XML_MAINTABLE_ENTITIES",
"AlmaRadiometer": "XML_ALMARADIOMETERTAB_ENTITIES", "Antenna": "XML_ANTENNATABLE_ENTITIES",
"CalAmpli": "XML_CALAMPLITABLE_ENTITIES", "CalAtmosphere": "XML_CALATMOSPHERETABL_ENTITIES",
"CalCurve": "XML_CALCURVETABLE_ENTITIES", "CalSeeing": "XML_CALSEEINGTABLE_ENTITIES",
"CalWVR": "XML_CALWVRTABLE_ENTITIES", "CalData": "XML_CALDATATABLE_ENTITIES",
"CalDelay": "XML_CALDELAYTABLE_ENTITIES", "CalDevice": "XML_CALDEVICETABLE_ENTITIES",
"CalFlux": "XML_CALFLUXTABLE_ENTITIES", "CalPhase": "XML_CALPHASETABLE_ENTITIES",
"CalReduction": "XML_CALREDUCTIONTABLE_ENTITIES", "ConfigDescription": "XML_CONFIGDESCRIPTION_ENTITIES",
"CorrelatorMode": "XML_CORRELATORMODETAB_ENTITIES", "DataDescription": "XML_DATADESCRIPTIONTA_ENTITIES",
"ExecBlock": "XML_EXECBLOCKTABLE_ENTITIES", "Feed": "XML_FEEDTABLE_ENTITIES",
"Annotation": "XML_ANNOTATIONTABLE_ENTITIES", "Ephemeris": "XML_EPHEMERISTABLE_ENTITIES",
"Anotation": "XML_ANNOTATIONTABLE_ENTITIES", "CalBandpass": "XML_CALBANDPASSTABLE_ENTITIES",
"CalPointing": "XML_CALPOINTINGTABLE_ENTITIES", "Field": "XML_FIELDTABLE_ENTITIES",
"Flag": "XML_FLAGTABLE_ENTITIES", "Focus": "XML_FOCUSTABLE_ENTITIES",
"FocusModel": "XML_FOCUSMODELTABLE_ENTITIES", "Pointing": "XML_POINTINGTABLE_ENTITIES",
"PointingModel": "XML_POINTINGMODELTABL_ENTITIES", "Polarization": "XML_POLARIZATIONTABLE_ENTITIES",
"Processor": "XML_PROCESSORTABLE_ENTITIES", "Receiver": "XML_RECEIVERTABLE_ENTITIES",
"SBSummary": "XML_SBSUMMARYTABLE_ENTITIES", "Scan": "XML_SCANTABLE_ENTITIES",
"Source": "XML_SOURCETABLE_ENTITIES", "SpectralWindow": "XML_SPECTRALWINDOWTAB_ENTITIES",
"State": "XML_STATETABLE_ENTITIES", "Station": "XML_STATIONTABLE_ENTITIES", "Subscan": "XML_SUBSCANTABLE_ENTITIES",
"SquareLawDetector": "XML_SQUARELAWDETECTOR_ENTITIES", "SwitchCycle": "XML_SWITCHCYCLETABLE_ENTITIES",
"SysCal": "XML_SYSCALTABLE_ENTITIES", "Weather": "XML_WEATHERTABLE_ENTITIES",
"SchedBlock":"XML_SCHEDBLOCK_ENTITIES", "ObsProject":"XML_OBSPROJECT_ENTITIES"}
def sdmTimeString(number=None):
"""
Convert a time value (as used by ASDM, i.e. MJD in nanoseconds) into a FITS type string.
:param number:
"""
st = number/1000000000L
# decimal microseconds ...
number = (number-st*1000000000L)/1000
# number of seconds since 1970-01-01T00:00:00
st = st-3506716800L
return strftime("%Y-%m-%dT%H:%M:%S", gmtime(st))+(".%6.6d" % number)
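# Worked example: MJD day 56000 is 56000*86400*1e9 ns, so
# sdmTimeString(4838400000000000000) -> '2012-03-14T00:00:00.000000'
# (3506716800 s is the offset between the MJD epoch 1858-11-17 and 1970-01-01).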
def gtm(t=None):
"""
Convert a time value (as used by ASDM, i.e. MJD in nanoseconds) into a FITS type string.
:param t:
"""
st = t-3506716800000000000L
return st/1000000000L
def gtm2(number=None):
"""
Convert a time value (as used by ASDM, i.e. MJD in nanoseconds) into a FITS type string.
:param number:
"""
st = number/1000000000L
# decimal microseconds ...
number = (number-st*1000000000L)/1000
# number of seconds since 1970-01-01T00:00:00
st = st-3506716800L
return datetime.datetime.fromtimestamp(mktime(gmtime(st))).replace(microsecond=(number))
def returnMAXPWVC(pwv=None):
if pwv <= 0.472:
return 0.472
elif pwv <= 0.658:
return 0.658
elif pwv <= 0.913:
return 0.913
elif pwv <= 1.262:
return 1.262
elif pwv <= 1.796:
return 1.796
elif pwv <= 2.748:
return 2.748
else:
return 5.186
def findChannel(start=None, width=None, repFreq=None, nchan=None):
channel = 0
if width < 0:
for i in xrange(nchan):
if start > repFreq:
start = start + width
else:
channel = -1.*i
break
else:
for i in xrange(nchan):
if start < repFreq:
start = start + width
else:
channel = i
break
return channel
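# Worked example: findChannel(start=100.0, width=0.5, repFreq=101.2, nchan=10)
# steps up from 100.0 (100.0, 100.5, 101.0 are all below repFreq) and stops at
# i=3, returning channel 3.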
def RadianTo(num=None, unit=None):
"""
:param num:
:param unit:
:return:
"""
Deg = float(num)*180.0/np.pi
if unit == 'dms':
if Deg < 0:
Deg = -Deg
sign = '-'
else:
sign = '+'
g = int(Deg)
m = int((Deg-g)*60.)
s = (Deg-g-m/60.)*3600.
return sign+str(g)+":"+str(m)+":"+str('%5.5f' % s)
if unit == 'hms':
h = int(Deg/15.)
m = int((Deg/15.-h)*60.)
s = (Deg/15.-h-m/60.)*3600.
return str(h)+":"+str(m)+":"+str('%5.5f' % s)
def arrayParser(line=None, dimensions=None, castFloat=False):
"""
:param line: String to be formatted
:param dimensions: dimensions of the array
:return: a list (1D) or a list of lists (2D); no support for 3D arrays yet
"""
result = list()
line = line.strip()
if dimensions == 1:
elements = line.split(' ')[1]
splits = line.split(' ')[2:]
for i in splits:
result.append(float(i)) if castFloat else result.append(i)
if int(elements) == len(result):
return result
else:
return False
if dimensions == 2:
rows = int(line.split(' ')[1])
columns = int(line.split(' ')[2])
splits = line.split(' ')[3:]
for j in range(0, rows):
temp = list()
for i in range(0, columns):
temp.append(float(splits[i+(j*columns)])) if castFloat else temp.append(splits[i+(j*columns)])
result.append(temp)
return result
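# Worked example: arrayParser('1 3 0.1 0.2 0.3', dimensions=1, castFloat=True)
# -> [0.1, 0.2, 0.3]; the leading '1 3' carries the rank and the element count
# as parsed above.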
def GetXML(archiveUID=None,table=None):
"""
:param archiveUID: Archive UID
:param table: Table
:return: XML String
"""
sqlXML = "select XMLType.GetClobVal(xml) from ALMA.XXXXYYY where archive_uid='ZZZZCCCC' "
sqlXML = sqlXML.replace('XXXXYYY',tables[table]).replace('ZZZZCCCC',archiveUID)
try:
orcl = cx_Oracle.connect(database)
cursorXML = orcl.cursor()
cursorXML.execute(sqlXML)
XMLTable = cursorXML.fetchone()
return XMLTable[0].read()
except Exception as e:
print e
return False
return False
def getProjectUID(projectCode=None):
"""
:param projectCode:
:return:
"""
sql = "select prj_archive_uid from ALMA.BMMV_OBSPROJECT where prj_code = 'XXXYYY'"
sql = sql.replace('XXXYYY',projectCode)
try:
orcl = cx_Oracle.connect(database)
cursor = orcl.cursor()
cursor.execute(sql)
data = cursor.fetchall()
orcl.close()
return data[0][0]
except Exception as e:
print e
def getSBMOUS():
sql = "select DOMAIN_ENTITY_ID, PARENT_OBS_UNIT_SET_STATUS_ID from ALMA.SCHED_BLOCK_STATUS"
try:
orcl = cx_Oracle.connect(database)
cursor = orcl.cursor()
cursor.execute(sql)
data = cursor.fetchall()
status = list()
for i in data:
status.append((i[0],i[1]))
orcl.close()
return status
except Exception as e:
print e
def getSBNames():
sql = "select archive_uid, sb_name from ALMA.BMMV_SCHEDBLOCK"
try:
orcl = cx_Oracle.connect(database)
cursor = orcl.cursor()
cursor.execute(sql)
data = cursor.fetchall()
sbnames = list()
for i in data:
sbnames.append((i[0],i[1]))
orcl.close()
return sbnames
except Exception as e:
print e
def getProjectCodes(cycle=2):
cycle_code = dict()
cycle_code[0] = '2011._.%._'
cycle_code[1] = '2012._.%._'
cycle_code[2] = '2013._.%._'
cycle_code[3] = '2015._.%._'
sql = '''select al2.PRJ_ARCHIVE_UID, al2.code
from ALMA.OBS_PROJECT_STATUS al1,
ALMA.BMMV_OBSPROJECT al2
where al1.obs_project_id in (select prj_archive_uid from ALMA.BMMV_OBSPROJECT where prj_code like 'XXXYYYZZZ')
and al1.domain_entity_state in ('Ready', 'Canceled', 'InProgress', 'Broken','Completed', 'Repaired','Phase2Submitted')
and al1.OBS_PROJECT_ID = al2.PRJ_ARCHIVE_UID '''
sql = sql.replace('XXXYYYZZZ',cycle_code[int(cycle)])
try:
orcl = cx_Oracle.connect(database)
cursor = orcl.cursor()
cursor.execute(sql)
data = cursor.fetchall()
codes = list()
for i in data:
codes.append((i[0],i[1]))
orcl.close()
return codes
except Exception as e:
print e
def getSBs(prj_uid=None):
sql = '''with t1 as (
select status_entity_id as seid1 from ALMA.OBS_UNIT_SET_STATUS where OBS_PROJECT_ID = 'PPPRRRJJJ' and PARENT_OBS_UNIT_SET_STATUS_ID is null
),
t2 as (
select status_entity_id as seid2, PARENT_OBS_UNIT_SET_STATUS_ID as paid2, domain_entity_id from ALMA.OBS_UNIT_SET_STATUS where OBS_PROJECT_ID = 'PPPRRRJJJ'
),
t3 as (
select status_entity_id as seid3, PARENT_OBS_UNIT_SET_STATUS_ID as paid3 from ALMA.OBS_UNIT_SET_STATUS where OBS_PROJECT_ID = 'PPPRRRJJJ'
),
t4 as (
select status_entity_id as seid4, PARENT_OBS_UNIT_SET_STATUS_ID as paid4 from ALMA.OBS_UNIT_SET_STATUS where OBS_PROJECT_ID = 'PPPRRRJJJ'
),
t5 as (
select domain_entity_id as schedblock_uid, PARENT_OBS_UNIT_SET_STATUS_ID as paid5 from ALMA.SCHED_BLOCK_STATUS where OBS_PROJECT_ID = 'PPPRRRJJJ'
)
SELECT t2.domain_entity_id, t5.schedblock_uid
FROM t1,
t2,
t3,
t4,
t5
WHERE t1.seid1 = t2.paid2
AND t2.seid2 = t3.paid3
AND t3.seid3 = t4.paid4
AND t4.seid4 = t5.paid5
ORDER BY 1 ASC'''
sql = sql.replace('PPPRRRJJJ', prj_uid)
try:
orcl = cx_Oracle.connect(database)
cursor = orcl.cursor()
cursor.execute(sql)
sb = cursor.fetchall()
orcl.close()
return sb
except Exception as e:
orcl.close()
print e
def spectrals_sb(prj_uid=None, partid=None):
sql = '''with t1 as (
select status_entity_id as seid1 from ALMA.OBS_UNIT_SET_STATUS where OBS_PROJECT_ID = 'PPPRRRJJJ' and PARENT_OBS_UNIT_SET_STATUS_ID is null
),
t2 as (
select status_entity_id as seid2, PARENT_OBS_UNIT_SET_STATUS_ID as paid2, domain_entity_id from ALMA.OBS_UNIT_SET_STATUS where OBS_PROJECT_ID = 'PPPRRRJJJ'
),
t3 as (
select status_entity_id as seid3, PARENT_OBS_UNIT_SET_STATUS_ID as paid3 from ALMA.OBS_UNIT_SET_STATUS where OBS_PROJECT_ID = 'PPPRRRJJJ'
),
t4 as (
select status_entity_id as seid4, PARENT_OBS_UNIT_SET_STATUS_ID as paid4 from ALMA.OBS_UNIT_SET_STATUS where OBS_PROJECT_ID = 'PPPRRRJJJ'
),
t5 as (
select domain_entity_id as schedblock_uid, PARENT_OBS_UNIT_SET_STATUS_ID as paid5 from ALMA.SCHED_BLOCK_STATUS where OBS_PROJECT_ID = 'PPPRRRJJJ'
)
SELECT t2.domain_entity_id, t5.schedblock_uid
FROM t1,
t2,
t3,
t4,
t5
WHERE t1.seid1 = t2.paid2
AND t2.seid2 = t3.paid3
AND t3.seid3 = t4.paid4
AND t4.seid4 = t5.paid5
AND t2.domain_entity_id = 'ZZZXXXYYY'
ORDER BY 1 ASC'''
sql = sql.replace('PPPRRRJJJ', prj_uid).replace('ZZZXXXYYY', partid)
try:
orcl = cx_Oracle.connect(database)
cursor = orcl.cursor()
cursor.execute(sql)
sb = cursor.fetchall()
specscan = list()
for i in sb:
specscan.append((prj_uid,i[1],'SpectralScan'))
return specscan
except Exception as e:
print e
def is_spectralscan(prj_uid=None):
sql = '''select al1.archive_uid, x.*
from
ALMA.XML_OBSPROJECT_ENTITIES al1,
XMLTable('for $first in /*:ObsProject/*:ObsProgram/*:ScienceGoal return element i {
element pol { data($first/*:SpectralSetupParameters/@polarisation)},
element type { data($first/*:SpectralSetupParameters/@type)},
element partid { data($first/*:ObsUnitSetRef/@partId)}
}'
PASSING al1.XML COLUMNS
pol varchar2(50) PATH 'pol',
type varchar2(32) PATH 'type',
partid varchar2(20) PATH 'partid'
) x
where al1. archive_uid = 'XXXXYYYY'
order by al1.timestamp desc'''
sql = sql.replace('XXXXYYYY', prj_uid)
try:
orcl = cx_Oracle.connect(database)
cursor = orcl.cursor()
cursor.execute(sql)
science_goals = cursor.fetchall()
cursor.close()
return science_goals
except Exception as e:
print e
def is_band89(prj_uid=None):
sql = '''with t1 as (
select status_entity_id as seid1 from ALMA.OBS_UNIT_SET_STATUS where OBS_PROJECT_ID = 'XXXXYYYYZZZZ' and PARENT_OBS_UNIT_SET_STATUS_ID is null
),
t2 as (
select status_entity_id as seid2, PARENT_OBS_UNIT_SET_STATUS_ID as paid2, domain_entity_id from ALMA.OBS_UNIT_SET_STATUS where OBS_PROJECT_ID = 'XXXXYYYYZZZZ'
),
t3 as (
select status_entity_id as seid3, PARENT_OBS_UNIT_SET_STATUS_ID as paid3 from ALMA.OBS_UNIT_SET_STATUS where OBS_PROJECT_ID = 'XXXXYYYYZZZZ'
),
t4 as (
select status_entity_id as seid4, PARENT_OBS_UNIT_SET_STATUS_ID as paid4 from ALMA.OBS_UNIT_SET_STATUS where OBS_PROJECT_ID = 'XXXXYYYYZZZZ'
),
t5 as (
select domain_entity_id as schedblock_uid, PARENT_OBS_UNIT_SET_STATUS_ID as paid5 from ALMA.SCHED_BLOCK_STATUS where OBS_PROJECT_ID = 'XXXXYYYYZZZZ'
),
t6 as (
select archive_uid as sb_uid, receiver_band as band from ALMA.BMMV_SCHEDBLOCK where prj_ref = 'XXXXYYYYZZZZ'
)
SELECT t2.domain_entity_id, t5.schedblock_uid,t6.band
FROM t1,
t2,
t3,
t4,
t5,
t6
WHERE t1.seid1 = t2.paid2
AND t2.seid2 = t3.paid3
AND t3.seid3 = t4.paid4
AND t4.seid4 = t5.paid5
and t6.sb_uid = t5.schedblock_uid
ORDER BY 1 ASC'''
sql = sql.replace('XXXXYYYYZZZZ',prj_uid)
try:
orcl = cx_Oracle.connect(database)
cursor = orcl.cursor()
cursor.execute(sql)
sb = cursor.fetchall()
cursor.close()
return sb
except Exception as e:
print e
|
{
"content_hash": "4f0c25092b6b6e67182cd9a1e5f5935f",
"timestamp": "",
"source": "github",
"line_count": 420,
"max_line_length": 168,
"avg_line_length": 34.34047619047619,
"alnum_prop": 0.6104832559106982,
"repo_name": "SDK/metadatachecker",
"id": "5605832c9d234c003fb66981b57d8fb0acf3123e",
"size": "14423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sacm/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "74129"
}
],
"symlink_target": ""
}
|
import os
from setuptools import find_packages, setup
__version__ = None
exec(open('opentaxii/_version.py').read())
def here(*path):
return os.path.join(os.path.dirname(__file__), *path)
def get_file_contents(filename):
with open(here(filename), encoding='utf8') as fp:
return fp.read()
# This is a quick and dirty way to include everything from
# requirements.txt as package dependencies.
install_requires = get_file_contents('requirements.txt').split()
setup(
name='opentaxii',
description='TAXII server implementation in Python from EclecticIQ',
long_description=get_file_contents('README.md'),
long_description_content_type='text/markdown',
url='https://github.com/EclecticIQ/OpenTAXII',
author='EclecticIQ',
author_email='opentaxii@eclecticiq.com',
version=__version__,
license='BSD License',
packages=find_packages(exclude=['tests']),
include_package_data=True,
package_data={
'opentaxii': ['*.yml']
},
entry_points={
'console_scripts': [
'opentaxii-run-dev = opentaxii.cli.run:run_in_dev_mode',
'opentaxii-create-account = opentaxii.cli.auth:create_account',
'opentaxii-update-account = opentaxii.cli.auth:update_account',
'opentaxii-sync-data = opentaxii.cli.persistence:sync_data_configuration',
'opentaxii-delete-blocks = opentaxii.cli.persistence:delete_content_blocks',
]
},
install_requires=install_requires,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Internet',
'Topic :: Security',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
|
{
"content_hash": "1b996196e0310f6c3272c5f1ff29b827",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 88,
"avg_line_length": 35.25,
"alnum_prop": 0.6391843971631206,
"repo_name": "EclecticIQ/OpenTAXII",
"id": "3dea2fde362cd726222c93af1badfb0be91b69ab",
"size": "2256",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "237268"
},
{
"name": "Shell",
"bytes": "3174"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
n = int(raw_input())
setA = set(map(int, raw_input().split()))
m = int(raw_input())
setB = set(map(int, raw_input().split()))
# Print the symmetric difference of two sets.
setC = setA ^ setB
print(*sorted(setC), sep="\n")
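# e.g. {1, 2, 4, 5} ^ {4, 5, 6} == {1, 2, 6}, printed sorted with one value per line.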
|
{
"content_hash": "f6ecf3e210aad36c58613a4429f3aa91",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 45,
"avg_line_length": 32.5,
"alnum_prop": 0.6615384615384615,
"repo_name": "zubie7a/CPP",
"id": "2ec9a4861f9eaca74a5001dd64eec0825aad61d1",
"size": "321",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "HackerRank/Python_Learn/04_Sets/02_Symmetric_Difference.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "290798"
}
],
"symlink_target": ""
}
|
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect
from realback_api import models, forms
@login_required
def front_page(request):
""" View lecturer front page """
# TODO forms
course_form = forms.CourseForm()
lecture_form = forms.LectureForm()
topic_form = forms.LectureTopicForm()
return render(request, 'lecturer/front_page.html', {
'course_form': course_form,
'lecture_form': lecture_form,
'topic_form': topic_form,
})
|
{
"content_hash": "406603c5ac91369570eaaf2fde6a7ac4",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 57,
"avg_line_length": 31.176470588235293,
"alnum_prop": 0.6886792452830188,
"repo_name": "martinlunde/RealBack",
"id": "cacb165cd5cc3f119e5854f720933faea97a59dc",
"size": "531",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "lecturer/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16631"
},
{
"name": "HTML",
"bytes": "35985"
},
{
"name": "JavaScript",
"bytes": "55462"
},
{
"name": "Python",
"bytes": "82432"
}
],
"symlink_target": ""
}
|
"""Test segwit transactions and blocks on P2P network."""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, get_witness_script, WITNESS_COMMITMENT_HEADER
from test_framework.key import CECKey, CPubKey
import time
import random
from binascii import hexlify
# The versionbit bit used to signal activation of SegWit
VB_WITNESS_BIT = 1
VB_PERIOD = 144
VB_TOP_BITS = 0x20000000
MAX_SIGOP_COST = 80000
# Calculate the virtual size of a witness block:
# (base + witness/4)
def get_virtual_size(witness_block):
base_size = len(witness_block.serialize())
total_size = len(witness_block.serialize(with_witness=True))
# the "+3" is so we round up
vsize = int((3*base_size + total_size + 3)/4)
return vsize
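# Worked example: base_size = 900000 and total_size = 1300000 give
# vsize = int((3*900000 + 1300000 + 3)/4) = 1000000, i.e. witness bytes count
# at one quarter weight and the "+3" rounds the division up.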
class TestNode(NodeConnCB):
def set_test_params(self):
self.num_nodes = 3
self.getdataset = set()
def on_getdata(self, conn, message):
for inv in message.inv:
self.getdataset.add(inv.hash)
def announce_tx_and_wait_for_getdata(self, tx, timeout=60):
with mininode_lock:
self.last_message.pop("getdata", None)
self.send_message(msg_inv(inv=[CInv(1, tx.sha256)]))
self.wait_for_getdata(timeout)
def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60):
with mininode_lock:
self.last_message.pop("getdata", None)
self.last_message.pop("getheaders", None)
msg = msg_headers()
msg.headers = [ CBlockHeader(block) ]
if use_header:
self.send_message(msg)
else:
self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
self.wait_for_getheaders()
self.send_message(msg)
self.wait_for_getdata()
def request_block(self, blockhash, inv_type, timeout=60):
with mininode_lock:
self.last_message.pop("block", None)
self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)]))
self.wait_for_block(blockhash, timeout)
return self.last_message["block"].block
def test_transaction_acceptance(self, tx, with_witness, accepted, reason=None):
tx_message = msg_tx(tx)
if with_witness:
tx_message = msg_witness_tx(tx)
self.send_message(tx_message)
self.sync_with_ping()
assert_equal(tx.hash in self.connection.rpc.getrawmempool(), accepted)
if (reason != None and not accepted):
# Check the rejection reason as well.
with mininode_lock:
assert_equal(self.last_message["reject"].reason, reason)
# Test whether a witness block had the correct effect on the tip
def test_witness_block(self, block, accepted, with_witness=True):
if with_witness:
self.send_message(msg_witness_block(block))
else:
self.send_message(msg_block(block))
self.sync_with_ping()
assert_equal(self.connection.rpc.getbestblockhash() == block.hash, accepted)
# Used to keep track of anyone-can-spend outputs that we can use in the tests
class UTXO(object):
def __init__(self, sha256, n, nValue):
self.sha256 = sha256
self.n = n
self.nValue = nValue
# Helper for getting the script associated with a P2PKH
def GetP2PKHScript(pubkeyhash):
return CScript([CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash, CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)])
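# i.e. the standard P2PKH template:
#   OP_DUP OP_HASH160 <20-byte pubkeyhash> OP_EQUALVERIFY OP_CHECKSIG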
# Add signature for a P2PK witness program.
def sign_P2PK_witness_input(script, txTo, inIdx, hashtype, value, key):
tx_hash = SegwitVersion1SignatureHash(script, txTo, inIdx, hashtype, value)
signature = key.sign(tx_hash) + chr(hashtype).encode('latin-1')
txTo.wit.vtxinwit[inIdx].scriptWitness.stack = [signature, script]
txTo.rehash()
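# Note: SegwitVersion1SignatureHash is the BIP143 digest, which (unlike the
# legacy sighash) commits to the value of the input being spent.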
class SegWitTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [["-whitelist=127.0.0.1"], ["-whitelist=127.0.0.1", "-acceptnonstdtxn=0"], ["-whitelist=127.0.0.1", "-vbparams=segwit:0:0"]]
def setup_network(self):
self.setup_nodes()
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
self.sync_all()
''' Helpers '''
# Build a block on top of node0's tip.
def build_next_block(self, nVersion=4):
tip = self.nodes[0].getbestblockhash()
height = self.nodes[0].getblockcount() + 1
block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time)
block.nVersion = nVersion
block.rehash()
return block
# Adds list of transactions to block, adds witness commitment, then solves.
def update_witness_block_with_transactions(self, block, tx_list, nonce=0):
block.vtx.extend(tx_list)
add_witness_commitment(block, nonce)
block.solve()
return
''' Individual tests '''
def test_witness_services(self):
self.log.info("Verifying NODE_WITNESS service bit")
assert((self.test_node.connection.nServices & NODE_WITNESS) != 0)
# See if sending a regular transaction works, and create a utxo
# to use in later tests.
def test_non_witness_transaction(self):
# Mine a block with an anyone-can-spend coinbase,
# let it mature, then try to spend it.
self.log.info("Testing non-witness transaction")
block = self.build_next_block(nVersion=1)
block.solve()
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping() # make sure the block was processed
txid = block.vtx[0].sha256
self.nodes[0].generate(99) # let the block mature
# Create a transaction that spends the coinbase
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(txid, 0), b""))
tx.vout.append(CTxOut(49*100000000, CScript([OP_TRUE])))
tx.calc_sha256()
# Check that serializing it with or without witness is the same
# This is a sanity check of our testing framework.
assert_equal(msg_tx(tx).serialize(), msg_witness_tx(tx).serialize())
self.test_node.send_message(msg_witness_tx(tx))
self.test_node.sync_with_ping() # make sure the tx was processed
assert(tx.hash in self.nodes[0].getrawmempool())
# Save this transaction for later
self.utxo.append(UTXO(tx.sha256, 0, 49*100000000))
self.nodes[0].generate(1)
# Verify that blocks with witnesses are rejected before activation.
def test_unnecessary_witness_before_segwit_activation(self):
self.log.info("Testing behavior of unnecessary witnesses")
# For now, rely on earlier tests to have created at least one utxo for
# us to use
assert(len(self.utxo) > 0)
assert(get_bip9_status(self.nodes[0], 'segwit')['status'] != 'active')
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])]
# Verify the hash with witness differs from the txid
# (otherwise our testing framework must be broken!)
tx.rehash()
assert(tx.sha256 != tx.calc_sha256(with_witness=True))
# Construct a segwit-signaling block that includes the transaction.
block = self.build_next_block(nVersion=(VB_TOP_BITS|(1 << VB_WITNESS_BIT)))
self.update_witness_block_with_transactions(block, [tx])
# Sending witness data before activation is not allowed (anti-spam
# rule).
self.test_node.test_witness_block(block, accepted=False)
# TODO: fix synchronization so we can test reject reason
# Right now, bitcoind delays sending reject messages for blocks
# until the future, making synchronization here difficult.
#assert_equal(self.test_node.last_message["reject"].reason, "unexpected-witness")
# But it should not be permanently marked bad...
# Resend without witness information.
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping()
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
sync_blocks(self.nodes)
# Create a p2sh output -- this is so we can pass the standardness
# rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
# in P2SH).
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# Now check that unnecessary witnesses can't be used to blind a node
# to a transaction, eg by violating standardness checks.
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, scriptPubKey))
tx2.rehash()
self.test_node.test_transaction_acceptance(tx2, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# We'll add an unnecessary witness to this transaction that would cause
# it to be non-standard, to test that violating policy with a witness before
# segwit activation doesn't blind a node to a transaction. Transactions
# rejected for having a witness before segwit activation shouldn't be added
# to the rejection cache.
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, scriptPubKey))
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [b'a'*400000]
tx3.rehash()
# Note that this should be rejected for the premature witness reason,
# rather than a policy check, since segwit hasn't activated yet.
self.std_node.test_transaction_acceptance(tx3, True, False, b'no-witness-yet')
# If we send without witness, it should be accepted.
self.std_node.test_transaction_acceptance(tx3, False, True)
# Now create a new anyone-can-spend utxo for the next test.
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), CScript([p2sh_program])))
tx4.vout.append(CTxOut(tx3.vout[0].nValue-1000, CScript([OP_TRUE])))
tx4.rehash()
self.test_node.test_transaction_acceptance(tx3, False, True)
self.test_node.test_transaction_acceptance(tx4, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx4.sha256, 0, tx4.vout[0].nValue))
# Mine enough blocks for segwit's vb state to be 'started'.
def advance_to_segwit_started(self):
height = self.nodes[0].getblockcount()
# Will need to rewrite the tests here if we are past the first period
assert(height < VB_PERIOD - 1)
# Genesis block is 'defined'.
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'defined')
# Advance to end of period, status should now be 'started'
self.nodes[0].generate(VB_PERIOD-height-1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
# Mine enough blocks to lock in segwit, but don't activate.
# TODO: we could verify that lockin only happens at the right threshold of
# signalling blocks, rather than just at the right period boundary.
def advance_to_segwit_lockin(self):
height = self.nodes[0].getblockcount()
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
# Advance to end of period, and verify lock-in happens at the end
self.nodes[0].generate(VB_PERIOD-1)
height = self.nodes[0].getblockcount()
assert((height % VB_PERIOD) == VB_PERIOD - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
# Mine enough blocks to activate segwit.
# TODO: we could verify that activation only happens at the right threshold
# of signalling blocks, rather than just at the right period boundary.
def advance_to_segwit_active(self):
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
height = self.nodes[0].getblockcount()
self.nodes[0].generate(VB_PERIOD - (height%VB_PERIOD) - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'active')
# This test can only be run after segwit has activated
def test_witness_commitments(self):
self.log.info("Testing witness commitments")
# First try a correct witness commitment.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Test the test -- witness serialization should be different
assert(msg_witness_block(block).serialize() != msg_block(block).serialize())
# This empty block should be valid.
self.test_node.test_witness_block(block, accepted=True)
# Try to tweak the nonce
block_2 = self.build_next_block()
add_witness_commitment(block_2, nonce=28)
block_2.solve()
# The commitment should have changed!
assert(block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1])
# This should also be valid.
self.test_node.test_witness_block(block_2, accepted=True)
# Now test commitments with actual transactions
assert (len(self.utxo) > 0)
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# Let's construct a witness program
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
tx.rehash()
# tx2 will spend tx1, and send back to a regular anyone-can-spend address
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1)
# Add an extra OP_RETURN output that matches the witness commitment template,
# even though it has extra data after the incorrect commitment.
# This block should fail.
block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10])))
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
block_3.solve()
self.test_node.test_witness_block(block_3, accepted=False)
# Add a different commitment with different nonce, but in the
# right location, and with some funds burned(!).
# This should succeed (nValue shouldn't affect finding the
# witness commitment).
add_witness_commitment(block_3, nonce=0)
block_3.vtx[0].vout[0].nValue -= 1
block_3.vtx[0].vout[-1].nValue += 1
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
assert(len(block_3.vtx[0].vout) == 4) # 3 OP_returns
block_3.solve()
self.test_node.test_witness_block(block_3, accepted=True)
# Finally test that a block with no witness transactions can
# omit the commitment.
block_4 = self.build_next_block()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program))
tx3.rehash()
block_4.vtx.append(tx3)
block_4.hashMerkleRoot = block_4.calc_merkle_root()
block_4.solve()
self.test_node.test_witness_block(block_4, with_witness=False, accepted=True)
# Update available utxo's for use in later test.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
def test_block_malleability(self):
self.log.info("Testing witness block malleability")
# Make sure that a block that has too big a virtual size
# because of a too-large coinbase witness is not permanently
# marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a'*5000000)
assert(get_virtual_size(block) > MAX_BLOCK_BASE_SIZE)
# We can't send over the p2p network, because this is too big to relay
# TODO: repeat this test with a block that can be relayed
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() != block.hash)
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop()
assert(get_virtual_size(block) < MAX_BLOCK_BASE_SIZE)
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() == block.hash)
# Now make sure that malleating the witness nonce doesn't
# result in a block permanently marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Change the nonce -- should not cause the block to be permanently
# failed
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(1) ]
self.test_node.test_witness_block(block, accepted=False)
# Changing the witness nonce doesn't change the block hash
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(0) ]
self.test_node.test_witness_block(block, accepted=True)
def test_witness_block_size(self):
self.log.info("Testing witness block size limit")
# TODO: Test that non-witness carrying blocks can't exceed 1MB
# Skipping this test for now; this is covered in p2p-fullblocktest.py
# Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB.
block = self.build_next_block()
assert(len(self.utxo) > 0)
# Create a P2WSH transaction.
# The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE.
# This should give us plenty of room to tweak the spending tx's
# virtual size.
NUM_DROPS = 200 # 201 max ops per script!
NUM_OUTPUTS = 50
witness_program = CScript([OP_2DROP]*NUM_DROPS + [OP_TRUE])
witness_hash = uint256_from_str(sha256(witness_program))
scriptPubKey = CScript([OP_0, ser_uint256(witness_hash)])
prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n)
value = self.utxo[0].nValue
parent_tx = CTransaction()
parent_tx.vin.append(CTxIn(prevout, b""))
child_value = int(value/NUM_OUTPUTS)
for i in range(NUM_OUTPUTS):
parent_tx.vout.append(CTxOut(child_value, scriptPubKey))
parent_tx.vout[0].nValue -= 50000
assert(parent_tx.vout[0].nValue > 0)
parent_tx.rehash()
child_tx = CTransaction()
for i in range(NUM_OUTPUTS):
child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b""))
child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))]
for i in range(NUM_OUTPUTS):
child_tx.wit.vtxinwit.append(CTxInWitness())
child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a'*195]*(2*NUM_DROPS) + [witness_program]
child_tx.rehash()
self.update_witness_block_with_transactions(block, [parent_tx, child_tx])
vsize = get_virtual_size(block)
additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize)*4
i = 0
while additional_bytes > 0:
# Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1
extra_bytes = min(additional_bytes+1, 55)
block.vtx[-1].wit.vtxinwit[int(i/(2*NUM_DROPS))].scriptWitness.stack[i%(2*NUM_DROPS)] = b'a'*(195+extra_bytes)
additional_bytes -= extra_bytes
i += 1
block.vtx[0].vout.pop() # Remove old commitment
add_witness_commitment(block)
block.solve()
vsize = get_virtual_size(block)
assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1)
# Make sure that our test case would exceed the old max-network-message
# limit
assert(len(block.serialize(True)) > 2*1024*1024)
self.test_node.test_witness_block(block, accepted=False)
# Now resize the second transaction to make the block fit.
cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0])
block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(cur_length-1)
block.vtx[0].vout.pop()
add_witness_commitment(block)
block.solve()
assert(get_virtual_size(block) == MAX_BLOCK_BASE_SIZE)
self.test_node.test_witness_block(block, accepted=True)
# Update available utxo's
self.utxo.pop(0)
self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue))
# submitblock will try to add the nonce automatically, so that mining
# software doesn't need to worry about doing so itself.
def test_submit_block(self):
block = self.build_next_block()
# Try using a custom nonce and then don't supply it.
# This shouldn't possibly work.
add_witness_commitment(block, nonce=1)
block.vtx[0].wit = CTxWitness() # drop the nonce
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() != block.hash)
# Now redo commitment with the standard nonce, but let bitcoind fill it in.
add_witness_commitment(block, nonce=0)
block.vtx[0].wit = CTxWitness()
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# This time, add a tx with non-empty witness, but don't supply
# the commitment.
block_2 = self.build_next_block()
add_witness_commitment(block_2)
block_2.solve()
# Drop commitment and nonce -- submitblock should not fill in.
block_2.vtx[0].vout.pop()
block_2.vtx[0].wit = CTxWitness()
self.nodes[0].submitblock(bytes_to_hex_str(block_2.serialize(True)))
# Tip should not advance!
assert(self.nodes[0].getbestblockhash() != block_2.hash)
# Consensus tests of extra witness data in a transaction.
def test_extra_witness_data(self):
self.log.info("Testing extra witness data in tx")
assert(len(self.utxo) > 0)
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# First try extra witness data on a tx that doesn't require a witness
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-2000, scriptPubKey))
tx.vout.append(CTxOut(1000, CScript([OP_TRUE]))) # non-witness output
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])]
tx.rehash()
self.update_witness_block_with_transactions(block, [tx])
# Extra witness data should not be allowed.
self.test_node.test_witness_block(block, accepted=False)
# Try extra signature data. Ok if we're not spending a witness output.
block.vtx[1].wit.vtxinwit = []
block.vtx[1].vin[0].scriptSig = CScript([OP_0])
block.vtx[1].rehash()
add_witness_commitment(block)
block.solve()
self.test_node.test_witness_block(block, accepted=True)
# Now try extra witness/signature data on an input that DOES require a
# witness
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) # witness output
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b"")) # non-witness
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()])
tx2.wit.vtxinwit[0].scriptWitness.stack = [ CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program ]
tx2.wit.vtxinwit[1].scriptWitness.stack = [ CScript([OP_TRUE]) ]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
# This has extra witness data, so it should fail.
self.test_node.test_witness_block(block, accepted=False)
# Now get rid of the extra witness, but add extra scriptSig data
tx2.vin[0].scriptSig = CScript([OP_TRUE])
tx2.vin[1].scriptSig = CScript([OP_TRUE])
tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0)
tx2.wit.vtxinwit[1].scriptWitness.stack = []
tx2.rehash()
add_witness_commitment(block)
block.solve()
# This has extra signature data for a witness input, so it should fail.
self.test_node.test_witness_block(block, accepted=False)
# Now get rid of the extra scriptsig on the witness input, and verify
# success (even with extra scriptsig data in the non-witness input)
tx2.vin[0].scriptSig = b""
tx2.rehash()
add_witness_commitment(block)
block.solve()
self.test_node.test_witness_block(block, accepted=True)
# Update utxo for later tests
self.utxo.pop(0)
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_max_witness_push_length(self):
''' Should only allow up to 520 byte pushes in witness stack '''
self.log.info("Testing maximum witness push size")
MAX_SCRIPT_ELEMENT_SIZE = 520
assert(len(self.utxo))
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
# First try a 521-byte stack element
tx2.wit.vtxinwit[0].scriptWitness.stack = [ b'a'*(MAX_SCRIPT_ELEMENT_SIZE+1), witness_program ]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=False)
# Now reduce the length of the stack element
tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(MAX_SCRIPT_ELEMENT_SIZE)
add_witness_commitment(block)
block.solve()
self.test_node.test_witness_block(block, accepted=True)
# Update the utxo for later tests
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_max_witness_program_length(self):
# Witness outputs can be created with arbitrarily long programs, but a
# program greater than 10k bytes can't be successfully spent
self.log.info("Testing maximum witness program length")
assert(len(self.utxo))
MAX_PROGRAM_LENGTH = 10000
# This program is 19 max pushes (9937 bytes), then 64 more opcode-bytes.
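# Each 520-byte push serializes to 523 bytes (OP_PUSHDATA2 opcode + 2
# length bytes + 520 data bytes), so 19 pushes = 9937 bytes; the 63
# OP_DROPs plus the final OP_TRUE add 64 bytes, for 10001 bytes in total.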
long_witness_program = CScript([b'a'*520]*19 + [OP_DROP]*63 + [OP_TRUE])
assert(len(long_witness_program) == MAX_PROGRAM_LENGTH+1)
long_witness_hash = sha256(long_witness_program)
long_scriptPubKey = CScript([OP_0, long_witness_hash])
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, long_scriptPubKey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*44 + [long_witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=False)
# Try again with one less byte in the witness program
witness_program = CScript([b'a'*520]*19 + [OP_DROP]*62 + [OP_TRUE])
assert(len(witness_program) == MAX_PROGRAM_LENGTH)
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx.vout[0] = CTxOut(tx.vout[0].nValue, scriptPubKey)
tx.rehash()
tx2.vin[0].prevout.hash = tx.sha256
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*43 + [witness_program]
tx2.rehash()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_witness_input_length(self):
''' Ensure that vin length must match vtxinwit length '''
self.log.info("Testing witness input length")
assert(len(self.utxo))
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# Create a transaction that splits our utxo into many outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
nValue = self.utxo[0].nValue
for i in range(10):
tx.vout.append(CTxOut(int(nValue/10), scriptPubKey))
tx.vout[0].nValue -= 1000
assert(tx.vout[0].nValue >= 0)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
# Try various ways to spend tx that should all break.
# This "broken" transaction serializer will not normalize
# the length of vtxinwit.
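# (The normal serializer pads or truncates vtxinwit to len(vin) before
# serializing; this subclass serializes self.wit exactly as constructed,
# so a mismatched vtxinwit length actually goes out on the wire.)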
class BrokenCTransaction(CTransaction):
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
tx2 = BrokenCTransaction()
for i in range(10):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.vout.append(CTxOut(nValue-3000, CScript([OP_TRUE])))
# First try using a too long vtxinwit
for i in range(11):
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_program]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=False)
# Now try using a too short vtxinwit
tx2.wit.vtxinwit.pop()
tx2.wit.vtxinwit.pop()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=False)
# Now make one of the intermediate witnesses be incorrect
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_program]
tx2.wit.vtxinwit[5].scriptWitness.stack = [ witness_program ]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=False)
# Fix the broken witness and the block should be accepted.
tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_witness_tx_relay_before_segwit_activation(self):
self.log.info("Testing relay of witness transactions")
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected for premature-witness, but should
# not be added to recently rejected list.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a' ]
tx.rehash()
tx_hash = tx.sha256
tx_value = tx.vout[0].nValue
# Verify that if a peer doesn't set nServices to include NODE_WITNESS,
# the getdata is just for the non-witness portion.
self.old_node.announce_tx_and_wait_for_getdata(tx)
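# inv type 1 is MSG_TX; a witness-aware peer would instead request
# MSG_TX|MSG_WITNESS_FLAG.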
assert(self.old_node.last_message["getdata"].inv[0].type == 1)
# Since we haven't delivered the tx yet, inv'ing the same tx from
# a witness transaction ought not result in a getdata.
try:
self.test_node.announce_tx_and_wait_for_getdata(tx, timeout=2)
self.log.error("Error: duplicate tx getdata!")
raise RuntimeError("Error: duplicate tx getdata!")  # must not be an AssertionError, which the except below would swallow
except AssertionError as e:
pass
# Delivering this transaction with witness should fail (no matter
# who it's from)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.old_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
# But eliminating the witness should fix it
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
# Cleanup: mine the first transaction and update utxo
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx_hash, 0, tx_value))
# After segwit activates, verify that mempool:
# - rejects transactions with unnecessary/extra witnesses
# - accepts transactions with valid witnesses
# and that witness transactions are relayed to non-upgraded peers.
def test_tx_relay_after_segwit_activation(self):
self.log.info("Testing relay of witness transactions")
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected because we can't use a witness
# when spending a non-witness output.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a' ]
tx.rehash()
tx_hash = tx.sha256
# Verify that unnecessary witnesses are rejected.
self.test_node.announce_tx_and_wait_for_getdata(tx)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
# Verify that removing the witness succeeds.
self.test_node.announce_tx_and_wait_for_getdata(tx)
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
# Now try to add extra witness data to a valid witness tx.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, scriptPubKey))
tx2.rehash()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
# Add a witness that is too large for IsStandard and check that it does not enter the reject filter
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
witness_program2 = CScript([b'a'*400000])
tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])))
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2]
tx3.rehash()
# The node should not have added the tx to its reject filter, so it will request it again when announced
self.std_node.announce_tx_and_wait_for_getdata(tx3)
self.std_node.test_transaction_acceptance(tx3, True, False, b'tx-size')
self.std_node.announce_tx_and_wait_for_getdata(tx3)
self.std_node.test_transaction_acceptance(tx3, True, False, b'tx-size')
# Remove witness stuffing, instead add extra witness push on stack
tx3.vout[0] = CTxOut(tx2.vout[0].nValue-1000, CScript([OP_TRUE]))
tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program ]
tx3.rehash()
self.test_node.test_transaction_acceptance(tx2, with_witness=True, accepted=True)
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=False)
# Get rid of the extra witness, and verify acceptance.
tx3.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
# Also check that old_node gets a tx announcement, even though this is
# a witness transaction.
self.old_node.wait_for_inv([CInv(1, tx2.sha256)]) # wait until tx2 was inv'ed
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
self.old_node.wait_for_inv([CInv(1, tx3.sha256)])
# Test that getrawtransaction returns correct witness information
# hash, size, vsize
raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1)
assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True))
assert_equal(raw_tx["size"], len(tx3.serialize_with_witness()))
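# Per BIP141, vsize = ceil(weight / 4) where weight = 3 * base_size +
# total_size; adding 3 before integer division emulates the ceiling.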
vsize = (len(tx3.serialize_with_witness()) + 3*len(tx3.serialize_without_witness()) + 3) // 4
assert_equal(raw_tx["vsize"], vsize)
assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1)
assert_equal(raw_tx["vin"][0]["txinwitness"][0], hexlify(witness_program).decode('ascii'))
assert(vsize != raw_tx["size"])
# Cleanup: mine the transactions and update utxo for next test
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
# Test that block requests to NODE_WITNESS peer are with MSG_WITNESS_FLAG
# This is true regardless of segwit activation.
# Also test that we don't ask for blocks from unupgraded peers
def test_block_relay(self, segwit_activated):
self.log.info("Testing block relay")
blocktype = 2|MSG_WITNESS_FLAG
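# 2 is MSG_BLOCK; OR-ing in MSG_WITNESS_FLAG (1 << 30, per BIP144) asks
# the peer to serialize the block with witness data.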
# test_node has set NODE_WITNESS, so all getdata requests should be for
# witness blocks.
# Test announcing a block via inv results in a getdata, and that
# announcing a version 4 or random VB block with a header results in a getdata
block1 = self.build_next_block()
block1.solve()
self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False)
assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
self.test_node.test_witness_block(block1, True)
block2 = self.build_next_block(nVersion=4)
block2.solve()
self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True)
assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
self.test_node.test_witness_block(block2, True)
block3 = self.build_next_block(nVersion=(VB_TOP_BITS | (1<<15)))
block3.solve()
self.test_node.announce_block_and_wait_for_getdata(block3, use_header=True)
assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
self.test_node.test_witness_block(block3, True)
# Check that we can getdata for witness blocks or regular blocks,
# and the right thing happens.
if not segwit_activated:
# Before activation, we should be able to request old blocks with
# or without witness, and they should be the same.
chain_height = self.nodes[0].getblockcount()
# Pick 10 random blocks on main chain, and verify that getdata's
# for MSG_BLOCK, MSG_WITNESS_BLOCK, and rpc getblock() are equal.
all_heights = list(range(chain_height+1))
random.shuffle(all_heights)
all_heights = all_heights[0:10]
for height in all_heights:
block_hash = self.nodes[0].getblockhash(height)
rpc_block = self.nodes[0].getblock(block_hash, False)
block_hash = int(block_hash, 16)
block = self.test_node.request_block(block_hash, 2)
wit_block = self.test_node.request_block(block_hash, 2|MSG_WITNESS_FLAG)
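# (Pre-activation blocks carry no witness data, so the witness and
# non-witness serializations are byte-for-byte identical.)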
assert_equal(block.serialize(True), wit_block.serialize(True))
assert_equal(block.serialize(), hex_str_to_bytes(rpc_block))
else:
# After activation, witness blocks and non-witness blocks should
# be different. Verify rpc getblock() returns witness blocks, while
# getdata respects the requested type.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [])
# This gives us a witness commitment.
assert(len(block.vtx[0].wit.vtxinwit) == 1)
assert(len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1)
self.test_node.test_witness_block(block, accepted=True)
# Now try to retrieve it...
rpc_block = self.nodes[0].getblock(block.hash, False)
non_wit_block = self.test_node.request_block(block.sha256, 2)
wit_block = self.test_node.request_block(block.sha256, 2|MSG_WITNESS_FLAG)
assert_equal(wit_block.serialize(True), hex_str_to_bytes(rpc_block))
assert_equal(wit_block.serialize(False), non_wit_block.serialize())
assert_equal(wit_block.serialize(True), block.serialize(True))
# Test size, vsize, weight
rpc_details = self.nodes[0].getblock(block.hash, True)
assert_equal(rpc_details["size"], len(block.serialize(True)))
assert_equal(rpc_details["strippedsize"], len(block.serialize(False)))
weight = 3*len(block.serialize(False)) + len(block.serialize(True))
assert_equal(rpc_details["weight"], weight)
# Upgraded node should not ask for blocks from unupgraded
block4 = self.build_next_block(nVersion=4)
block4.solve()
self.old_node.getdataset = set()
# Blocks can be requested via direct-fetch (immediately upon processing the announcement)
# or via parallel download (with an indeterminate delay from processing the announcement)
# so to test that a block is NOT requested, we could guess a time period to sleep for,
# and then check. We can avoid the sleep() by taking advantage of transaction getdata's
# being processed after block getdata's, and announce a transaction as well,
# and then check to see if that particular getdata has been received.
# Since 0.14, inv's will only be responded to with a getheaders, so send a header
# to announce this block.
msg = msg_headers()
msg.headers = [ CBlockHeader(block4) ]
self.old_node.send_message(msg)
self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])
assert(block4.sha256 not in self.old_node.getdataset)
# V0 segwit outputs should be standard after activation, but not before.
def test_standardness_v0(self, segwit_activated):
self.log.info("Testing standardness of v0 outputs (%s activation)" % ("after" if segwit_activated else "before"))
assert(len(self.utxo))
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(witness_program)
p2sh_scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# First prepare a p2sh output (so that spending it will pass standardness)
p2sh_tx = CTransaction()
p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
p2sh_tx.vout = [CTxOut(self.utxo[0].nValue-1000, p2sh_scriptPubKey)]
p2sh_tx.rehash()
# Mine it on test_node to create the confirmed output.
self.test_node.test_transaction_acceptance(p2sh_tx, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Now test standardness of v0 P2WSH outputs.
# Start by creating a transaction with two outputs.
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx.vout = [CTxOut(p2sh_tx.vout[0].nValue-10000, scriptPubKey)]
tx.vout.append(CTxOut(8000, scriptPubKey)) # Might burn this later
tx.rehash()
self.std_node.test_transaction_acceptance(tx, with_witness=True, accepted=segwit_activated)
# Now create something that looks like a P2PKH output. This won't be spendable.
scriptPubKey = CScript([OP_0, hash160(witness_hash)])
tx2 = CTransaction()
if segwit_activated:
# if tx was accepted, then we spend the second output.
tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
tx2.vout = [CTxOut(7000, scriptPubKey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
else:
# if tx wasn't accepted, we just re-spend the p2sh output we started with.
tx2.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx2.vout = [CTxOut(p2sh_tx.vout[0].nValue-1000, scriptPubKey)]
tx2.rehash()
self.std_node.test_transaction_acceptance(tx2, with_witness=True, accepted=segwit_activated)
# Now update self.utxo for later tests.
tx3 = CTransaction()
if segwit_activated:
# tx and tx2 were both accepted. Don't bother trying to reclaim the
# P2PKH output; just send tx's first output back to an anyone-can-spend.
sync_mempools([self.nodes[0], self.nodes[1]])
tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx3.vout = [CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE]))]
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx3.rehash()
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
else:
# tx and tx2 didn't go anywhere; just clean up the p2sh_tx output.
tx3.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx3.vout = [CTxOut(p2sh_tx.vout[0].nValue-1000, witness_program)]
tx3.rehash()
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
assert_equal(len(self.nodes[1].getrawmempool()), 0)
# Verify that future segwit upgraded transactions are non-standard,
# but valid in blocks. Can run this before and after segwit activation.
def test_segwit_versions(self):
self.log.info("Testing standardness/consensus for segwit versions (0-16)")
assert(len(self.utxo))
NUM_TESTS = 17 # will test OP_0, OP_1, ..., OP_16
if (len(self.utxo) < NUM_TESTS):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
split_value = (self.utxo[0].nValue - 4000) // NUM_TESTS
for i in range(NUM_TESTS):
tx.vout.append(CTxOut(split_value, CScript([OP_TRUE])))
tx.rehash()
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.pop(0)
for i in range(NUM_TESTS):
self.utxo.append(UTXO(tx.sha256, i, split_value))
sync_blocks(self.nodes)
temp_utxo = []
tx = CTransaction()
count = 0
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
for version in list(range(OP_1, OP_16+1)) + [OP_0]:
count += 1
# First try to spend to a future version segwit scriptPubKey.
scriptPubKey = CScript([CScriptOp(version), witness_hash])
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
tx.vout = [CTxOut(self.utxo[0].nValue-1000, scriptPubKey)]
tx.rehash()
self.std_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=True)
self.utxo.pop(0)
temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
self.nodes[0].generate(1) # Mine all the transactions
sync_blocks(self.nodes)
assert(len(self.nodes[0].getrawmempool()) == 0)
# Finally, verify that version 0 -> version 1 transactions
# are non-standard
scriptPubKey = CScript([CScriptOp(OP_1), witness_hash])
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx2.vout = [CTxOut(tx.vout[0].nValue-1000, scriptPubKey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
tx2.rehash()
# Gets accepted to test_node, because standardness of outputs isn't
# checked when fRequireStandard is false
self.test_node.test_transaction_acceptance(tx2, with_witness=True, accepted=True)
self.std_node.test_transaction_acceptance(tx2, with_witness=True, accepted=False)
temp_utxo.pop() # last entry in temp_utxo was the output we just spent
temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
# Spend everything in temp_utxo back to an OP_TRUE output.
tx3 = CTransaction()
total_value = 0
for i in temp_utxo:
tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
total_value += i.nValue
tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
tx3.vout.append(CTxOut(total_value - 1000, CScript([OP_TRUE])))
tx3.rehash()
# Spending a higher version witness output is not allowed by policy,
# even with fRequireStandard=false.
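# (The node rejects it with "reserved for soft-fork upgrades", since
# witness versions 1-16 are reserved as upgrade hooks; under consensus
# they remain anyone-can-spend, which is why the block built below is
# still accepted.)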
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=False)
self.test_node.sync_with_ping()
with mininode_lock:
assert(b"reserved for soft-fork upgrades" in self.test_node.last_message["reject"].reason)
# Building a block with the transaction must be valid, however.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2, tx3])
self.test_node.test_witness_block(block, accepted=True)
sync_blocks(self.nodes)
# Add utxo to our list
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
def test_premature_coinbase_witness_spend(self):
self.log.info("Testing premature coinbase witness spend")
block = self.build_next_block()
# Change the output of the block to be a witness output.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
block.vtx[0].vout[0].scriptPubKey = scriptPubKey
# This next line will rehash the coinbase and update the merkle
# root, and solve.
self.update_witness_block_with_transactions(block, [])
self.test_node.test_witness_block(block, accepted=True)
spend_tx = CTransaction()
spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")]
spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)]
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
spend_tx.rehash()
# Now test a premature spend.
self.nodes[0].generate(98)
sync_blocks(self.nodes)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
self.test_node.test_witness_block(block2, accepted=False)
# Advancing one more block should allow the spend.
self.nodes[0].generate(1)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
self.test_node.test_witness_block(block2, accepted=True)
sync_blocks(self.nodes)
def test_signature_version_1(self):
self.log.info("Testing segwit signature hash version 1")
key = CECKey()
key.set_secretbytes(b"9")
pubkey = CPubKey(key.get_pubkey())
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# First create a witness output for use in the tests.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
tx.rehash()
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=True)
# Mine this transaction in preparation for following tests.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
sync_blocks(self.nodes)
self.utxo.pop(0)
# Test each hashtype
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
for sigflag in [ 0, SIGHASH_ANYONECANPAY ]:
for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]:
hashtype |= sigflag
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
tx.vout.append(CTxOut(prev_utxo.nValue - 1000, scriptPubKey))
tx.wit.vtxinwit.append(CTxInWitness())
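# Per BIP143, the signature hash commits to the amount of the output
# being spent, so signing with an incorrect value must invalidate the
# input.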
# Too-large input value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue+1, key)
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=False)
# Too-small input value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue-1, key)
block.vtx.pop() # remove last tx
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=False)
# Now try correct value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key)
block.vtx.pop()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
# Test combinations of signature hashes.
# Split the utxo into a lot of outputs.
# Randomly choose up to 10 to spend, sign with different hashtypes, and
# output to a random number of outputs. Repeat NUM_TESTS times.
# Ensure that we've tested a situation where we use SIGHASH_SINGLE with
# an input index > number of outputs.
NUM_TESTS = 500
temp_utxos = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
split_value = prev_utxo.nValue // NUM_TESTS
for i in range(NUM_TESTS):
tx.vout.append(CTxOut(split_value, scriptPubKey))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key)
for i in range(NUM_TESTS):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
block = self.build_next_block()
used_sighash_single_out_of_bounds = False
for i in range(NUM_TESTS):
# Ping regularly to keep the connection alive
if (not i % 100):
self.test_node.sync_with_ping()
# Choose random number of inputs to use.
num_inputs = random.randint(1, 10)
# Create a slight bias for producing more utxos
num_outputs = random.randint(1, 11)
random.shuffle(temp_utxos)
assert(len(temp_utxos) > num_inputs)
tx = CTransaction()
total_value = 0
for i in range(num_inputs):
tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
total_value += temp_utxos[i].nValue
split_value = total_value // num_outputs
for i in range(num_outputs):
tx.vout.append(CTxOut(split_value, scriptPubKey))
for i in range(num_inputs):
# Now try to sign each input, using a random hashtype.
anyonecanpay = 0
if random.randint(0, 1):
anyonecanpay = SIGHASH_ANYONECANPAY
hashtype = random.randint(1, 3) | anyonecanpay
sign_P2PK_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key)
if (hashtype == SIGHASH_SINGLE and i >= num_outputs):
used_sighash_single_out_of_bounds = True
tx.rehash()
for i in range(num_outputs):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
temp_utxos = temp_utxos[num_inputs:]
block.vtx.append(tx)
# Test the block periodically if we're close to max block size
if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000):
self.update_witness_block_with_transactions(block, [])
self.test_node.test_witness_block(block, accepted=True)
block = self.build_next_block()
if (not used_sighash_single_out_of_bounds):
self.log.info("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value")
# Test the transactions we've added to the block
if (len(block.vtx) > 1):
self.update_witness_block_with_transactions(block, [])
self.test_node.test_witness_block(block, accepted=True)
# Now test witness version 0 P2PKH transactions
pubkeyhash = hash160(pubkey)
scriptPKH = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b""))
tx.vout.append(CTxOut(temp_utxos[0].nValue, scriptPKH))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key)
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
script = GetP2PKHScript(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
# Check that we can't have a scriptSig
tx2.vin[0].scriptSig = CScript([signature, pubkey])
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=False)
# Move the signature to the witness.
block.vtx.pop()
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.vin[0].scriptSig = b""
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=True)
temp_utxos.pop(0)
# Update self.utxo for later tests. Just spend everything in
# temp_utxos to a corresponding entry in self.utxo
tx = CTransaction()
index = 0
for i in temp_utxos:
# Just spend to our usual anyone-can-spend output
# Use SIGHASH_SINGLE|SIGHASH_ANYONECANPAY so we can build up
# the signatures as we go.
tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx.vout.append(CTxOut(i.nValue, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, index, SIGHASH_SINGLE|SIGHASH_ANYONECANPAY, i.nValue, key)
index += 1
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
for i in range(len(tx.vout)):
self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue))
# Test P2SH wrapped witness programs.
def test_p2sh_witness(self, segwit_activated):
self.log.info("Testing P2SH witness transactions")
assert(len(self.utxo))
# Prepare the p2sh-wrapped witness output
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
p2wsh_pubkey = CScript([OP_0, witness_hash])
p2sh_witness_hash = hash160(p2wsh_pubkey)
scriptPubKey = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
scriptSig = CScript([p2wsh_pubkey]) # a push of the redeem script
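# Per BIP141 P2SH-P2WSH, the scriptSig must contain exactly this one push
# of the OP_0 <sha256(witness script)> redeem script; all other unlocking
# data goes in the witness.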
# Fund the P2SH output
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
tx.rehash()
# Verify mempool acceptance and block validity
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True, with_witness=segwit_activated)
sync_blocks(self.nodes)
# Now test attempts to spend the output.
spend_tx = CTransaction()
spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), scriptSig))
spend_tx.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE])))
spend_tx.rehash()
# This transaction should not be accepted into the mempool pre- or
# post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS which
# will require a witness to spend a witness program regardless of
# segwit activation. Note that older bitcoind versions that are not
# segwit-aware would also reject this for failing CLEANSTACK.
self.test_node.test_transaction_acceptance(spend_tx, with_witness=False, accepted=False)
# Try to put the witness script in the scriptSig, should also fail.
spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a'])
spend_tx.rehash()
self.test_node.test_transaction_acceptance(spend_tx, with_witness=False, accepted=False)
# Now put the witness script in the witness, should succeed after
# segwit activates.
spend_tx.vin[0].scriptSig = scriptSig
spend_tx.rehash()
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a', witness_program ]
# Verify mempool acceptance
self.test_node.test_transaction_acceptance(spend_tx, with_witness=True, accepted=segwit_activated)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [spend_tx])
# If we're before activation, then sending this without witnesses
# should be valid. If we're after activation, then sending this with
# witnesses should be valid.
if segwit_activated:
self.test_node.test_witness_block(block, accepted=True)
else:
self.test_node.test_witness_block(block, accepted=True, with_witness=False)
# Update self.utxo
self.utxo.pop(0)
self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue))
# Test the behavior of starting up a segwit-aware node after the softfork
# has activated. As segwit requires different block data than pre-segwit
# nodes would have stored, this requires special handling.
# To enable this test, pass --oldbinary=<path-to-pre-segwit-bitcoind> to
# the test.
def test_upgrade_after_activation(self, node_id):
self.log.info("Testing software upgrade after softfork activation")
assert(node_id != 0) # node0 is assumed to be a segwit-active bitcoind
# Make sure the nodes are all up
sync_blocks(self.nodes)
# Restart with the new binary
self.stop_node(node_id)
self.start_node(node_id, extra_args=[])
connect_nodes(self.nodes[0], node_id)
sync_blocks(self.nodes)
# Make sure that this peer thinks segwit has activated.
assert(get_bip9_status(self.nodes[node_id], 'segwit')['status'] == "active")
# Make sure this peer's blocks match those of node0.
height = self.nodes[node_id].getblockcount()
while height >= 0:
block_hash = self.nodes[node_id].getblockhash(height)
assert_equal(block_hash, self.nodes[0].getblockhash(height))
assert_equal(self.nodes[0].getblock(block_hash), self.nodes[node_id].getblock(block_hash))
height -= 1
def test_witness_sigops(self):
'''Ensure sigop counting is correct inside witnesses.'''
self.log.info("Testing sigops limit")
assert(len(self.utxo))
# Keep this under MAX_OPS_PER_SCRIPT (201)
witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG]*5 + [OP_CHECKSIG]*193 + [OP_ENDIF])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
sigops_per_script = 20*5 + 193*1
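# In v0 witness scripts, OP_CHECKSIG counts as 1 sigop and
# OP_CHECKMULTISIG as 20, so each spend of witness_program costs
# 5*20 + 193 = 293 sigops toward MAX_SIGOP_COST (80000 weighted sigops
# per block, per BIP141).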
# We'll produce 2 extra outputs, one with a program that would take us
# over max sig ops, and one with a program that would exactly reach max
# sig ops
outputs = (MAX_SIGOP_COST // sigops_per_script) + 2
extra_sigops_available = MAX_SIGOP_COST % sigops_per_script
# We chose the number of checkmultisigs/checksigs to make this work:
assert(extra_sigops_available < 100) # steer clear of MAX_OPS_PER_SCRIPT
# This script, when spent with the first
# N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction,
# would push us just over the block sigop limit.
witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available + 1) + [OP_ENDIF])
witness_hash_toomany = sha256(witness_program_toomany)
scriptPubKey_toomany = CScript([OP_0, witness_hash_toomany])
# If we spend this script instead, we would exactly reach our sigop
# limit (for witness sigops).
witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available) + [OP_ENDIF])
witness_hash_justright = sha256(witness_program_justright)
scriptPubKey_justright = CScript([OP_0, witness_hash_justright])
# First split our available utxo into a bunch of outputs
split_value = self.utxo[0].nValue // outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
for i in range(outputs):
tx.vout.append(CTxOut(split_value, scriptPubKey))
tx.vout[-2].scriptPubKey = scriptPubKey_toomany
tx.vout[-1].scriptPubKey = scriptPubKey_justright
tx.rehash()
block_1 = self.build_next_block()
self.update_witness_block_with_transactions(block_1, [tx])
self.test_node.test_witness_block(block_1, accepted=True)
tx2 = CTransaction()
# If we try to spend the first n-1 outputs from tx, that should be
# too many sigops.
total_value = 0
for i in range(outputs-1):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program ]
total_value += tx.vout[i].nValue
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_toomany ]
tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE])))
tx2.rehash()
block_2 = self.build_next_block()
self.update_witness_block_with_transactions(block_2, [tx2])
self.test_node.test_witness_block(block_2, accepted=False)
# Try dropping the last input in tx2, and add an output that has
# too many sigops (contributing to legacy sigop count).
checksig_count = (extra_sigops_available // 4) + 1
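# Legacy (non-witness) sigops are scaled by the witness scale factor (4)
# when counted against MAX_SIGOP_COST, so (extra_sigops_available // 4) + 1
# bare OP_CHECKSIGs in an output push the block just over the limit.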
scriptPubKey_checksigs = CScript([OP_CHECKSIG]*checksig_count)
tx2.vout.append(CTxOut(0, scriptPubKey_checksigs))
tx2.vin.pop()
tx2.wit.vtxinwit.pop()
tx2.vout[0].nValue -= tx.vout[-2].nValue
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx2])
self.test_node.test_witness_block(block_3, accepted=False)
# If we drop the last checksig in this output, the tx should succeed.
block_4 = self.build_next_block()
tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG]*(checksig_count-1))
tx2.rehash()
self.update_witness_block_with_transactions(block_4, [tx2])
self.test_node.test_witness_block(block_4, accepted=True)
# Reset the tip back down for the next test
sync_blocks(self.nodes)
for x in self.nodes:
x.invalidateblock(block_4.hash)
# Try replacing the last input of tx2 to be spending the last
# output of tx
block_5 = self.build_next_block()
tx2.vout.pop()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs-1), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_justright ]
tx2.rehash()
self.update_witness_block_with_transactions(block_5, [tx2])
self.test_node.test_witness_block(block_5, accepted=True)
# TODO: test p2sh sigop counting
def test_getblocktemplate_before_lockin(self):
self.log.info("Testing getblocktemplate setting of segwit versionbit (before lockin)")
# Node0 is segwit aware, node2 is not.
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate()
block_version = gbt_results['version']
# Even though this getblocktemplate call doesn't ask for the segwit
# rule, a segwit-aware node (node0) will still signal for activation.
assert_equal((block_version & (1 << VB_WITNESS_BIT) != 0), node == self.nodes[0])
# If we don't specify the segwit rule, then we won't get a default
# commitment.
assert('default_witness_commitment' not in gbt_results)
# Workaround:
# Can either change the tip, or change the mempool and wait 5 seconds
# to trigger a recomputation of getblocktemplate.
txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16)
# Using mocktime lets us avoid sleep()
sync_mempools(self.nodes)
self.nodes[0].setmocktime(int(time.time())+10)
self.nodes[2].setmocktime(int(time.time())+10)
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate({"rules" : ["segwit"]})
block_version = gbt_results['version']
if node == self.nodes[2]:
# If this is a non-segwit node, we should still not get a witness
# commitment, nor a version bit signalling segwit.
assert_equal(block_version & (1 << VB_WITNESS_BIT), 0)
assert('default_witness_commitment' not in gbt_results)
else:
# For segwit-aware nodes, check the version bit and the witness
# commitment are correct.
assert(block_version & (1 << VB_WITNESS_BIT) != 0)
assert('default_witness_commitment' in gbt_results)
witness_commitment = gbt_results['default_witness_commitment']
# Check that the default_witness_commitment is correct.
witness_root = CBlock.get_merkle_root([ser_uint256(0),
ser_uint256(txid)])
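# (The coinbase's wtxid is defined as all zeros, and the wallet tx spent
# here has no witness, so its wtxid equals its txid.)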
script = get_witness_script(witness_root, 0)
assert_equal(witness_commitment, bytes_to_hex_str(script))
# undo mocktime
self.nodes[0].setmocktime(0)
self.nodes[2].setmocktime(0)
# Uncompressed pubkeys are no longer supported in default relay policy,
# but (for now) are still valid in blocks.
def test_uncompressed_pubkey(self):
self.log.info("Testing uncompressed pubkeys")
# Segwit transactions using uncompressed pubkeys are not accepted
# under default policy, but should still pass consensus.
key = CECKey()
key.set_secretbytes(b"9")
key.set_compressed(False)
pubkey = CPubKey(key.get_pubkey())
assert_equal(len(pubkey), 65) # This should be an uncompressed pubkey
assert(len(self.utxo) > 0)
utxo = self.utxo.pop(0)
# Test 1: P2WPKH
# First create a P2WPKH output that uses an uncompressed pubkey
pubkeyhash = hash160(pubkey)
scriptPKH = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b""))
tx.vout.append(CTxOut(utxo.nValue-1000, scriptPKH))
tx.rehash()
# Confirm it in a block.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
# Now try to spend it. Send it to a P2WSH output, which we'll
# use in the next test.
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
scriptWSH = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, scriptWSH))
script = GetP2PKHScript(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [ signature, pubkey ]
tx2.rehash()
# Should fail policy test.
self.test_node.test_transaction_acceptance(tx2, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=True)
# Test 2: P2WSH
# Try to spend the P2WSH output created in last test.
# Send it to a P2SH(P2WSH) output, which we'll use in the next test.
p2sh_witness_hash = hash160(scriptWSH)
scriptP2SH = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
scriptSig = CScript([scriptWSH])
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, scriptP2SH))
tx3.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)
# Should fail policy test.
self.test_node.test_transaction_acceptance(tx3, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx3])
self.test_node.test_witness_block(block, accepted=True)
# Test 3: P2SH(P2WSH)
# Try to spend the P2SH output created in the last test.
# Send it to a P2PKH output, which we'll use in the next test.
scriptPubKey = GetP2PKHScript(pubkeyhash)
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), scriptSig))
tx4.vout.append(CTxOut(tx3.vout[0].nValue-1000, scriptPubKey))
tx4.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)
# Should fail policy test.
self.test_node.test_transaction_acceptance(tx4, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx4])
self.test_node.test_witness_block(block, accepted=True)
# Test 4: Uncompressed pubkeys should still be valid in non-segwit
# transactions.
tx5 = CTransaction()
tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b""))
tx5.vout.append(CTxOut(tx4.vout[0].nValue-1000, CScript([OP_TRUE])))
(sig_hash, err) = SignatureHash(scriptPubKey, tx5, 0, SIGHASH_ALL)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx5.vin[0].scriptSig = CScript([signature, pubkey])
tx5.rehash()
# Should pass policy and consensus.
self.test_node.test_transaction_acceptance(tx5, True, True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx5])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue))
def test_non_standard_witness(self):
self.log.info("Testing detection of non-standard P2WSH witness")
pad = chr(1).encode('latin-1')
# Create scripts for tests
scripts = []
scripts.append(CScript([OP_DROP] * 100))
scripts.append(CScript([OP_DROP] * 99))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61))
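# scripts[0]/[1] exercise the stack-item count (>100) and item size
# (>80 bytes) standardness limits; scripts[2]/[3] serialize to 3600 and
# 3601 bytes to straddle the witnessScript size limit.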
p2wsh_scripts = []
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# For each script, generate a pair of P2WSH and P2SH-P2WSH output.
outputvalue = (self.utxo[0].nValue - 1000) // (len(scripts) * 2)
for i in scripts:
p2wsh = CScript([OP_0, sha256(i)])
p2sh = hash160(p2wsh)
p2wsh_scripts.append(p2wsh)
tx.vout.append(CTxOut(outputvalue, p2wsh))
tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL])))
tx.rehash()
txid = tx.sha256
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Creating transactions for tests
p2wsh_txs = []
p2sh_txs = []
for i in range(len(scripts)):
p2wsh_tx = CTransaction()
p2wsh_tx.vin.append(CTxIn(COutPoint(txid,i*2)))
p2wsh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.rehash()
p2wsh_txs.append(p2wsh_tx)
p2sh_tx = CTransaction()
p2sh_tx.vin.append(CTxIn(COutPoint(txid,i*2+1), CScript([p2wsh_scripts[i]])))
p2sh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2sh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_tx.rehash()
p2sh_txs.append(p2sh_tx)
# Testing native P2WSH
# Witness stack size, excluding witnessScript, over 100 is non-standard
p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
self.std_node.test_transaction_acceptance(p2wsh_txs[0], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
self.test_node.test_transaction_acceptance(p2wsh_txs[0], True, True)
# Stack element size over 80 bytes is non-standard
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2wsh_txs[1], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
self.test_node.test_transaction_acceptance(p2wsh_txs[1], True, True)
# Standard nodes should accept if element size is not over 80 bytes
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2wsh_txs[1], True, True)
# witnessScript size at 3600 bytes is standard
p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
self.test_node.test_transaction_acceptance(p2wsh_txs[2], True, True)
self.std_node.test_transaction_acceptance(p2wsh_txs[2], True, True)
# witnessScript size at 3601 bytes is non-standard
p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
self.std_node.test_transaction_acceptance(p2wsh_txs[3], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
self.test_node.test_transaction_acceptance(p2wsh_txs[3], True, True)
# Repeating the same tests with P2SH-P2WSH
p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
self.std_node.test_transaction_acceptance(p2sh_txs[0], True, False, b'bad-witness-nonstandard')
self.test_node.test_transaction_acceptance(p2sh_txs[0], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2sh_txs[1], True, False, b'bad-witness-nonstandard')
self.test_node.test_transaction_acceptance(p2sh_txs[1], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2sh_txs[1], True, True)
p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
self.test_node.test_transaction_acceptance(p2sh_txs[2], True, True)
self.std_node.test_transaction_acceptance(p2sh_txs[2], True, True)
p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
self.std_node.test_transaction_acceptance(p2sh_txs[3], True, False, b'bad-witness-nonstandard')
self.test_node.test_transaction_acceptance(p2sh_txs[3], True, True)
self.nodes[0].generate(1) # Mine and clean up the mempool of non-standard node
# Valid but non-standard transactions in a block should be accepted by standard node
sync_blocks(self.nodes)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.utxo.pop(0)
def run_test(self):
# Setup the p2p connections and start up the network thread.
self.test_node = TestNode() # sets NODE_WITNESS|NODE_NETWORK
self.old_node = TestNode() # only NODE_NETWORK
self.std_node = TestNode() # for testing node1 (fRequireStandard=true)
self.p2p_connections = [self.test_node, self.old_node]
self.connections = []
self.connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node, services=NODE_NETWORK|NODE_WITNESS))
self.connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.old_node, services=NODE_NETWORK))
self.connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], self.std_node, services=NODE_NETWORK|NODE_WITNESS))
self.test_node.add_connection(self.connections[0])
self.old_node.add_connection(self.connections[1])
self.std_node.add_connection(self.connections[2])
NetworkThread().start() # Start up network handling in another thread
# Keep a place to store utxo's that can be used in later tests
self.utxo = []
# Test logic begins here
self.test_node.wait_for_verack()
self.log.info("Starting tests before segwit lock in:")
self.test_witness_services() # Verifies NODE_WITNESS
self.test_non_witness_transaction() # non-witness tx's are accepted
self.test_unnecessary_witness_before_segwit_activation()
self.test_block_relay(segwit_activated=False)
# Advance to segwit being 'started'
self.advance_to_segwit_started()
sync_blocks(self.nodes)
self.test_getblocktemplate_before_lockin()
sync_blocks(self.nodes)
# At lockin, nothing should change.
self.log.info("Testing behavior post lockin, pre-activation")
self.advance_to_segwit_lockin()
# Retest unnecessary witnesses
self.test_unnecessary_witness_before_segwit_activation()
self.test_witness_tx_relay_before_segwit_activation()
self.test_block_relay(segwit_activated=False)
self.test_p2sh_witness(segwit_activated=False)
self.test_standardness_v0(segwit_activated=False)
sync_blocks(self.nodes)
# Now activate segwit
self.log.info("Testing behavior after segwit activation")
self.advance_to_segwit_active()
sync_blocks(self.nodes)
# Test P2SH witness handling again
self.test_p2sh_witness(segwit_activated=True)
self.test_witness_commitments()
self.test_block_malleability()
self.test_witness_block_size()
self.test_submit_block()
self.test_extra_witness_data()
self.test_max_witness_push_length()
self.test_max_witness_program_length()
self.test_witness_input_length()
self.test_block_relay(segwit_activated=True)
self.test_tx_relay_after_segwit_activation()
self.test_standardness_v0(segwit_activated=True)
self.test_segwit_versions()
self.test_premature_coinbase_witness_spend()
self.test_uncompressed_pubkey()
self.test_signature_version_1()
self.test_non_standard_witness()
sync_blocks(self.nodes)
self.test_upgrade_after_activation(node_id=2)
self.test_witness_sigops()
if __name__ == '__main__':
SegWitTest().main()
|
{
"content_hash": "07354f47f0b61d56c644bfe3c31c2d93",
"timestamp": "",
"source": "github",
"line_count": 1945,
"max_line_length": 150,
"avg_line_length": 46.21182519280206,
"alnum_prop": 0.6343650564072896,
"repo_name": "21E14/bitcoin",
"id": "943bc2c6d2111eb7bd52b2a643a83ebbcdf6ebbf",
"size": "90091",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "test/functional/p2p-segwit.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28456"
},
{
"name": "C",
"bytes": "693313"
},
{
"name": "C++",
"bytes": "5014904"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50622"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "186414"
},
{
"name": "Makefile",
"bytes": "109332"
},
{
"name": "Objective-C",
"bytes": "3892"
},
{
"name": "Objective-C++",
"bytes": "7240"
},
{
"name": "Protocol Buffer",
"bytes": "2328"
},
{
"name": "Python",
"bytes": "1119486"
},
{
"name": "QMake",
"bytes": "756"
},
{
"name": "Shell",
"bytes": "48894"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import sys
import os
if sys.version_info > (2, 7, 0):
import unittest
else:
import unittest2 as unittest
from mock import Mock
from tempfile import NamedTemporaryFile
sys.path.append(os.path.join(os.path.dirname(__file__), '../bin'))
import qds
import qds_sdk
from qds_sdk.connection import Connection
from test_base import print_command
from test_base import QdsCliTestCase
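# Each test below swaps Connection._api_call for a Mock, drives the CLI
# through qds.main(), and then asserts that the expected REST call was made.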
class TestCommandCheck(QdsCliTestCase):
def test_hivecmd(self):
sys.argv = ['qds.py', 'hivecmd', 'check', '123']
print_command()
Connection._api_call = Mock(return_value={})
qds.main()
Connection._api_call.assert_called_with("GET", "commands/123", params=None)
def test_sparkcmd(self):
sys.argv = ['qds.py', 'sparkcmd', 'check', '123']
print_command()
Connection._api_call = Mock(return_value={})
qds.main()
Connection._api_call.assert_called_with("GET", "commands/123", params=None)
def test_hadoopcmd(self):
sys.argv = ['qds.py', 'hadoopcmd', 'check', '123']
print_command()
Connection._api_call = Mock(return_value={})
qds.main()
Connection._api_call.assert_called_with("GET", "commands/123", params=None)
def test_prestocmd(self):
sys.argv = ['qds.py', 'prestocmd', 'check', '123']
print_command()
Connection._api_call = Mock(return_value={})
qds.main()
Connection._api_call.assert_called_with("GET", "commands/123", params=None)
def test_pigcmd(self):
sys.argv = ['qds.py', 'pigcmd', 'check', '123']
print_command()
Connection._api_call = Mock(return_value={})
qds.main()
Connection._api_call.assert_called_with("GET", "commands/123", params=None)
def test_shellcmd(self):
sys.argv = ['qds.py', 'shellcmd', 'check', '123']
print_command()
Connection._api_call = Mock(return_value={})
qds.main()
Connection._api_call.assert_called_with("GET", "commands/123", params=None)
def test_dbexportcmd(self):
sys.argv = ['qds.py', 'dbexportcmd', 'check', '123']
print_command()
Connection._api_call = Mock(return_value={})
qds.main()
Connection._api_call.assert_called_with("GET", "commands/123", params=None)
def test_dbimportcmd(self):
sys.argv = ['qds.py', 'dbimportcmd', 'check', '123']
print_command()
Connection._api_call = Mock(return_value={})
qds.main()
Connection._api_call.assert_called_with("GET", "commands/123", params=None)
def test_dbtapquerycmd(self):
sys.argv = ['qds.py', 'dbtapquerycmd', 'check', '123']
print_command()
Connection._api_call = Mock(return_value={})
qds.main()
Connection._api_call.assert_called_with("GET", "commands/123", params=None)
class TestCommandCancel(QdsCliTestCase):
def test_hivecmd(self):
sys.argv = ['qds.py', 'hivecmd', 'cancel', '123']
print_command()
Connection._api_call = Mock(return_value={'kill_succeeded': True})
qds.main()
Connection._api_call.assert_called_with("PUT", "commands/123",
{'status': 'kill'})
def test_sparkcmd(self):
sys.argv = ['qds.py', 'sparkcmd', 'cancel', '123']
print_command()
Connection._api_call = Mock(return_value={'kill_succeeded': True})
qds.main()
Connection._api_call.assert_called_with("PUT", "commands/123",
{'status': 'kill'})
def test_hadoopcmd(self):
sys.argv = ['qds.py', 'hadoopcmd', 'cancel', '123']
print_command()
Connection._api_call = Mock(return_value={'kill_succeeded': True})
qds.main()
Connection._api_call.assert_called_with("PUT", "commands/123",
{'status': 'kill'})
def test_prestocmd(self):
sys.argv = ['qds.py', 'prestocmd', 'cancel', '123']
print_command()
Connection._api_call = Mock(return_value={'kill_succeeded': True})
qds.main()
Connection._api_call.assert_called_with("PUT", "commands/123",
{'status': 'kill'})
def test_pigcmd(self):
sys.argv = ['qds.py', 'pigcmd', 'cancel', '123']
print_command()
Connection._api_call = Mock(return_value={'kill_succeeded': True})
qds.main()
Connection._api_call.assert_called_with("PUT", "commands/123",
{'status': 'kill'})
def test_shellcmd(self):
sys.argv = ['qds.py', 'shellcmd', 'cancel', '123']
print_command()
Connection._api_call = Mock(return_value={'kill_succeeded': True})
qds.main()
Connection._api_call.assert_called_with("PUT", "commands/123",
{'status': 'kill'})
def test_dbexportcmd(self):
sys.argv = ['qds.py', 'dbexportcmd', 'cancel', '123']
print_command()
Connection._api_call = Mock(return_value={'kill_succeeded': True})
qds.main()
Connection._api_call.assert_called_with("PUT", "commands/123",
{'status': 'kill'})
def test_dbimportcmd(self):
sys.argv = ['qds.py', 'dbimportcmd', 'cancel', '123']
print_command()
Connection._api_call = Mock(return_value={'kill_succeeded': True})
qds.main()
Connection._api_call.assert_called_with("PUT", "commands/123",
{'status': 'kill'})
def test_dbtapquerycmd(self):
sys.argv = ['qds.py', 'dbtapquerycmd', 'cancel', '123']
print_command()
Connection._api_call = Mock(return_value={'kill_succeeded': True})
qds.main()
Connection._api_call.assert_called_with("PUT", "commands/123",
{'status': 'kill'})
class TestCommandGetJobs(QdsCliTestCase):
def test_running(self):
sys.argv = ['qds.py', 'hivecmd', 'getjobs', '123']
print_command()
Connection._api_call = Mock(return_value={'id':123, 'status': 'running'})
Connection._api_call_raw = Mock()
qds.main()
        Connection._api_call.assert_called_with('GET', 'commands/123', params=None)
assert not Connection._api_call_raw.called
def test_done(self):
sys.argv = ['qds.py', 'hivecmd', 'getjobs', '123']
print_command()
Connection._api_call = Mock(return_value={'id':123, 'status': "done"})
jobs_response = Mock(text='[{"url":"https://blah","job_stats":{},"job_id":"job_blah"}]')
Connection._api_call_raw = Mock(return_value=jobs_response)
qds.main()
        Connection._api_call.assert_called_with('GET', 'commands/123', params=None)
        Connection._api_call_raw.assert_called_with('GET', 'commands/123/jobs', params=None)
class TestHiveCommand(QdsCliTestCase):
def test_submit_query(self):
sys.argv = ['qds.py', 'hivecmd', 'submit', '--query', 'show tables']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'macros': None,
'label': None,
'tags': None,
'sample_size': None,
'name': None,
'query': 'show tables',
'command_type': 'HiveCommand',
'can_notify': False,
'script_location': None})
def test_submit_script_location(self):
sys.argv = ['qds.py', 'hivecmd', 'submit', '--script_location', 's3://bucket/path-to-script']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'macros': None,
'label': None,
'tags': None,
'sample_size': None,
'name': None,
'query': None,
'command_type': 'HiveCommand',
'can_notify': False,
'script_location': 's3://bucket/path-to-script'})
def test_submit_none(self):
sys.argv = ['qds.py', 'hivecmd', 'submit']
print_command()
with self.assertRaises(qds_sdk.exception.ParseError):
qds.main()
def test_submit_both(self):
sys.argv = ['qds.py', 'hivecmd', 'submit', '--query', 'show tables',
'--script_location', 's3://bucket/path-to-script']
print_command()
with self.assertRaises(qds_sdk.exception.ParseError):
qds.main()
def test_submit_macros(self):
sys.argv = ['qds.py', 'hivecmd', 'submit', '--script_location', 's3://bucket/path-to-script',
'--macros', '[{"key1":"11","key2":"22"}, {"key3":"key1+key2"}]']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'macros': [{"key1":"11","key2":"22"}, {"key3":"key1+key2"}],
'label': None,
'tags': None,
'sample_size': None,
'name': None,
'query': None,
'command_type': 'HiveCommand',
'can_notify': False,
'script_location': 's3://bucket/path-to-script'})
def test_submit_tags(self):
sys.argv = ['qds.py', 'hivecmd', 'submit', '--script_location', 's3://bucket/path-to-script',
'--tags', 'abc,def']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'macros': None,
'label': None,
'tags': ["abc", "def"],
'sample_size': None,
'name': None,
'query': None,
'command_type': 'HiveCommand',
'can_notify': False,
'script_location': 's3://bucket/path-to-script'})
def test_submit_cluster_label(self):
sys.argv = ['qds.py', 'hivecmd', 'submit', '--query', 'show tables',
'--cluster-label', 'test_label']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'macros': None,
'label': 'test_label',
'tags': None,
'sample_size': None,
'name': None,
'query': 'show tables',
'command_type': 'HiveCommand',
'can_notify': False,
'script_location': None})
def test_submit_name(self):
sys.argv = ['qds.py', 'hivecmd', 'submit', '--query', 'show tables',
'--name', 'test_name']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'macros': None,
'label': None,
'tags': None,
'sample_size': None,
'name': 'test_name',
'query': 'show tables',
'command_type': 'HiveCommand',
'can_notify': False,
'script_location': None})
def test_submit_notify(self):
sys.argv = ['qds.py', 'hivecmd', 'submit', '--query', 'show tables',
'--notify']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'macros': None,
'label': None,
'tags': None,
'sample_size': None,
'name': None,
'query': 'show tables',
'command_type': 'HiveCommand',
'can_notify': True,
'script_location': None})
def test_submit_sample_size(self):
sys.argv = ['qds.py', 'hivecmd', 'submit', '--query', 'show tables',
'--sample_size', '1024']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'macros': None,
'label': None,
'tags': None,
'sample_size': '1024',
'name': None,
'query': 'show tables',
'command_type': 'HiveCommand',
'can_notify': False,
'script_location': None})
class TestSparkCommand(QdsCliTestCase):
def test_submit_query(self):
sys.argv = ['qds.py', 'sparkcmd', 'submit', '--cmdline', '/usr/lib/spark/bin/spark-submit --class Test Test.jar']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'macros': None,
'label': None,
'language': None,
'tags': None,
'name': None,
'sql': None,
'program': None,
'cmdline':'/usr/lib/spark/bin/spark-submit --class Test Test.jar',
'command_type': 'SparkCommand',
'arguments': None,
'user_program_arguments': None,
'can_notify': False,
'script_location': None})
def test_submit_script_location_aws(self):
sys.argv = ['qds.py', 'sparkcmd', 'submit', '--script_location', 's3://bucket/path-to-script']
print_command()
with self.assertRaises(qds_sdk.exception.ParseError):
qds.main()
def test_submit_script_location_local_py(self):
with NamedTemporaryFile(suffix=".py") as tmp:
tmp.write('print "Hello World!"'.encode("utf8"))
tmp.seek(0)
sys.argv = ['qds.py', 'sparkcmd' , 'submit', '--script_location' , tmp.name]
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'macros': None,
'label': None,
'language': "python",
'tags': None,
'name': None,
'sql': None,
'program':'print "Hello World!"',
'cmdline':None,
'command_type': 'SparkCommand',
'arguments': None,
'user_program_arguments': None,
'can_notify': False,
'script_location': None})
def test_submit_script_location_local_scala(self):
with NamedTemporaryFile(suffix=".scala") as tmp:
tmp.write('println("hello, world!")'.encode("utf8"))
tmp.seek(0)
sys.argv = ['qds.py', 'sparkcmd' , 'submit', '--script_location' , tmp.name]
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'macros': None,
'label': None,
'language': "scala",
'tags': None,
'name': None,
'sql': None,
'program': "println(\"hello, world!\")",
'cmdline':None,
'command_type': 'SparkCommand',
'arguments': None,
'user_program_arguments': None,
'can_notify': False,
'script_location': None})
def test_submit_script_location_local_java(self):
with NamedTemporaryFile(suffix=".java") as tmp:
tmp.write('println("hello, world!")'.encode("utf8"))
tmp.seek(0)
sys.argv = ['qds.py', 'sparkcmd' , 'submit', '--script_location' , tmp.name]
print_command()
with self.assertRaises(qds_sdk.exception.ParseError):
qds.main()
def test_submit_sql(self):
sys.argv = ['qds.py', 'sparkcmd', 'submit', '--sql', 'show dummy']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'macros': None,
'label': None,
'language': None,
'tags': None,
'name': None,
'sql': 'show dummy',
'program': None,
'cmdline':None,
'command_type': 'SparkCommand',
'arguments': None,
'user_program_arguments': None,
'can_notify': False,
'script_location': None})
def test_submit_sql_with_language(self):
sys.argv = ['qds.py', 'sparkcmd', 'submit', '--language','python', '--sql', 'show dummy']
print_command()
with self.assertRaises(qds_sdk.exception.ParseError):
qds.main()
def test_submit_none(self):
sys.argv = ['qds.py', 'sparkcmd', 'submit']
print_command()
with self.assertRaises(qds_sdk.exception.ParseError):
qds.main()
def test_submit_both(self):
sys.argv = ['qds.py', 'sparkcmd', 'submit', '--cmdline', '/usr/lib/spark/bin/spark-submit --class Test Test.jar',
'--script_location', 'home/path-to-script']
print_command()
with self.assertRaises(qds_sdk.exception.ParseError):
qds.main()
def test_submit_all_three(self):
sys.argv = ['qds.py', 'sparkcmd', 'submit', '--cmdline', '/usr/lib/spark/bin/spark-submit --class Test Test.jar',
'--script_location', '/home/path-to-script', 'program', 'println("hello, world!")']
print_command()
with self.assertRaises(qds_sdk.exception.ParseError):
qds.main()
def test_language(self):
sys.argv = ['qds.py', 'sparkcmd', 'submit', '--program', 'println("hello, world!")',
'--language', 'java']
print_command()
with self.assertRaises(qds_sdk.exception.ParseError):
qds.main()
def test_program_no_language(self):
sys.argv = ['qds.py', 'sparkcmd', 'submit', '--program', 'println("hello, world!")']
print_command()
with self.assertRaises(qds_sdk.exception.ParseError):
qds.main()
def test_submit_macros(self):
sys.argv = ['qds.py', 'sparkcmd', 'submit', '--program',"println(\"hello, world!\")" ,'--language', 'scala',
'--macros', '[{"key1":"11","key2":"22"}, {"key3":"key1+key2"}]']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'macros': [{"key1":"11","key2":"22"}, {"key3":"key1+key2"}],
'label': None,
'language': "scala",
'tags': None,
'name': None,
'sql': None,
'arguments': None,
'user_program_arguments': None,
'program': "println(\"hello, world!\")",
'command_type': 'SparkCommand',
'cmdline': None,
'can_notify': False,
'script_location': None})
def test_submit_tags(self):
sys.argv = ['qds.py', 'sparkcmd', 'submit', '--language','scala','--program',"println(\"hello, world!\")",
'--tags', 'abc,def']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'macros': None,
'label': None,
'language': 'scala',
'tags': ["abc", "def"],
'name': None,
'sql': None,
'program':"println(\"hello, world!\")" ,
'command_type': 'SparkCommand',
'arguments': None,
'user_program_arguments': None,
'cmdline': None,
'can_notify': False,
'script_location': None})
def test_submit_cluster_label(self):
sys.argv = ['qds.py', 'sparkcmd', 'submit', '--cmdline', '/usr/lib/spark/bin/spark-submit --class Test Test.jar',
'--cluster-label', 'test_label']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'macros': None,
'label': 'test_label',
'language' : None,
'cmdline': '/usr/lib/spark/bin/spark-submit --class Test Test.jar',
'tags': None,
'name': None,
'sql': None,
'program' : None,
'arguments': None,
'user_program_arguments': None,
'command_type': 'SparkCommand',
'can_notify': False,
'script_location': None})
def test_submit_name(self):
sys.argv = ['qds.py', 'sparkcmd', 'submit', '--cmdline', '/usr/lib/spark/bin/spark-submit --class Test Test.jar',
'--name', 'test_name']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'macros': None,
'label': None,
'language' : None,
'cmdline' : '/usr/lib/spark/bin/spark-submit --class Test Test.jar',
'tags': None,
'name': 'test_name',
'sql': None,
'arguments': None,
'user_program_arguments': None,
'program': None,
'command_type': 'SparkCommand',
'can_notify': False,
'script_location': None})
def test_submit_notify(self):
sys.argv = ['qds.py', 'sparkcmd', 'submit', '--cmdline', '/usr/lib/spark/bin/spark-submit --class Test Test.jar',
'--notify']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'macros': None,
'label': None,
'language' : None,
'tags': None,
'name': None,
'sql': None,
'program': None,
'cmdline': '/usr/lib/spark/bin/spark-submit --class Test Test.jar',
'command_type': 'SparkCommand',
'arguments': None,
'user_program_arguments': None,
'can_notify': True,
'script_location': None})
def test_submit_python_program(self):
sys.argv = ['qds.py', 'sparkcmd', 'submit', '--language','python','--program', 'print "hello, world!"']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'macros': None,
'label': None,
'language' : 'python',
'tags': None,
'name': None,
'sql': None,
'program': "print \"hello, world!\"",
'cmdline': None,
'command_type': 'SparkCommand',
'arguments': None,
'user_program_arguments': None,
'can_notify': False,
'script_location': None})
def test_submit_user_program_arguments(self):
sys.argv = ['qds.py', 'sparkcmd', 'submit', '--language','scala','--program',
"object HelloWorld {\n\n def main(args: Array[String]) {\n \n println(\"Hello, \" + args(0))\n \n }\n}\n",
'--arguments', '--class HelloWorld',
'--user_program_arguments', 'world']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'macros': None,
'label': None,
'language' : 'scala',
'tags': None,
'name': None,
'sql': None,
'program': "object HelloWorld {\n\n def main(args: Array[String]) {\n \n println(\"Hello, \" + args(0))\n \n }\n}\n" ,
'cmdline': None,
'command_type': 'SparkCommand',
'arguments': '--class HelloWorld',
'user_program_arguments': 'world',
'can_notify': False,
'script_location': None})
def test_submit_scala_program(self):
sys.argv = ['qds.py', 'sparkcmd', 'submit', '--language','scala','--program', 'println("hello, world!")']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'macros': None,
'label': None,
'language' : 'scala',
'tags': None,
'name': None,
'sql': None,
'program': "println(\"hello, world!\")",
'cmdline': None,
'command_type': 'SparkCommand',
'arguments': None,
'user_program_arguments': None,
'can_notify': False,
'script_location': None})
class TestPrestoCommand(QdsCliTestCase):
def test_submit_query(self):
sys.argv = ['qds.py', 'prestocmd', 'submit', '--query', 'show tables']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'macros': None,
'tags': None,
'label': None,
'name': None,
'query': 'show tables',
'command_type': 'PrestoCommand',
'can_notify': False,
'script_location': None})
def test_submit_script_location(self):
sys.argv = ['qds.py', 'prestocmd', 'submit', '--script_location', 's3://bucket/path-to-script']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'macros': None,
'label': None,
'tags': None,
'name': None,
'query': None,
'command_type': 'PrestoCommand',
'can_notify': False,
'script_location': 's3://bucket/path-to-script'})
def test_submit_none(self):
sys.argv = ['qds.py', 'prestocmd', 'submit']
print_command()
with self.assertRaises(qds_sdk.exception.ParseError):
qds.main()
def test_submit_both(self):
sys.argv = ['qds.py', 'prestocmd', 'submit', '--query', 'show tables',
'--script_location', 's3://bucket/path-to-script']
print_command()
with self.assertRaises(qds_sdk.exception.ParseError):
qds.main()
def test_submit_macros(self):
sys.argv = ['qds.py', 'prestocmd', 'submit', '--script_location', 's3://bucket/path-to-script',
'--macros', '[{"key1":"11","key2":"22"}, {"key3":"key1+key2"}]']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'macros': [{"key1":"11","key2":"22"}, {"key3":"key1+key2"}],
'tags': None,
'label': None,
'name': None,
'query': None,
'command_type': 'PrestoCommand',
'can_notify': False,
'script_location': 's3://bucket/path-to-script'})
def test_submit_tags(self):
sys.argv = ['qds.py', 'prestocmd', 'submit', '--script_location', 's3://bucket/path-to-script',
'--tags', 't1,t2']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'macros': None,
'tags': ["t1", "t2"],
'label': None,
'name': None,
'query': None,
'command_type': 'PrestoCommand',
'can_notify': False,
'script_location': 's3://bucket/path-to-script'})
def test_submit_cluster_label(self):
sys.argv = ['qds.py', 'prestocmd', 'submit', '--query', 'show tables',
'--cluster-label', 'test_label']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'macros': None,
'label': 'test_label',
'tags': None,
'name': None,
'query': 'show tables',
'command_type': 'PrestoCommand',
'can_notify': False,
'script_location': None})
def test_submit_name(self):
sys.argv = ['qds.py', 'prestocmd', 'submit', '--query', 'show tables',
'--name', 'test_name']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'macros': None,
'tags': None,
'label': None,
'name': 'test_name',
'query': 'show tables',
'command_type': 'PrestoCommand',
'can_notify': False,
'script_location': None})
def test_submit_notify(self):
sys.argv = ['qds.py', 'prestocmd', 'submit', '--query', 'show tables',
'--notify']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'macros': None,
'tags': None,
'label': None,
'name': None,
'query': 'show tables',
'command_type': 'PrestoCommand',
'can_notify': True,
'script_location': None})
class TestHadoopCommand(QdsCliTestCase):
def test_submit_jar(self):
sys.argv = ['qds.py', 'hadoopcmd', 'submit', 'jar', 's3://bucket/path-to-jar']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'sub_command': 'jar',
'sub_command_args': "'s3://bucket/path-to-jar'",
'name': None,
'label': None,
'tags': None,
'command_type': 'HadoopCommand',
'can_notify': False})
def test_submit_jar_invalid(self):
sys.argv = ['qds.py', 'hadoopcmd', 'submit', 'jar']
print_command()
with self.assertRaises(qds_sdk.exception.ParseError):
qds.main()
def test_submit_s3distcp(self):
sys.argv = ['qds.py', 'hadoopcmd', 'submit', 's3distcp', '--src', 'source', '--dest', 'destincation']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'sub_command': 's3distcp',
'sub_command_args': "'--src' 'source' '--dest' 'destincation'",
'name': None,
'label': None,
'tags': None,
'command_type': 'HadoopCommand',
'can_notify': False})
def test_submit_s3distcp_invalid(self):
sys.argv = ['qds.py', 'hadoopcmd', 'submit', 's3distcp']
print_command()
with self.assertRaises(qds_sdk.exception.ParseError):
qds.main()
def test_submit_streaming(self):
sys.argv = ['qds.py', 'hadoopcmd', 'submit', 'streaming',
'-files', 's3n://location-of-mapper.py,s3n://location-of-reducer.py',
'-input', 'myInputDirs',
'-output', 'myOutputDir',
'-mapper', 'mapper.py',
'-reducer', 'reducer.py']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'sub_command': 'streaming',
'sub_command_args': "'-files' 's3n://location-of-mapper.py,s3n://location-of-reducer.py' '-input' 'myInputDirs' '-output' 'myOutputDir' '-mapper' 'mapper.py' '-reducer' 'reducer.py'",
'name': None,
'label': None,
'tags': None,
'command_type': 'HadoopCommand',
'can_notify': False})
def test_submit_streaming_invalid(self):
sys.argv = ['qds.py', 'hadoopcmd', 'submit', 'streaming']
print_command()
with self.assertRaises(qds_sdk.exception.ParseError):
qds.main()
def test_submit_jar_cluster_label(self):
sys.argv = ['qds.py', 'hadoopcmd', 'submit', '--cluster-label', 'test_label', 'jar', 's3://bucket/path-to-jar']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'sub_command': 'jar',
'sub_command_args': "'s3://bucket/path-to-jar'",
'name': None,
'label': 'test_label',
'tags': None,
'command_type': 'HadoopCommand',
'can_notify': False})
def test_submit_jar_name(self):
sys.argv = ['qds.py', 'hadoopcmd', 'submit', '--name', 'test_name', 'jar', 's3://bucket/path-to-jar']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'sub_command': 'jar',
'sub_command_args': "'s3://bucket/path-to-jar'",
'name': 'test_name',
'label': None,
'tags': None,
'command_type': 'HadoopCommand',
'can_notify': False})
def test_submit_jar_notify(self):
sys.argv = ['qds.py', 'hadoopcmd', 'submit', '--notify', 'jar', 's3://bucket/path-to-jar']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'sub_command': 'jar',
'sub_command_args': "'s3://bucket/path-to-jar'",
'name': None,
'label': None,
'tags': None,
'command_type': 'HadoopCommand',
'can_notify': True})
def test_submit_tags(self):
sys.argv = ['qds.py', 'hadoopcmd', 'submit', '--name', 'test_name', '--tags', 'abc,def', 'jar', 's3://bucket/path-to-jar']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'sub_command': 'jar',
'sub_command_args': "'s3://bucket/path-to-jar'",
'name': 'test_name',
'tags': ['abc', 'def'],
'label': None,
'command_type': 'HadoopCommand',
'can_notify': False})
class TestShellCommand(QdsCliTestCase):
def test_stub(self):
pass
class TestPigCommand(QdsCliTestCase):
def test_stub(self):
pass
class TestDbExportCommand(QdsCliTestCase):
def test_submit_command(self):
sys.argv = ['qds.py', 'dbexportcmd', 'submit', '--mode', '1', '--dbtap_id', '1',
'--db_table', 'mydbtable', '--hive_table', 'myhivetable']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'export_dir': None,
'name': None,
'db_update_keys': None,
'partition_spec': None,
'fields_terminated_by': None,
'hive_table': 'myhivetable',
'db_table': 'mydbtable',
'mode': '1',
'tags': None,
'command_type': 'DbExportCommand',
'dbtap_id': '1',
'can_notify': False,
'db_update_mode': None})
def test_submit_fail_with_no_parameters(self):
sys.argv = ['qds.py', 'dbexportcmd', 'submit']
print_command()
with self.assertRaises(qds_sdk.exception.ParseError):
qds.main()
def test_submit_with_notify(self):
sys.argv = ['qds.py', 'dbexportcmd', 'submit', '--mode', '1', '--dbtap_id', '1',
'--db_table', 'mydbtable', '--hive_table', 'myhivetable', '--notify']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'export_dir': None,
'name': None,
'db_update_keys': None,
'partition_spec': None,
'fields_terminated_by': None,
'hive_table': 'myhivetable',
'db_table': 'mydbtable',
'mode': '1',
'tags': None,
'command_type': 'DbExportCommand',
'dbtap_id': '1',
'can_notify': True,
'db_update_mode': None})
def test_submit_with_name(self):
sys.argv = ['qds.py', 'dbexportcmd', 'submit', '--mode', '1', '--dbtap_id', '1',
'--db_table', 'mydbtable', '--hive_table', 'myhivetable', '--name', 'commandname']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'export_dir': None,
'name': 'commandname',
'db_update_keys': None,
'partition_spec': None,
'fields_terminated_by': None,
'hive_table': 'myhivetable',
'db_table': 'mydbtable',
'mode': '1',
'tags': None,
'command_type': 'DbExportCommand',
'dbtap_id': '1',
'can_notify': False,
'db_update_mode': None})
def test_submit_with_update_mode_and_keys(self):
sys.argv = ['qds.py', 'dbexportcmd', 'submit', '--mode', '1', '--dbtap_id', '1',
'--db_table', 'mydbtable', '--hive_table', 'myhivetable',
'--db_update_mode', 'updateonly', '--db_update_keys', 'key1']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'export_dir': None,
'name': None,
'db_update_keys': 'key1',
'partition_spec': None,
'fields_terminated_by': None,
'hive_table': 'myhivetable',
'db_table': 'mydbtable',
'mode': '1',
'tags': None,
'command_type': 'DbExportCommand',
'dbtap_id': '1',
'can_notify': False,
'db_update_mode': 'updateonly'})
def test_submit_with_mode_2(self):
sys.argv = ['qds.py', 'dbexportcmd', 'submit', '--mode', '2', '--dbtap_id', '1',
'--db_table', 'mydbtable', '--hive_table', 'myhivetable',
'--export_dir', 's3:///export-path/']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'export_dir': 's3:///export-path/',
'name': None,
'db_update_keys': None,
'partition_spec': None,
'fields_terminated_by': None,
'hive_table': 'myhivetable',
'db_table': 'mydbtable',
'mode': '2',
'tags': None,
'command_type': 'DbExportCommand',
'dbtap_id': '1',
'can_notify': False,
'db_update_mode': None})
class TestDbImportCommand(QdsCliTestCase):
    # Not much point adding more test cases while the semantic check in the main code is still pending.
    # The test cases might give false positives.
def test_submit_command(self):
sys.argv = ['qds.py', 'dbimportcmd', 'submit', '--mode', '1', '--dbtap_id', '1',
'--db_table', 'mydbtable', '--hive_table', 'myhivetable']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'db_parallelism': None,
'name': None,
'dbtap_id': '1',
'db_where': None,
'db_boundary_query': None,
'mode': '1',
'tags': None,
'command_type': 'DbImportCommand',
'db_split_column': None,
'can_notify': False,
'hive_table': 'myhivetable',
'db_table': 'mydbtable',
'db_extract_query': None})
class TestDbTapQueryCommand(QdsCliTestCase):
def test_submit(self):
sys.argv = ['qds.py', 'dbtapquerycmd', 'submit', '--query', 'show tables', '--db_tap_id', 1]
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'db_tap_id': 1,
'query': 'show tables',
'name': None,
'tags': None,
'macros': None,
'command_type': 'DbTapQueryCommand',
'can_notify': False})
def test_submit_fail_with_no_parameters(self):
sys.argv = ['qds.py', 'dbtapquerycmd', 'submit']
print_command()
with self.assertRaises(qds_sdk.exception.ParseError):
qds.main()
def test_submit_fail_with_only_query_passed(self):
sys.argv = ['qds.py', 'dbtapquerycmd', 'submit', '--query', 'show tables']
print_command()
with self.assertRaises(qds_sdk.exception.ParseError):
qds.main()
def test_submit_fail_with_only_db_tap_id_passed(self):
sys.argv = ['qds.py', 'dbtapquerycmd', 'submit', '--db_tap_id', 1]
print_command()
with self.assertRaises(qds_sdk.exception.ParseError):
qds.main()
def test_submit_with_notify(self):
sys.argv = ['qds.py', 'dbtapquerycmd', 'submit', '--query', 'show tables', '--db_tap_id', 1, '--notify']
print_command()
Connection._api_call = Mock(return_value={'id': 1})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'db_tap_id': 1,
'query': 'show tables',
'tags': None,
'name': None,
'macros': None,
'command_type': 'DbTapQueryCommand',
'can_notify': True})
def test_submit_with_name(self):
sys.argv = ['qds.py', 'dbtapquerycmd', 'submit', '--query', 'show tables', '--db_tap_id', 1, '--name', 'test_name']
print_command()
Connection._api_call = Mock(return_value={'id': 1})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'db_tap_id': 1,
'query': 'show tables',
'tags': None,
'name': 'test_name',
'macros': None,
'command_type': 'DbTapQueryCommand',
'can_notify': False})
def test_submit_with_macros(self):
sys.argv = ['qds.py', 'dbtapquerycmd', 'submit', '--query', "select * from table_1 limit \$limit\$",
'--db_tap_id', 1, '--macros', '[{"a": "1", "b" : "4", "limit":"a + b"}]']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'macros': [{"a": "1", "b" : "4", "limit":"a + b"}],
'db_tap_id': 1,
'query': "select * from table_1 limit \$limit\$",
'tags': None,
'name': None,
'command_type': 'DbTapQueryCommand',
'can_notify': False})
def test_submit_with_tags(self):
sys.argv = ['qds.py', 'dbtapquerycmd', 'submit', '--query', "select * from table_1 limit \$limit\$",
'--db_tap_id', 1, '--tags', 'tag1,tag2']
print_command()
Connection._api_call = Mock(return_value={'id': 1234})
qds.main()
Connection._api_call.assert_called_with('POST', 'commands',
{'macros': None,
'db_tap_id': 1,
'query': "select * from table_1 limit \$limit\$",
'tags': ["tag1", "tag2"],
'name': None,
'command_type': 'DbTapQueryCommand',
'can_notify': False})
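# --- Illustrative sketch (added for clarity; not part of the original suite) ---
# Every test above follows the same pattern: build sys.argv exactly as the qds
# CLI would receive it, replace Connection._api_call with a Mock so that no HTTP
# request is made, run qds.main(), and then assert on the REST verb, path and
# payload. The helper below is a hypothetical, framework-free distillation of
# that idea using only the Mock object already imported for these tests; it is
# not exercised by the suite itself.
def _example_mocked_api_call():
    api = Mock(return_value={'id': 1234})
    api('POST', 'commands', {'query': 'show tables'})
    api.assert_called_with('POST', 'commands', {'query': 'show tables'})
    return api.call_count  # 1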
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "3c3a0d40e5b53ce20f7e3cb81a07547c",
"timestamp": "",
"source": "github",
"line_count": 1139,
"max_line_length": 200,
"avg_line_length": 42.12115891132572,
"alnum_prop": 0.48165749541437386,
"repo_name": "rohitpruthi95/qds-sdk-py",
"id": "424001d8fb763a40ab06252ddebdbe829f1e63f7",
"size": "47976",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_command.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "321987"
}
],
"symlink_target": ""
}
|
from functools import wraps
from typing import Any, Callable
from ngraph.impl import Node
from ngraph.utils.types import NodeInput, as_node, as_nodes
def _set_node_name(node: Node, **kwargs: Any) -> Node:
if "name" in kwargs:
node.name = kwargs["name"]
return node
def nameable_op(node_factory_function: Callable) -> Callable:
"""Set the name to the ngraph operator returned by the wrapped function."""
@wraps(node_factory_function)
def wrapper(*args: Any, **kwargs: Any) -> Node:
node = node_factory_function(*args, **kwargs)
node = _set_node_name(node, **kwargs)
return node
return wrapper
def unary_op(node_factory_function: Callable) -> Callable:
"""Convert the first input value to a Constant Node if a numeric value is detected."""
@wraps(node_factory_function)
def wrapper(input_value: NodeInput, *args: Any, **kwargs: Any) -> Node:
input_node = as_node(input_value)
node = node_factory_function(input_node, *args, **kwargs)
node = _set_node_name(node, **kwargs)
return node
return wrapper
def binary_op(node_factory_function: Callable) -> Callable:
"""Convert the first two input values to Constant Nodes if numeric values are detected."""
@wraps(node_factory_function)
def wrapper(left: NodeInput, right: NodeInput, *args: Any, **kwargs: Any) -> Node:
left, right = as_nodes(left, right)
node = node_factory_function(left, right, *args, **kwargs)
node = _set_node_name(node, **kwargs)
return node
return wrapper
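# --- Illustrative sketch (added for clarity; not part of the original module) ---
# A minimal demonstration of how nameable_op forwards an optional "name" kwarg
# to whatever node object the wrapped factory returns. _StubNode is a stand-in
# assumption used only for this demo; real factories return ngraph Node objects.
if __name__ == "__main__":
    class _StubNode:
        name = ""

    @nameable_op
    def make_stub_node(**kwargs: Any) -> Any:
        return _StubNode()

    assert make_stub_node(name="my_op").name == "my_op"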
|
{
"content_hash": "1d750108caa03d55645af8aa0d3437bf",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 94,
"avg_line_length": 32.40816326530612,
"alnum_prop": 0.6612090680100756,
"repo_name": "NervanaSystems/ngraph",
"id": "f1665a9c041d7b0d3c771730d7ce4923d49edb2c",
"size": "2336",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/src/ngraph/utils/decorators.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3147"
},
{
"name": "C++",
"bytes": "19522833"
},
{
"name": "CMake",
"bytes": "223605"
},
{
"name": "Dockerfile",
"bytes": "2036"
},
{
"name": "Groovy",
"bytes": "13002"
},
{
"name": "MLIR",
"bytes": "55258"
},
{
"name": "Makefile",
"bytes": "13532"
},
{
"name": "Python",
"bytes": "331191"
},
{
"name": "Shell",
"bytes": "43252"
}
],
"symlink_target": ""
}
|
response.logo = A(B('web',SPAN(2),'py'),XML('™ '),
_class="navbar-brand",_href="http://www.web2py.com/",
_id="web2py-logo")
response.title = request.application.replace('_',' ').title()
response.subtitle = ''
## read more at http://dev.w3.org/html5/markup/meta.name.html
response.meta.author = 'Your Name <you@example.com>'
response.meta.description = 'a cool new app'
response.meta.keywords = 'web2py, python, framework'
response.meta.generator = 'Web2py Web Framework'
## your http://google.com/analytics id
response.google_analytics_id = None
#########################################################################
## this is the main application menu add/remove items as required
#########################################################################
response.menu = [
(T('Home'), False, URL('default', 'index'), [])
]
DEVELOPMENT_MENU = True
#########################################################################
## provide shortcuts for development. remove in production
#########################################################################
def _():
# shortcuts
app = request.application
ctr = request.controller
# useful links to internal and external resources
response.menu += [
(T('My Sites'), False, URL('admin', 'default', 'site')),
(T('This App'), False, '#', [
(T('Design'), False, URL('admin', 'default', 'design/%s' % app)),
LI(_class="divider"),
(T('Controller'), False,
URL(
'admin', 'default', 'edit/%s/controllers/%s.py' % (app, ctr))),
(T('View'), False,
URL(
'admin', 'default', 'edit/%s/views/%s' % (app, response.view))),
(T('DB Model'), False,
URL(
'admin', 'default', 'edit/%s/models/db.py' % app)),
(T('Menu Model'), False,
URL(
'admin', 'default', 'edit/%s/models/menu.py' % app)),
(T('Config.ini'), False,
URL(
'admin', 'default', 'edit/%s/private/appconfig.ini' % app)),
(T('Layout'), False,
URL(
'admin', 'default', 'edit/%s/views/layout.html' % app)),
(T('Stylesheet'), False,
URL(
'admin', 'default', 'edit/%s/static/css/web2py-bootstrap3.css' % app)),
(T('Database'), False, URL(app, 'appadmin', 'index')),
(T('Errors'), False, URL(
'admin', 'default', 'errors/' + app)),
(T('About'), False, URL(
'admin', 'default', 'about/' + app)),
]),
('web2py.com', False, '#', [
(T('Download'), False,
'http://www.web2py.com/examples/default/download'),
(T('Support'), False,
'http://www.web2py.com/examples/default/support'),
(T('Demo'), False, 'http://web2py.com/demo_admin'),
(T('Quick Examples'), False,
'http://web2py.com/examples/default/examples'),
(T('FAQ'), False, 'http://web2py.com/AlterEgo'),
(T('Videos'), False,
'http://www.web2py.com/examples/default/videos/'),
(T('Free Applications'),
False, 'http://web2py.com/appliances'),
(T('Plugins'), False, 'http://web2py.com/plugins'),
(T('Recipes'), False, 'http://web2pyslices.com/'),
]),
(T('Documentation'), False, '#', [
(T('Online book'), False, 'http://www.web2py.com/book'),
LI(_class="divider"),
(T('Preface'), False,
'http://www.web2py.com/book/default/chapter/00'),
(T('Introduction'), False,
'http://www.web2py.com/book/default/chapter/01'),
(T('Python'), False,
'http://www.web2py.com/book/default/chapter/02'),
(T('Overview'), False,
'http://www.web2py.com/book/default/chapter/03'),
(T('The Core'), False,
'http://www.web2py.com/book/default/chapter/04'),
(T('The Views'), False,
'http://www.web2py.com/book/default/chapter/05'),
(T('Database'), False,
'http://www.web2py.com/book/default/chapter/06'),
(T('Forms and Validators'), False,
'http://www.web2py.com/book/default/chapter/07'),
(T('Email and SMS'), False,
'http://www.web2py.com/book/default/chapter/08'),
(T('Access Control'), False,
'http://www.web2py.com/book/default/chapter/09'),
(T('Services'), False,
'http://www.web2py.com/book/default/chapter/10'),
(T('Ajax Recipes'), False,
'http://www.web2py.com/book/default/chapter/11'),
(T('Components and Plugins'), False,
'http://www.web2py.com/book/default/chapter/12'),
(T('Deployment Recipes'), False,
'http://www.web2py.com/book/default/chapter/13'),
(T('Other Recipes'), False,
'http://www.web2py.com/book/default/chapter/14'),
(T("Buy web2py's book"), False,
'http://stores.lulu.com/web2py'),
]),
(T('Community'), False, None, [
(T('Groups'), False,
'http://www.web2py.com/examples/default/usergroups'),
(T('Twitter'), False, 'http://twitter.com/web2py'),
(T('Live Chat'), False,
'http://webchat.freenode.net/?channels=web2py'),
]),
]
if DEVELOPMENT_MENU: _()
if "auth" in locals(): auth.wikimenu()
|
{
"content_hash": "aa527be81797df00cfb47a1578ea6276",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 86,
"avg_line_length": 44.46511627906977,
"alnum_prop": 0.4769874476987448,
"repo_name": "shashisp/blumix-webpy",
"id": "bffc021c9297df1ff9b19e90b3c7c5557d0d644a",
"size": "6043",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/applications/welcome/models/menu.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "65078"
},
{
"name": "HTML",
"bytes": "350557"
},
{
"name": "JavaScript",
"bytes": "242695"
},
{
"name": "Makefile",
"bytes": "13284"
},
{
"name": "Perl",
"bytes": "1688"
},
{
"name": "Python",
"bytes": "5966723"
},
{
"name": "Shell",
"bytes": "111239"
}
],
"symlink_target": ""
}
|
"""
Django settings for ohmycommand project.
Generated by 'django-admin startproject' using Django 1.9.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'k%f#%)o6uj6#$mf!&d42h=a)doc^wd9=c8476xp=frsb6k^d^z'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
THIRD_PARTY = [
'django_extensions',
'rest_framework',
'rest_framework.authtoken'
]
PROJ_APPS = [
'apps.commands.apps.CommandsConfig',
]
INSTALLED_APPS += THIRD_PARTY + PROJ_APPS
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'apps', 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'apps', 'static'),
]
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
)
}
|
{
"content_hash": "18c806c88e39a3bfe4fd1f5454ebd27b",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 91,
"avg_line_length": 26.04861111111111,
"alnum_prop": 0.6883497733937617,
"repo_name": "gopar/OhMyCommand",
"id": "b15a3a3fa5fb001041ba601802c035be657cf684",
"size": "3751",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/settings/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1061"
},
{
"name": "HTML",
"bytes": "16943"
},
{
"name": "JavaScript",
"bytes": "12446"
},
{
"name": "Python",
"bytes": "19698"
}
],
"symlink_target": ""
}
|
import csv
from datetime import datetime as dt, timedelta as td
import logging
import sys
import re
from copy import deepcopy
from dateutil.parser import parse
from volttron.platform.agent import utils
from volttron.platform.agent.utils import jsonapi, setup_logging
from volttron.platform.vip.agent import Agent, Core
from volttron.platform.jsonrpc import RemoteError
from volttron.platform.agent.driven import ConversionMapper
from volttron.platform.messaging import (headers as headers_mod, topics)
__version__ = '3.0.0'
__author1__ = 'Craig Allwardt <craig.allwardt@pnnl.gov>'
__author2__ = 'Robert Lutes <robert.lutes@pnnl.gov>'
__author3__ = 'Poorva Sharma <poorva.sharma@pnnl.gov>'
__copyright__ = 'Copyright (c) 2015, Battelle Memorial Institute'
__license__ = 'FreeBSD'
DATE_FORMAT = '%m-%d-%y %H:%M'
utils.setup_logging()
_log = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%m-%d-%y %H:%M')
def driven_agent(config_path, **kwargs):
"""Reads agent configuration and converts it to run driven agent.
:param kwargs: Any driver specific parameters"""
config = utils.load_config(config_path)
arguments = config.get('arguments')
mode = True if config.get('mode', 'PASSIVE') == 'ACTIVE' else False
multiple_devices = isinstance(config['device']['unit'], dict)
campus_building_config = config['device']
analysis_name = campus_building_config.get('analysis_name', 'analysis_name')
analysis_dict = {'analysis_name': analysis_name}
arguments.update(analysis_dict)
agent_id = config.get('agentid', None)
actuator_id = agent_id if agent_id is not None else analysis_name
campus_building = dict((key, campus_building_config[key]) for key in ['campus', 'building'])
analysis = deepcopy(campus_building)
analysis.update(analysis_dict)
device_config = config['device']['unit']
command_devices = device_config.keys()
device_topic_dict = {}
device_topic_list = []
subdevices_list = []
from_file = config.get('from_file')
for device_name in device_config:
device_topic = topics.DEVICES_VALUE(campus=campus_building.get('campus'),
building=campus_building.get('building'),
unit=device_name,
path='',
point='all')
device_topic_dict.update({device_topic: device_name})
device_topic_list.append(device_name)
if multiple_devices:
for subdevice in device_config[device_name]['subdevices']:
subdevices_list.append(subdevice)
subdevice_topic = topics.DEVICES_VALUE(campus=campus_building.get('campus'),
building=campus_building.get('building'),
unit=device_name,
path=subdevice,
point='all')
subdevice_name = device_name + "/" + subdevice
device_topic_dict.update({subdevice_topic: subdevice_name})
device_topic_list.append(subdevice_name)
base_actuator_path = topics.RPC_DEVICE_PATH(campus=campus_building.get('campus', ''),
building=campus_building.get('building', ''),
unit=None,
path='',
point=None)
conversion_map = config.get('conversion_map')
map_names = {}
for key, value in conversion_map.items():
map_names[key.lower() if isinstance(key, str) else key] = value
application = config.get('application')
validation_error = ''
if not application:
validation_error = 'Invalid application specified in config\n'
if validation_error:
_log.error(validation_error)
raise ValueError(validation_error)
config.update(config.get('arguments'))
converter = ConversionMapper()
output_file_prefix = config.get('output_file')
#unittype_map = config.get('unittype_map', None)
#assert unittype_map
klass = _get_class(application)
    # This instance is used to call the application's run method when
    # data comes in on the message bus. It is constructed here
    # so that each time run is called the application
    # can keep its state between calls.
app_instance = klass(**arguments)
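    # Illustrative sketch (added for clarity; the values below are hypothetical):
    # the agent configuration consumed above is expected to look roughly like
    #
    #     {
    #         "application": "drivenapp.example.Application",
    #         "mode": "PASSIVE",
    #         "arguments": {"some_threshold": 5},
    #         "conversion_map": {".*temperature.*": "float"},
    #         "output_file": "./results",
    #         "device": {
    #             "campus": "campus1",
    #             "building": "building1",
    #             "analysis_name": "example_analysis",
    #             "unit": {"AHU1": {"subdevices": ["VAV1", "VAV2"]}}
    #         }
    #     }
    #
    # 'unit' maps each device to its subdevices, 'application' is the dotted
    # path resolved by _get_class() further below, and 'arguments' is passed
    # straight to the application's constructor.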
class DrivenAgent(Agent):
"""Agent listens to message bus device and runs when data is published.
"""
def __init__(self, **kwargs):
"""
Initializes agent
:param kwargs: Any driver specific parameters"""
super(DrivenAgent, self).__init__(**kwargs)
# master is where we copy from to get a poppable list of
# subdevices that should be present before we run the analysis.
self._master_devices = device_topic_list
self._needed_devices = []
self._device_values = {}
self._initialize_devices()
self.received_input_datetime = None
self._kwargs = kwargs
self._header_written = False
self.file_creation_set = set()
def _initialize_devices(self):
self._needed_devices = deepcopy(self._master_devices)
self._device_values = {}
@Core.receiver('onstart')
        def startup(self, sender, **kwargs):
"""
Starts up the agent and subscribes to device topics
based on agent configuration.
:param sender:
:param kwargs: Any driver specific parameters
:type sender: str"""
self._initialize_devices()
for device_topic in device_topic_dict:
_log.debug('Subscribing to ' + device_topic)
self.vip.pubsub.subscribe(peer='pubsub',
prefix=device_topic,
callback=self.on_analysis_message)
def _should_run_now(self):
"""
Checks if messages from all the devices are received
before running application
:returns: True or False based on received messages.
:rtype: boolean"""
# Assumes the unit/all values will have values.
if not len(self._device_values.keys()) > 0:
return False
return not len(self._needed_devices) > 0
def on_analysis_message(self, peer, sender, bus, topic, headers, message):
"""
Subscribe to device data and assemble data set to pass
to applications.
:param peer:
:param sender: device name
:param bus:
:param topic: device path topic
:param headers: message headers
:param message: message containing points and values dict
from device with point type
:type peer: str
:type sender: str
:type bus: str
:type topic: str
:type headers: dict
:type message: dict"""
device_data = message[0]
if isinstance(device_data, list):
device_data = device_data[0]
def aggregate_subdevice(device_data):
tagged_device_data = {}
device_tag = device_topic_dict[topic]
if device_tag not in self._needed_devices:
return False
for key, value in device_data.items():
device_data_tag = '&'.join([key, device_tag])
tagged_device_data[device_data_tag] = value
self._device_values.update(tagged_device_data)
self._needed_devices.remove(device_tag)
return True
device_needed = aggregate_subdevice(device_data)
if not device_needed:
_log.error("Warning device values already present, "
"reinitializing")
if self._should_run_now():
field_names = {}
for k, v in self._device_values.items():
field_names[k.lower() if isinstance(k, str) else k] = v
if not converter.initialized and conversion_map is not None:
converter.setup_conversion_map(map_names, field_names)
if from_file:
_timestamp = parse(headers.get('Date'))
self.received_input_datetime = _timestamp
else:
_timestamp = dt.now()
self.received_input_datetime = dt.utcnow()
device_data = converter.process_row(field_names)
results = app_instance.run(_timestamp, device_data)
# results = app_instance.run(
# dateutil.parser.parse(self._subdevice_values['Timestamp'],
# fuzzy=True), self._subdevice_values)
self._process_results(results)
self._initialize_devices()
else:
_log.info("Still need {} before running.".format(self._needed_devices))
def _process_results(self, results):
"""
Runs driven application with converted data. Calls appropriate
methods to process commands, log and table_data in results.
:param results: Results object containing commands for devices,
log messages and table data.
:type results: Results object \\volttron.platform.agent.driven
:returns: Same as results param.
:rtype: Results object \\volttron.platform.agent.driven"""
def make_actuator_request(command_dict, results):
for device_tag, new_value in command_dict.items():
_log.debug("COMMAND TABLE: {}->{}".format(device_tag, new_value))
if mode:
_log.debug("ACTUATE ON DEVICE.")
results, actuator_error = self.actuator_request(results)
if not actuator_error:
self.actuator_set(results)
return results
_log.debug('Processing Results!')
for device, point_value_dict in results.devices.items():
make_actuator_request(point_value_dict, results)
make_actuator_request(results.commands, results)
for value in results.log_messages:
_log.debug("LOG: {}".format(value))
for key, value in results.table_output.items():
_log.debug("TABLE: {}->{}".format(key, value))
if output_file_prefix is not None:
results = self.create_file_output(results)
if len(results.table_output.keys()):
results = self.publish_analysis_results(results)
return results
def publish_analysis_results(self, results):
"""
Publish table_data in analysis results to the message bus for
capture by the data historian.
:param results: Results object containing commands for devices,
log messages and table data.
:type results: Results object \\volttron.platform.agent.driven
:returns: Same as results param.
:rtype: Results object \\volttron.platform.agent.driven"""
headers = {
headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON,
headers_mod.DATE: str(self.received_input_datetime),
}
for app, analysis_table in results.table_output.items():
try:
name_timestamp = app.split('&')
_name = name_timestamp[0]
timestamp = name_timestamp[1]
                except IndexError:
_name = app
timestamp = str(self.received_input_datetime)
headers = {
headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON,
headers_mod.DATE: timestamp,
}
for entry in analysis_table:
for key, value in entry.items():
for _device in command_devices:
analysis['unit'] = _device
analysis_topic = topics.ANALYSIS_VALUE(point=key, **analysis)
datatype = 'float'
if isinstance(value, int):
datatype = 'int'
kbase = key[key.rfind('/') + 1:]
message = [{kbase: value},
{kbase: {'tz': 'US/Pacific',
'type': datatype,
'units': 'float',
}
}]
self.vip.pubsub.publish(
'pubsub', analysis_topic, headers, message)
return results
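        # Illustrative note (added for clarity; the names are hypothetical): for
        # a table entry such as {'fan/speed': 42} published for device 'AHU1',
        # the loop above strips the path prefix from the point name and publishes
        # a two-element message of the shape
        #     [{'speed': 42},
        #      {'speed': {'tz': 'US/Pacific', 'type': 'int', 'units': 'float'}}]
        # on the analysis topic built from the campus/building/unit/point values.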
def create_file_output(self, results):
"""
Create results/data files for testing and algorithm validation
if table data is present in the results.
:param results: Results object containing commands for devices,
log messages and table data.
:type results: Results object \\volttron.platform.agent.driven
:returns: Same as results param.
:rtype: Results object \\volttron.platform.agent.driven"""
for key, value in results.table_output.items():
name_timestamp = key.split('&')
_name = name_timestamp[0]
timestamp = name_timestamp[1]
file_name = output_file_prefix + "-" + _name + ".csv"
if file_name not in self.file_creation_set:
self._header_written = False
self.file_creation_set.update([file_name])
for row in value:
with open(file_name, 'a+') as file_to_write:
row.update({'Timestamp': timestamp})
_keys = row.keys()
file_output = csv.DictWriter(file_to_write, _keys)
if not self._header_written:
file_output.writeheader()
self._header_written = True
file_output.writerow(row)
file_to_write.close()
return results
def actuator_request(self, results):
"""
Calls the actuator's request_new_schedule method to get
device schedule
:param results: Results object containing commands for devices,
log messages and table data.
:type results: Results object \\volttron.platform.agent.driven
:returns: Return result from request_new_schedule method
and True or False for error in scheduling device.
:rtype: dict and boolean
:Return Values:
The return values has the following format:
result = {'info': u'', 'data': {}, 'result': 'SUCCESS'}
request_error = True/False
warning:: Calling without previously scheduling a device and not within
the time allotted will raise a LockError"""
_now = dt.now()
str_now = _now.strftime(DATE_FORMAT)
_end = _now + td(minutes=1)
str_end = _end.strftime(DATE_FORMAT)
for _device in command_devices:
actuation_device = base_actuator_path(unit=_device, point='')
schedule_request = [[actuation_device, str_now, str_end]]
try:
result = self.vip.rpc.call('platform.actuator',
'request_new_schedule',
actuator_id, _device, 'HIGH',
schedule_request).get(timeout=4)
            except RemoteError as ex:
                _log.warning("Failed to schedule device {} (RemoteError): {}".format(_device, str(ex)))
                request_error = True
                # 'result' is undefined when the RPC call fails, so skip the result check.
                continue
            if result['result'] == 'FAILURE':
                if result['info'] == 'TASK_ID_ALREADY_EXISTS':
_log.info('Task to schedule device already exists ' + _device)
request_error = False
else:
_log.warn('Failed to schedule device (unavailable) ' + _device)
request_error = True
else:
request_error = False
return results, request_error
def actuator_set(self, results):
"""
Calls the actuator's set_point method to set point on device
:param results: Results object containing commands for devices,
log messages and table data.
:type results: Results object \\volttron.platform.agent.driven"""
def make_actuator_set(device, point_value_dict):
for point, new_value in point_value_dict.items():
point_path = base_actuator_path(unit=device, point=point)
try:
result = self.vip.rpc.call('platform.actuator', 'set_point',
actuator_id, point_path,
new_value).get(timeout=4)
_log.debug("Set point {} to {}".format(point_path, new_value))
except RemoteError as ex:
_log.warning("Failed to set {} to {}: {}".format(point_path, new_value, str(ex)))
continue
for device, point_value_dict in results.devices.items():
make_actuator_set(device, point_value_dict)
for device in command_devices:
make_actuator_set(device, results.commands)
DrivenAgent.__name__ = 'DrivenLoggerAgent'
return DrivenAgent(**kwargs)
def _get_class(kls):
"""Get driven application information."""
parts = kls.split('.')
module = ".".join(parts[:-1])
main_mod = __import__(module)
for comp in parts[1:]:
main_mod = getattr(main_mod, comp)
return main_mod
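# Illustrative sketch (added for clarity; not part of the original agent):
# _get_class resolves a dotted path by importing the module portion and then
# walking the remaining attribute parts. Using a standard-library class purely
# as a demonstration:
#
#     handler_cls = _get_class('logging.handlers.RotatingFileHandler')
#     assert handler_cls.__name__ == 'RotatingFileHandler'
#
# In this agent the dotted path comes from the 'application' entry of the
# configuration and is expected to name the driven application class.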
def main(argv=sys.argv):
''' Main method.'''
utils.vip_main(driven_agent)
if __name__ == '__main__':
# Entry point for script
try:
sys.exit(main())
except KeyboardInterrupt:
pass
|
{
"content_hash": "f61e10913e69db46edd04a2d758d26b4",
"timestamp": "",
"source": "github",
"line_count": 430,
"max_line_length": 107,
"avg_line_length": 45.3046511627907,
"alnum_prop": 0.5243570658590422,
"repo_name": "hlngo/volttron-applications",
"id": "ba46d8619ae0da5adee6e34685bfaac7785d8de8",
"size": "22382",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pnnl/FakeDrivenMatlabAgent/drivenmatlab/drivenagent.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "221216"
},
{
"name": "CSS",
"bytes": "11595"
},
{
"name": "Gnuplot",
"bytes": "2486"
},
{
"name": "HTML",
"bytes": "11677"
},
{
"name": "JavaScript",
"bytes": "168327"
},
{
"name": "Makefile",
"bytes": "2413"
},
{
"name": "Objective-C",
"bytes": "1042"
},
{
"name": "Python",
"bytes": "983238"
},
{
"name": "Shell",
"bytes": "270"
}
],
"symlink_target": ""
}
|
from django.utils.translation import ugettext_lazy as _
import horizon
class FreezerDR(horizon.PanelGroup):
slug = "freezerdr"
name = _("Backup and Restore")
panels = ('jobs', 'actions', 'sessions', 'clients', 'backups')
class Freezer(horizon.Dashboard):
name = _("Disaster Recovery")
slug = "disaster_recovery"
panels = (FreezerDR,)
default_panel = 'jobs'
horizon.register(Freezer)
|
{
"content_hash": "933396d6836389709448fe0dcb328c04",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 66,
"avg_line_length": 22,
"alnum_prop": 0.6770334928229665,
"repo_name": "openstack/freezer-web-ui",
"id": "9ce559f48ff92e89694b727123da9085f459e76e",
"size": "1032",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "disaster_recovery/dashboard.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "195"
},
{
"name": "HTML",
"bytes": "9076"
},
{
"name": "JavaScript",
"bytes": "18110"
},
{
"name": "Python",
"bytes": "141934"
},
{
"name": "Shell",
"bytes": "21749"
}
],
"symlink_target": ""
}
|
__author__ = 'Bruno Clermont'
__maintainer__ = 'Bruno Clermont'
__email__ = 'bruno@robotinfra.com'
import logging
import os
import re
from UserList import UserList
from UserDict import IterableUserDict, UserDict
import yaml
logger = logging.getLogger(__name__)
MINE_DATA_FUNC_NAME = 'monitoring.data'
MINE_DATA_KEY = 'checks'
NSCA_D = '/etc/nagios/nsca.d'
def _yaml(filename):
with open(filename, 'r') as stream:
try:
return yaml.load(stream)
except Exception, err:
logger.critical("YAML data from failed to parse for '%s'",
filename, exc_info=True)
stream.seek(0)
logger.debug("failed YAML content of '%s' is '%s'", filename,
stream.read())
raise err
def list_check_formulas():
'''
    List all formulas that have a monitoring check on this minion.
'''
output = []
logger.debug("Check for yaml file in %s", NSCA_D)
for filename in __salt__['file.find'](NSCA_D, type='f'):
basename = os.path.basename(filename)
output.append(os.path.splitext(basename)[0])
return output
def load_check(formula, remove_sensitive_data=True):
'''
Load monitoring data for a single formula.
'''
basename = '%s.yml' % formula
filename = os.path.join(NSCA_D, basename)
try:
check = _yaml(filename)
        # append the formula name to the check data; used for custom macros
        # in the shinken event handler
salt_env = __salt__['pillar.get']('branch', 'master')
for data in check.itervalues():
data['formula'] = formula
data['salt_env'] = salt_env
logger.debug("monitoring checks: %s", check)
except Exception, err:
logger.error("Can't load '%s': %s", filename, err)
return {}
if remove_sensitive_data:
        # Remove the key that holds the NRPE command executed as the check.
        # It must not be copied into the salt mine: it is not used by
        # shinken and it might contain sensitive information.
        # The same applies to the 'context' subkey, which holds the context
        # passed to the NRPE check.
for subkey in ('command', 'arguments'):
for key in check:
try:
del check[key][subkey]
except KeyError:
pass
return check
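# An nsca.d YAML file maps check names to their definitions. A minimal
# illustrative sketch (the key names mirror how this module uses them; the
# check name and command below are placeholders, and real formulas may
# define more keys such as 'context'):
#
#     check_nginx_status:
#       command: /usr/lib/nagios/plugins/check_http -H 127.0.0.1
#       arguments: -u /nginx_status
#       dependencies:
#         - check_nginx_process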
def list_checks(remove_sensitive_data=True):
'''
    Return all monitoring check data for all formulas for this minion.
'''
checks = {}
for formula in list_check_formulas():
checks.update(load_check(formula, remove_sensitive_data))
return checks
def list_nrpe_checks():
output = {}
all_checks = list_checks(False)
for check in all_checks:
if 'command' in all_checks[check]:
output.update({check: all_checks[check]})
return output
class _DontExistData(object):
pass
def get_first_ip(interface, addr_type):
'''
    Get the first public or private IP of an interface.
    If that interface has no matching address, fall back to the first IP
    from all interfaces.
'''
default_ip = __salt__['network.ip_addrs']()[0]
try:
if addr_type == 'public':
return next(
(public_ip
for public_ip in __salt__['network.ip_addrs'](interface)
if not __salt__['network.is_private'](public_ip)),
default_ip)
else:
return next(
(private_ip
for private_ip in __salt__['network.ip_addrs'](interface)
if __salt__['network.is_private'](private_ip)),
default_ip)
except IndexError:
return default_ip
def data():
'''
    Return minion-specific data required for monitoring.
'''
output = {
'shinken_pollers': __salt__['pillar.get']('shinken_pollers', []),
'roles': __salt__['pillar.get']('roles', []),
'checks': list_checks(),
'monitor': __salt__['pillar.get']('monitor', True),
'parents': __salt__['pillar.get']('parent_hosts', [])
}
if 'availabilityZone' in __salt__['grains.ls']():
# from ec2_info grains
output['amazon_ec2'] = {
'availability_zone': __salt__['grains.get']('availabilityZone'),
'region': __salt__['grains.get']('region')
}
    # figure out how monitoring can reach this host
ip_addrs = __salt__['pillar.get']('ip_addrs', {})
interface = __salt__['pillar.get']('network_interface', 'eth0')
if ip_addrs:
# from pillar data
output['ip_addrs'] = {
'public': ip_addrs.get('public',
get_first_ip(interface, 'public')),
'private': ip_addrs.get('private',
get_first_ip(interface, 'private'))
}
elif 'amazon_ec2' in output:
# if IP not defined, just pick those from EC2
output['ip_addrs'] = {
'public': __salt__['grains.get']('public-ipv4'),
'private': __salt__['grains.get']('privateIp'),
}
else:
# from network interface
output['ip_addrs'] = {
'public': get_first_ip(interface, 'public'),
'private': get_first_ip(interface, 'private')
}
    # figure out how monitoring can reach this host using IPv6
ip_addrs6 = __salt__['pillar.get']('ip_addrs6', {})
if ip_addrs6:
output['ip_addrs6'] = ip_addrs6
else:
interface = __salt__['pillar.get']('network_interface', 'eth0')
valid_ip_addrs6 = [
ip for ip in __salt__['network.ip_addrs6'](interface=interface)
if not ip.startswith("fe80")] # filter out link local
if len(valid_ip_addrs6) > 0:
output['ip_addrs6'] = {
'public': valid_ip_addrs6[0]
}
output['ip_addrs6']['private'] = output['ip_addrs6']['public']
else:
output['ip_addrs6'] = {'public': None, 'private': None}
# check monitoring_data pillar for extra values to return
monitoring_data = __salt__['pillar.get']('monitoring_data', {})
extra_data = {}
for key_name in monitoring_data:
try:
key_type = monitoring_data[key_name]['type']
except KeyError:
logger.error("Missing type for '%s'", key_name)
continue
try:
path = monitoring_data[key_name]['path']
except KeyError:
logger.error("Missing path for '%s'", key_name)
continue
if key_type == 'keys':
extra_data[key_name] = __salt__['pillar.get'](path, {}).keys()
elif key_type == 'value':
extra_data[key_name] = __salt__['pillar.get'](path)
elif key_type == 'exists':
value = __salt__['pillar.get'](path, _DontExistData)
if value == _DontExistData:
extra_data[key_name] = False
else:
extra_data[key_name] = True
else:
logger.error("Unknown key type '%s'", key_type)
output['extra'] = extra_data
return output
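# For reference, data() returns a structure roughly like the following,
# which is what shinken() below consumes from the salt mine (all values
# here are illustrative, not taken from a real minion):
#
#     {'shinken_pollers': [...],
#      'roles': [...],
#      'checks': {<check name>: {...}},
#      'monitor': True,
#      'parents': [...],
#      'ip_addrs': {'public': '192.0.2.10', 'private': '10.0.0.10'},
#      'ip_addrs6': {'public': None, 'private': None},
#      'extra': {...}}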
def run_check(check_name, checks=None):
'''
Run a specific nagios check
CLI Example::
salt '*' nrpe.run_check <check name>
'''
if checks is None:
checks = list_nrpe_checks()
logger.debug("Found %d checks", len(checks.keys()))
ret = {
'name': check_name,
'changes': {},
}
if check_name not in checks:
ret['result'] = False
ret['comment'] = "Can't find check '{0}'".format(check_name)
return ret
cmd = checks[check_name]['command']
if __salt__['pillar.get']('__test__', False):
# nagios user may not have permission to run sudo with --help
verbose_cmd = re.sub(r'^sudo\s[^\/]*\/', '/', cmd) + ' --help'
outputverbose = __salt__['cmd.run_all'](verbose_cmd, runas='nagios')
if ' -v,' in outputverbose['stdout']:
            # the check supports verbose output, so be verbose
cmd += ' -vv'
output = __salt__['cmd.run_all'](cmd, runas='nagios')
ret['comment'] = "stdout: '{0}' stderr: '{1}'".format(output['stdout'],
output['stderr'])
if output['retcode'] == 0 and output['stderr']:
ret['comment'] += "\nCheck is considered fail cos stderr is not empty"
ret['result'] = False
return ret
ret['result'] = output['retcode'] == 0
return ret
def run_all_checks(return_only_failure=False):
'''
    Run all available nagios checks; useful to verify that everything is fine
    before the monitoring system finds a problem.
CLI Example::
salt '*' nrpe.run_all_checks
'''
output = {}
all_checks = list_nrpe_checks()
for check_name in all_checks:
check_result = run_check(check_name, all_checks)
del check_result['changes']
del check_result['name']
if return_only_failure:
if not check_result['result']:
output[check_name] = check_result
else:
output[check_name] = check_result
return output
class SaltMineCheck(object):
'''
Raw check from salt mine ``monitoring.data``.
'''
def __init__(self, minion_id, name, data):
self.minion = minion_id
self.name = name
self.data = data
self.resolved = False
def __repr__(self):
return '%s (minion %s) %s' % (self.name, self.minion, repr(self.data))
def resolve_dependencies(self, existing_resolved):
'''
Resolve all dependencies to other checks.
Replace data['dependencies'] with list of :class:`CheckData`
'''
if self.resolved:
return True
dep_names = self.data.get('dependencies', ())
if not dep_names:
logger.debug("%s: no dependencies required", self)
self.resolved = True
return True
logger.debug('%s: %d dependencies', self, len(dep_names))
deps = []
try:
for dep_name in dep_names:
try:
deps.append(existing_resolved.get_minion_check(
self.minion, dep_name))
except (KeyError, IndexError):
logger.debug("%s: dependency of %s don't exist.", self,
dep_name)
raise
except (KeyError, IndexError):
logger.debug("Can't resolve all dependencies of %s, skip.", self)
return False
self.resolved = True
self.data['dependencies'] = deps
return True
class CheckData(UserDict):
'''
    Unique monitoring check and its linked minions.
Consumed by ``shinken/infra.jinja2``
'''
def __init__(self, name, check_list, minions=(), **kwargs):
self.minions = []
self.name = name
self.check_list = check_list
for minion in minions:
self.minions.append(minion)
UserDict.__init__(self, **kwargs)
def __repr__(self):
return '%s (%d minions) %s' % (self.name, len(self.minions),
repr(self.data))
def shinken_service_description(self):
'''
        Return a shinken compatible name. Appends ``-2``, ``-3`` and so on
        when several checks share the same name but have different
        configurations.
        Note: for best results self.check_list should be ``sort()``-ed,
        which :func:`shinken` does.
'''
check_list_index = self.check_list.index(self)
if check_list_index > 0:
return '%s-%d' % (self.name, check_list_index + 1)
return self.name
class Check(UserList):
'''
List of :class:`CheckData`
'''
def __init__(self, name, *args, **kwargs):
self.name = name
UserList.__init__(self, *args, **kwargs)
def __repr__(self):
return ' '.join((self.name, repr(self.data)))
def minion_index(self, minion):
for check_data in self:
if minion in check_data.minions:
return self.index(check_data)
raise IndexError("No CheckData with minion %s" % minion)
def check_index(self, salt_mine_check):
'''
Return index in the list of an existing :class:`CheckData`.
'''
for check_data in self:
if salt_mine_check.data == check_data.data:
return self.index(check_data)
raise IndexError("No existing check for %s" % salt_mine_check)
def check_append(self, salt_mine_check):
'''
        Append a :class:`SaltMineCheck` minion to its appropriate
        :class:`CheckData`.
'''
if not salt_mine_check.resolved:
raise ValueError("Can't check_append unresolved %s",
salt_mine_check)
try:
index = self.check_index(salt_mine_check)
check = self[index]
except IndexError:
check = CheckData(salt_mine_check.name, self,
dict=salt_mine_check.data)
self.append(check)
check.minions.append(salt_mine_check.minion)
def sort(self, *args, **kwds):
# reverse sort by number of minions, more = first in list
self.data.sort(cmp=lambda y, x: cmp(len(x.minions), len(y.minions)))
class Checks(IterableUserDict):
def get_check_list(self, check_name):
try:
return self[check_name]
except KeyError:
self[check_name] = Check(check_name)
logger.debug("Found new check name %s", check_name)
return self[check_name]
def get_minion_check(self, minion, check_name):
check = self[check_name]
return check[check.minion_index(minion)]
def process_salt_mine_checks(self, salt_mine_checks):
'''
        Loop through a list of :class:`SaltMineCheck` and remove the
        instances that have been resolved and processed.
'''
        # iterate over a copy because items are removed from the original
        # list below
        for salt_mine_check in list(salt_mine_checks):
if salt_mine_check.resolve_dependencies(self):
logger.debug("%s all dependencies are ok, process.",
salt_mine_check)
check_list = self.get_check_list(salt_mine_check.name)
check_list.check_append(salt_mine_check)
salt_mine_checks.remove(salt_mine_check)
def _flatten_mine_data(mine_data):
'''
    Flatten all monitoring checks of all monitored minions into a single
    list.
'''
output = []
for minion in mine_data.keys():
if not mine_data[minion]['monitor']:
logger.info("Ignore unmonitored minion %s", minion)
else:
logger.debug("Monitor minion %s %d checks", minion,
len(mine_data[minion][MINE_DATA_KEY]))
for check_name in mine_data[minion][MINE_DATA_KEY]:
output.append(SaltMineCheck(minion, check_name,
mine_data[minion][MINE_DATA_KEY][check_name]))
logger.debug("Total of %d checks to process", len(output))
return output
def shinken(mine_data=None):
'''
Pre-process all salt mine monitoring data for all minions to let
shinken build a monitoring configuration.
'''
    # fetch from the salt mine the data that data() published for every minion
if not mine_data:
mine_data = __salt__['mine.get']('*', MINE_DATA_FUNC_NAME)
flat = _flatten_mine_data(mine_data)
del mine_data
output = Checks()
    # loop through all flattened checks until everything is resolved and processed
while flat:
before = len(flat)
output.process_salt_mine_checks(flat)
after = len(flat)
if after == before:
unresolvable = []
for check in flat:
unresolvable.append('%s(%s)' % (check.name, check.minion))
raise ValueError("Can't resolve all dependencies of: %s" %
','.join(unresolvable))
elif not after:
logger.debug("Processed successfully all checks from all minions.")
else:
logger.debug("Processed %d salt mine check, %d for next batch",
before - after, after)
# sort all :class:`Check`
for check_name in output.keys():
output[check_name].sort()
for check_data in output[check_name]:
check_data.minions.sort()
return output
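# Illustrative invocation of shinken() (a sketch; the minion id and check
# payload are made up, and the structure mirrors what data() publishes to
# the salt mine):
#
#     mine_data = {
#         'web01.example.com': {
#             'monitor': True,
#             'checks': {
#                 'check_load': {'command': '...', 'dependencies': []},
#             },
#         },
#     }
#     checks = shinken(mine_data)
#     # 'checks' maps each check name to a Check list whose CheckData
#     # entries group the minions sharing the same configuration.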
|
{
"content_hash": "54ccefbcf6a903141894f0e019c1952c",
"timestamp": "",
"source": "github",
"line_count": 496,
"max_line_length": 79,
"avg_line_length": 33.22782258064516,
"alnum_prop": 0.5548207026272678,
"repo_name": "rtx3/saltstack-deyunio",
"id": "155efc1dd1263dec3894f14ba1482920850fcbfc",
"size": "16585",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "srv/salt/modules/monitoring.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3267"
},
{
"name": "Python",
"bytes": "408808"
},
{
"name": "Ruby",
"bytes": "1076"
},
{
"name": "SaltStack",
"bytes": "80981"
},
{
"name": "Scheme",
"bytes": "1393"
},
{
"name": "Shell",
"bytes": "7886"
}
],
"symlink_target": ""
}
|
import base64
from .fields import BaseField
class BaseTask(object):
def serialize(self, **result):
return result
class ProxyMixin(BaseTask):
def __init__(self, *args, **kwargs):
self.proxy = kwargs.pop('proxy')
self.userAgent = kwargs.pop('user_agent')
self.cookies = kwargs.pop('cookies', '')
super(ProxyMixin, self).__init__(*args, **kwargs)
def serialize(self, **result):
result = super(ProxyMixin, self).serialize(**result)
result.update(self.proxy.serialize())
result['userAgent'] = self.userAgent
if self.cookies:
result['cookies'] = self.cookies
return result
class NoCaptchaTaskProxylessTask(BaseTask):
type = "NoCaptchaTaskProxyless"
websiteURL = None
websiteKey = None
websiteSToken = None
def __init__(self, website_url, website_key, website_s_token=None, is_invisible=None):
self.websiteURL = website_url
self.websiteKey = website_key
self.websiteSToken = website_s_token
self.isInvisible = is_invisible
def serialize(self):
data = {'type': self.type,
'websiteURL': self.websiteURL,
'websiteKey': self.websiteKey}
if self.websiteSToken is not None:
data['websiteSToken'] = self.websiteSToken
if self.isInvisible is not None:
data['isInvisible'] = self.isInvisible
return data
class FunCaptchaTask(ProxyMixin):
type = "FunCaptchaTask"
websiteURL = None
websiteKey = None
def __init__(self, website_url, website_key, *args, **kwargs):
self.websiteURL = website_url
self.websiteKey = website_key
super(FunCaptchaTask, self).__init__(*args, **kwargs)
def serialize(self, **result):
result = super(FunCaptchaTask, self).serialize(**result)
result.update({'type': self.type,
'websiteURL': self.websiteURL,
'websitePublicKey': self.websiteKey})
return result
class NoCaptchaTask(ProxyMixin, NoCaptchaTaskProxylessTask):
type = "NoCaptchaTask"
class ImageToTextTask(object):
type = "ImageToTextTask"
fp = None
phrase = None
case = None
numeric = None
math = None
minLength = None
maxLength = None
def __init__(self, fp, phrase=None, case=None, numeric=None, math=None, min_length=None, max_length=None):
self.fp = fp
self.phrase = phrase
self.case = case
self.numeric = numeric
self.math = math
self.minLength = min_length
self.maxLength = max_length
def serialize(self):
return {'type': self.type,
'body': base64.b64encode(self.fp.read()).decode('utf-8'),
'phrase': self.phrase,
'case': self.case,
'numeric': self.numeric,
'math': self.math,
'minLength': self.minLength,
'maxLength': self.maxLength}
class CustomCaptchaTask(BaseTask):
type = 'CustomCaptchaTask'
imageUrl = None
assignment = None
form = None
def __init__(self, imageUrl, form=None, assignment=None):
self.imageUrl = imageUrl
self.form = form or {}
self.assignment = assignment
def serialize(self):
data = super(CustomCaptchaTask, self).serialize()
data.update({'type': self.type,
'imageUrl': self.imageUrl})
if self.form:
forms = []
for name, field in self.form.items():
if isinstance(field, BaseField):
forms.append(field.serialize(name))
else:
field = field.copy()
field['name'] = name
forms.append(field)
data['forms'] = forms
if self.assignment:
data['assignment'] = self.assignment
return data
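# Illustrative serialization (a sketch; assumes this module is imported as
# part of its package so the relative import above resolves, and the URL
# and site key are placeholders):
#
#     task = NoCaptchaTaskProxylessTask(
#         website_url='https://example.com/login',
#         website_key='6Le...site-key...')
#     task.serialize()
#     # {'type': 'NoCaptchaTaskProxyless',
#     #  'websiteURL': 'https://example.com/login',
#     #  'websiteKey': '6Le...site-key...'}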
|
{
"content_hash": "12b76f8855d8c0ad5b22d51438caf3f6",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 110,
"avg_line_length": 30.8125,
"alnum_prop": 0.5826572008113591,
"repo_name": "pannal/Subliminal.bundle",
"id": "57462763f55843da005b046563702a6c6131148f",
"size": "3944",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Contents/Libraries/Shared/python_anticaptcha/tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3012769"
},
{
"name": "Python",
"bytes": "3311785"
},
{
"name": "Shell",
"bytes": "273"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, division, absolute_import
import logging
from flexget import options, plugin
from flexget.event import event
from flexget.utils.tools import console
log = logging.getLogger('dump')
def dump(entries, debug=False, eval_lazy=False, trace=False):
"""
Dump *entries* to stdout
:param list entries: Entries to be dumped.
:param bool debug: Print non printable fields as well.
:param bool eval_lazy: Evaluate lazy fields.
:param bool trace: Display trace information.
"""
def sort_key(field):
# Sort certain fields above the rest
if field == 'title':
return 0
if field == 'url':
return 1
if field == 'original_url':
return 2
return field
for entry in entries:
for field in sorted(entry, key=sort_key):
if entry.is_lazy(field) and not eval_lazy:
value = '<LazyField - value will be determined when it is accessed>'
else:
value = entry[field]
if isinstance(value, basestring):
try:
console('%-17s: %s' % (field, value.replace('\r', '').replace('\n', '')))
except Exception:
console('%-17s: %r (warning: unable to print)' % (field, value))
elif isinstance(value, list):
console('%-17s: %s' % (field, '[%s]' % ', '.join(unicode(v) for v in value)))
elif isinstance(value, (int, float, dict)):
console('%-17s: %s' % (field, value))
elif value is None:
console('%-17s: %s' % (field, value))
else:
try:
value = str(entry[field])
console('%-17s: %s' % (field, value.replace('\r', '').replace('\n', '')))
except Exception:
if debug:
console('%-17s: [not printable] (%r)' % (field, value))
if trace:
console('-- Processing trace:')
for item in entry.traces:
console('%-10s %-7s %s' % (item[0], '' if item[1] is None else item[1], item[2]))
console('')
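# Illustrative call (a sketch, assuming flexget.entry.Entry is importable in
# this Flexget version; it behaves like a dict with lazy-field support):
#
#     from flexget.entry import Entry
#     dump([Entry(title='Some.Show.S01E01', url='http://example.com/ep1')])
#
# 'title' and 'url' are printed first because of sort_key; any remaining
# fields follow in alphabetical order.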
class OutputDump(object):
"""
Outputs all entries to console
"""
schema = {'type': 'boolean'}
@plugin.priority(0)
def on_task_output(self, task, config):
if not config and task.options.dump_entries is None:
return
eval_lazy = 'eval' in task.options.dump_entries
trace = 'trace' in task.options.dump_entries
states = ['accepted', 'rejected', 'failed', 'undecided']
dumpstates = [s for s in states if s in task.options.dump_entries]
specificstates = dumpstates
if not dumpstates:
dumpstates = states
undecided = [entry for entry in task.all_entries if entry.undecided]
if 'undecided' in dumpstates:
if undecided:
console('-- Undecided: --------------------------')
dump(undecided, task.options.debug, eval_lazy, trace)
elif specificstates:
console('No undecided entries')
if 'accepted' in dumpstates:
if task.accepted:
console('-- Accepted: ---------------------------')
dump(task.accepted, task.options.debug, eval_lazy, trace)
elif specificstates:
console('No accepted entries')
if 'rejected' in dumpstates:
if task.rejected:
console('-- Rejected: ---------------------------')
dump(task.rejected, task.options.debug, eval_lazy, trace)
elif specificstates:
console('No rejected entries')
@event('plugin.register')
def register_plugin():
plugin.register(OutputDump, 'dump', builtin=True, api_ver=2)
@event('options.register')
def register_parser_arguments():
options.get_parser('execute').add_argument('--dump', nargs='*', choices=['eval', 'trace', 'accepted', 'rejected',
'undecided'], dest='dump_entries', help='display all entries in task with fields they contain, '
'use `--dump eval` to evaluate all lazy fields. Specify an entry state/states to only dump matching entries.')
|
{
"content_hash": "81066541332ca9bb69f640c6024c74fa",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 118,
"avg_line_length": 38.86363636363637,
"alnum_prop": 0.5483040935672515,
"repo_name": "X-dark/Flexget",
"id": "361094f56598334fc4174eb2320eab8e133bbdda",
"size": "4275",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flexget/plugins/output/dump.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11875"
},
{
"name": "HTML",
"bytes": "79376"
},
{
"name": "JavaScript",
"bytes": "263723"
},
{
"name": "Python",
"bytes": "3325135"
},
{
"name": "SRecode Template",
"bytes": "3"
}
],
"symlink_target": ""
}
|
"""
Unit test runner.
To run: python3 -m twest.test_all
(The twest, two, twcommon modules must be in your PYTHON_PATH.
MongoDB must be running; these tests run in (and trash) the 'testdb'
collection.)
This is a simplified version of the runner in tornado.testing.
"""
import sys
import unittest
import tornado.options
import tornado.testing
testlist = [
'twest.test_interp',
'twest.test_eval',
'twest.test_funcs',
'twest.test_propcache',
'twcommon.misc',
'two.grammar',
]
if __name__ == '__main__':
# Sets up some logging stuff. Plus we may use the options someday.
tornado.options.parse_command_line()
argv = [sys.argv[0]] + testlist
kwargs = {}
unittest.main(module=None, argv=argv, **kwargs)
|
{
"content_hash": "b5e087df25dac41b2371bc077c6d56a9",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 70,
"avg_line_length": 22.90909090909091,
"alnum_prop": 0.6693121693121693,
"repo_name": "erkyrath/tworld",
"id": "41e2f699107f76c3000b48a3141b64334890d741",
"size": "756",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/twest/test_all.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16218"
},
{
"name": "HTML",
"bytes": "33052"
},
{
"name": "JavaScript",
"bytes": "153012"
},
{
"name": "Python",
"bytes": "686421"
}
],
"symlink_target": ""
}
|
"""
Travel log parsing.
"""
from bs4 import BeautifulSoup
import datetime
import jieba.analyse
from snownlp import SnowNLP
from db.travellog import Tlog
def parser_log(place_id, url, html):
"""
    Parse the HTML page content.
"""
if html is None:
return
tlog = Tlog(url, place_id)
tlog.html = html
    # parse with BeautifulSoup
html_bs_obj = BeautifulSoup(tlog.html, 'lxml')
try:
        # main title
tlog.title = html_bs_obj.select('h1')[0].text.strip()
        # text content
        # there are two possible content classes: va_con or a_con_text
if html_bs_obj.find(class_='va_con'):
tlog.text_content = html_bs_obj.find(class_='va_con').text
elif html_bs_obj.find(class_='a_con_text'):
tlog.text_content = html_bs_obj.find(class_='a_con_text').text
else:
raise AttributeError
tlog.text_content = ''.join(tlog.text_content.split())
        print('Fetched:', tlog.title)
except IndexError:
tlog.error = 'Parse content error. Index out of range.'
except AttributeError:
tlog.error = 'Parse content error. No attribute.'
    # if non-essential fields such as the start date or trip length fail to
    # parse, ignore the error and fall back to sentinel values
try:
        # departure date
start_time = html_bs_obj.select('.time')[0].text.split(r'/')[1]
        # convert to datetime
tlog.start_time = datetime.datetime.strptime(start_time, '%Y-%m-%d')
except:
tlog.start_time = datetime.datetime(1900, 1, 1, 0, 0)
try:
        # trip length in days
days = html_bs_obj.select('.day')[0].text.split(r'/')[1]
        # convert to int
tlog.days = int(days.split('天')[0])
except:
tlog.days = -1
    # word count and picture count
try:
total_obj = html_bs_obj.find(class_='vc_total')
tlog.total_words, tlog.total_pictures = total_obj.find_all('span')
tlog.help_persons = total_obj.find(class_='_j_total_person').text
if tlog.total_words.text == '':
tlog.total_words = len(tlog.text_content)
else:
tlog.total_words = int(tlog.total_words.text)
if tlog.total_pictures.text == '':
            # count pictures with BeautifulSoup
tlog.total_pictures = len(html_bs_obj.find_all('div', class_='add_pic'))
else:
tlog.total_pictures = int(tlog.total_pictures.text)
if tlog.help_persons == '':
tlog.help_persons = -1
else:
tlog.help_persons = int(tlog.help_persons)
except Exception as e:
print(e)
keywords_parser(tlog)
tlog.status = 3
tlog.save()
def keywords_parser(tlog):
"""
    Keyword extraction.
"""
try:
s = SnowNLP(tlog.text_content)
# print('Keywords:', s.keywords(10))
tlog.sentiments = cal_content_avg_sentiments(s.sentences)
# jieba
tlog.keywords = jieba.analyse.extract_tags(tlog.text_content)
except Exception as e:
        # ignore for now
pass
def cal_content_avg_sentiments(sentences):
"""
    Compute the sentiment score of each sentence in the given list and
    return their average.
    :param sentences: list of sentences
    :return: the average sentiment score, or -1 for an empty list
"""
list_sentiments = [SnowNLP(sentence).sentiments for sentence in sentences]
if len(list_sentiments) == 0:
return -1
else:
return sum(list_sentiments) / len(list_sentiments)
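# Illustrative use of the sentiment helper (a sketch; assumes snownlp is
# installed and the sample sentences are placeholders):
#
#     cal_content_avg_sentiments(['风景非常漂亮', '行程安排得很糟糕'])
#     # returns a float in [0, 1]; -1 is returned for an empty list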
|
{
"content_hash": "f0128d0523506598e414c6ec56f0ad08",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 84,
"avg_line_length": 29.951923076923077,
"alnum_prop": 0.5823434991974318,
"repo_name": "bobobo80/python-crawler-test",
"id": "e69706c8e045acff4c400018aa9bf5fb99edd41c",
"size": "3325",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mfw_parser/log_parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "672137"
},
{
"name": "Python",
"bytes": "24065"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import re
import json
# This ensures that we use 'str' in Python 3, and 'unicode' in Python 2
PythonString = "".__class__
class JsonTraverseParser:
def __init__(self, raw_data, custom_json_impl=None):
if raw_data and not isinstance(raw_data, PythonString):
raise TypeError("the 'raw_data' argument must be {}, not '{}'".format(PythonString, type(raw_data)))
json_impl = custom_json_impl or json
self.data = json_impl.loads(raw_data) if raw_data else None
def traverse(self, path, force_list=False):
if not isinstance(path, PythonString):
raise TypeError("the 'path' argument must be {}, not '{}'".format(PythonString, type(path)))
reduced = []
if self.data:
reduced.append(self.data)
if path:
for item in path.split("."):
list_reduced = list(reduced)
if self.is_valid_index(item):
list_reduced = self.reduce_list(reduced, item)
dict_reduced = self.flatten(reduced)
if not list_reduced or list_reduced == reduced:
dict_reduced = self.reduce_dict(dict_reduced, item)
if list_reduced and list_reduced != reduced:
reduced = list_reduced
elif dict_reduced and dict_reduced != self.flatten(reduced):
reduced = dict_reduced
else:
reduced = []
if isinstance(reduced, list) and len(reduced) == 1:
reduced = reduced[0]
if isinstance(reduced, list) and len(reduced) == 0:
reduced = None
if force_list and not isinstance(reduced, list):
if reduced is None:
reduced = []
else:
reduced = [reduced]
return reduced
def reduce_list(self, reduced, item):
outputs = []
for value in reduced:
try:
outputs.append(value[int(item)])
except (ValueError, IndexError, KeyError, TypeError):
pass
return outputs
def reduce_dict(self, reduced, item):
outputs = []
for value in reduced:
try:
outputs.append(value[item])
except (KeyError, TypeError):
pass
return outputs
def flatten(self, reduced):
flattened = []
for value_or_sublist in reduced:
if isinstance(value_or_sublist, list):
flattened += value_or_sublist
else:
flattened.append(value_or_sublist)
return flattened
def is_valid_index(self, string):
return re.match(r"^(0|[1-9][0-9]*)$", string)
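if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): traverse nested
    # JSON with dotted paths, mixing list indices and dictionary keys.
    raw = '{"users": [{"name": "alice"}, {"name": "bob"}]}'
    parser = JsonTraverseParser(raw)
    print(parser.traverse("users.0.name"))                  # alice
    print(parser.traverse("users.name", force_list=True))   # ['alice', 'bob']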
|
{
"content_hash": "8610433201c606326040543c827216d0",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 112,
"avg_line_length": 31.636363636363637,
"alnum_prop": 0.5463362068965517,
"repo_name": "EmilStenstrom/json-traverse",
"id": "3183d671638097377ea140fa1e46daeb384d1edb",
"size": "2784",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jsontraverse/parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12053"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('taskzilla', '0005_auto_20151011_0100'),
]
operations = [
migrations.AddField(
model_name='task',
name='description',
field=models.TextField(default=''),
),
]
|
{
"content_hash": "e9623599eb868c82fc6971ba02cef70e",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 49,
"avg_line_length": 21,
"alnum_prop": 0.5873015873015873,
"repo_name": "jailuthra/taskzilla",
"id": "f9820b944e5bba7c5ed3a5784e95c3d4331b5991",
"size": "402",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "taskzilla/migrations/0006_task_description.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "38"
},
{
"name": "HTML",
"bytes": "7846"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "16124"
}
],
"symlink_target": ""
}
|
"""Tests for bundle support."""
import os
import tempfile
from dulwich.tests import (
TestCase,
)
from dulwich.bundle import (
Bundle,
read_bundle,
write_bundle,
)
class BundleTests(TestCase):
def test_roundtrip_bundle(self):
origbundle = Bundle()
origbundle.version = 3
origbundle.capabilities = {"foo": None}
origbundle.references = {b"refs/heads/master": b"ab" * 20}
origbundle.prerequisites = [(b"cc" * 20, "comment")]
with tempfile.TemporaryDirectory() as td:
with open(os.path.join(td, "foo"), "wb") as f:
write_bundle(f, origbundle)
with open(os.path.join(td, "foo"), "rb") as f:
newbundle = read_bundle(f)
self.assertEqual(origbundle, newbundle)
|
{
"content_hash": "5176997c74990424663ee83e6362c9c8",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 66,
"avg_line_length": 25.838709677419356,
"alnum_prop": 0.5967540574282147,
"repo_name": "sonntagsgesicht/regtest",
"id": "7a9f38af1ca37da13ea42a3496a3c23cc6106233",
"size": "1750",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": ".aux/venv/lib/python3.9/site-packages/dulwich/tests/test_bundle.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13888"
}
],
"symlink_target": ""
}
|
import numpy as np
from scipy.stats import itemfreq
from collections import defaultdict
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC as SVM
def set_trace():
from IPython.core.debugger import Pdb
import sys
Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
def impute(data, imputer, imp_method, params_dict):
imp_data = None
if imp_method == 'RandomReplace':
imp_data = imputer.replace(data, params_dict['miss_data_cond'])
elif imp_method == 'Drop':
imp_data = imputer.drop(data, params_dict['miss_data_cond'])
elif imp_method == 'Summary':
imp_data = imputer.summarize(data,
params_dict['summary_func'],
params_dict['miss_data_cond'])
elif imp_method == 'RandomForest':
clf = RandomForestClassifier(n_estimators=100, criterion='gini')
imp_data = imputer.predict(data,
params_dict['cat_cols'],
params_dict['miss_data_cond'],
clf)
elif imp_method == 'SVM':
clf = SVM()
imp_data = imputer.predict(data,
params_dict['cat_cols'],
params_dict['miss_data_cond'],
clf)
elif imp_method == 'LogisticRegression':
clf = LogisticRegression()
imp_data = imputer.predict(data,
params_dict['cat_cols'],
params_dict['miss_data_cond'],
clf)
elif imp_method == 'SVD':
imp_data = imputer.factor_analysis(data,
params_dict['cat_cols'],
params_dict['miss_data_cond'],
technique='SVD')
elif imp_method == 'KNN':
imp_data = imputer.knn(data,
params_dict['n_neighbors'],
params_dict['knn_summary_func'],
params_dict['miss_data_cond'],
params_dict['cat_cols'])
elif imp_method == 'Identity':
imp_data = data
else:
raise Exception("Imputation method {} is not valid".format(imp_method))
return imp_data
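# Illustrative dispatch (a sketch; the Imputer object is expected to come
# from elsewhere in this project and to expose the replace/drop/summarize/
# predict/factor_analysis/knn methods used above; the values below are
# placeholders):
#
#     params_dict = {'miss_data_cond': miss_data_cond,
#                    'cat_cols': cat_cols,
#                    'summary_func': summary_func,
#                    'n_neighbors': 5,
#                    'knn_summary_func': knn_summary_func}
#     imp_data = impute(data, imputer, 'KNN', params_dict)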
def perturb_data(x, cols, ratio, monotone, missing_data_symbol, mnar=None,
in_place=False):
"""Perturbs data by substituting existing values with missing data symbol
such that each feature has a minimum missing data ratio
Parameters
----------
x : np.ndarray
Matrix with categorical data, where rows are observations and
columns are features
cols : int tuple
index of columns that are categorical
ratio : float [0, 1]
Ratio of observations in data to have missing data
missing_data_symbol : str
String that represents missing data in data
    monotone : boolean
        Non-monotone: any observation and feature can present a missing
        value, but no observation gets more than half of its features
        missing.
        Monotone: set to missing a random subset of the values of the
        selected categorical features.
    mnar : tuple
        Perturb only items in the x matrix that match items in the tuple.
        MNAR suppresses monotone.
    in_place : bool
        If True, modify x directly instead of working on a copy.
    """
def zero():
return 0
if in_place:
data = x
else:
data = np.copy(x)
n_perturbations = int(len(x) * ratio)
if mnar is not None:
        # collect the coordinates of every cell matching one of the mnar items
        mask = []
        for item in mnar:
            mask.extend(np.argwhere(data == item).tolist())
        mask = np.array(mask)
n_perturbations = int(len(mask) * ratio)
if n_perturbations < 1:
raise Exception('Number of perturbations is smaller than 1.')
mask_rows = np.random.choice(mask.shape[0],
max(int(len(mask) * ratio), 1),
replace=False)
coords = np.array(mask[mask_rows], ndmin=2)
data[coords[:, 0], coords[:, 1]] = missing_data_symbol
miss_dict = defaultdict(list)
        for row, col in coords:
            miss_dict[col].append(row)
elif monotone:
missing_mask = np.random.choice((0, 1), data[:, cols].shape, True,
(1-ratio, ratio)).astype(bool)
miss_dict = defaultdict(list)
for i in xrange(len(cols)):
rows = np.where(missing_mask[:, i])[0]
data[rows, cols[i]] = missing_data_symbol
miss_dict[cols[i]] = rows
"""
cols = np.random.choice(cols, int(len(cols) * monotone))
rows = np.random.randint(0, len(data), n_perturbations)
cols = np.random.choice(cols, n_perturbations)
data[rows, cols] = missing_data_symbol
miss_dict = defaultdict(list)
for (row, col) in np.dstack((rows, cols))[0]:
miss_dict[col].append(row)
"""
else:
# slow
row_col_miss = defaultdict(zero)
miss_dict = defaultdict(list)
i = 0
while i < n_perturbations:
row = np.random.randint(0, len(data))
col = np.random.choice(cols)
# proceed if less than half the features are missing
if row_col_miss[row] < len(cols) * 0.5 \
and data[row, col] != missing_data_symbol:
data[row, col] = missing_data_symbol
row_col_miss[row] += 1
miss_dict[col].append(row)
i += 1
return data, miss_dict
def compute_histogram(data, labels):
histogram = dict(itemfreq(data))
for label in labels:
if label not in histogram:
histogram[label] = .0
return histogram
def compute_error_rate(y, y_hat, feat_imp_ids):
error_rate = {}
for col, ids in feat_imp_ids.items():
errors = sum(y[ids, col] != y_hat[ids, col])
error_rate[col] = errors / float(len(ids))
return error_rate
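if __name__ == '__main__':
    # Small self-contained demo of perturb_data (not part of the original
    # module, and it assumes this module's own dependencies are installed).
    # Both columns are treated as categorical and roughly half of the rows
    # receive one missing value, marked with '?'.
    demo = np.array([['a', 'x'], ['b', 'y'], ['a', 'y'], ['b', 'x']])
    perturbed, missing = perturb_data(demo, cols=(0, 1), ratio=0.5,
                                      monotone=False,
                                      missing_data_symbol='?')
    print(perturbed)
    print(dict(missing))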
|
{
"content_hash": "577079ad4246e23eb17233434436da93",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 79,
"avg_line_length": 37.214285714285715,
"alnum_prop": 0.5419065898912349,
"repo_name": "rafaelvalle/MDI",
"id": "7780191e6aea30dc81ee559ec6673dfe7fc6cda9",
"size": "6252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "processing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "124144"
}
],
"symlink_target": ""
}
|
"""Tests for the PyBEL assembler."""
import json
import networkx as nx
import pybel.constants as pc
from pybel.dsl import abundance, activity, bioprocess, \
complex_abundance, hgvs, pmod, protein, reaction
from indra.assemblers.pybel import assembler as pa
from indra.databases import hgnc_client
from indra.statements import *
def id(gene_name):
return hgnc_client.get_hgnc_id(gene_name)
phos_dsl = pmod('Ph', 'Ser', 218)
ub_dsl = pmod('Ub', 'Ser', 218)
egfr_phos_dsl = pmod('Ph', 'Tyr', 1173)
braf_dsl = protein(namespace='HGNC', name='BRAF', identifier='1097')
map2k1_dsl = protein(namespace='HGNC', name='MAP2K1', identifier='6840')
tp53_dsl = protein(namespace='HGNC', name='TP53', identifier='11998')
mdm2_dsl = protein(namespace='HGNC', name='MDM2', identifier='6973')
egfr_dsl = protein(namespace='HGNC', name='EGFR', identifier='3236')
chebi_17534 = abundance(namespace='CHEBI', name='D-glucose',
identifier='17634')
chebi_4170 = abundance(namespace='CHEBI', name='D-glucopyranose 6-phosphate',
identifier='4170')
chebi_17534_to_4170 = reaction(chebi_17534, chebi_4170)
grb2_dsl = protein(namespace='HGNC', name='GRB2', identifier='4566')
sos1_dsl = protein(namespace='HGNC', name='SOS1', identifier='11187')
sos1_phosphorylated_dsl = sos1_dsl.with_variants(pmod('Ph'))
kras_node = protein(namespace='HGNC', name='KRAS', identifier='6407')
egfr_grb2_sos1_complex_dsl = complex_abundance([
egfr_dsl,
grb2_dsl,
sos1_dsl,
])
egfr_grb2_sos1_phos_complex_dsl = complex_abundance([
egfr_dsl,
grb2_dsl,
sos1_phosphorylated_dsl,
])
def draw(g, filename):
ag = nx.nx_agraph.to_agraph(g)
ag.draw(filename, prog='dot')
def get_edge_data(g, u, v):
assert g.has_edge(u, v)
data = g.get_edge_data(u, v)
return list(data.values())[0]
def get_first_edge_data(g):
return list(g.edges(data=True))[0][2]
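# Note: PyBEL's BELGraph is a networkx MultiDiGraph, so parallel edges can
# exist between the same pair of nodes; the two helpers above pull a single
# edge's attribute dict out for the assertions in the tests below.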
def test_simple_modification_no_evidence():
braf = Agent('BRAF', db_refs={'HGNC': '1097', 'UP': 'P15056'})
braf_kin = Agent('BRAF', activity=ActivityCondition('kinase', True),
db_refs={'HGNC': '1097', 'UP': 'P15056'})
braf_cat = Agent('BRAF', activity=ActivityCondition('catalytic', True),
db_refs={'HGNC': '1097', 'UP': 'P15056'})
map2k1 = Agent('MAP2K1', db_refs={'HGNC': '6840', 'UP': 'Q02750'}) # MEK
stmt1 = Phosphorylation(braf, map2k1, 'S', '218')
stmt2 = Phosphorylation(braf_kin, map2k1, 'S', '218')
stmt3 = Ubiquitination(braf_cat, map2k1, 'S', '218')
# Edge info for subject
edge1 = None
edge2 = activity('kin')
edge3 = activity('cat')
for stmt, modtuple, subj_edge in ((stmt1, phos_dsl, edge1),
(stmt2, phos_dsl, edge2),
(stmt3, ub_dsl, edge3)):
pba = pa.PybelAssembler([stmt])
belgraph = pba.make_model()
assert belgraph.number_of_nodes() == 3, belgraph.number_of_nodes()
map2k1_mod_dsl = map2k1_dsl.with_variants(modtuple)
assert set(belgraph) == {braf_dsl, map2k1_dsl, map2k1_mod_dsl}, \
(set(belgraph), {braf_dsl, map2k1_dsl, map2k1_mod_dsl})
assert belgraph.number_of_edges() == 2, belgraph.number_of_edges()
assert belgraph.has_edge(map2k1_dsl, map2k1_mod_dsl)
assert belgraph.has_edge(braf_dsl, map2k1_mod_dsl)
edge_data = get_edge_data(belgraph, braf_dsl, map2k1_mod_dsl)
assert edge_data[pc.RELATION] == pc.INCREASES
assert edge_data.get(pc.SUBJECT) == subj_edge
def test_modification_with_evidences():
braf_kin = Agent('BRAF', activity=ActivityCondition('kinase', True),
db_refs={'HGNC': '1097', 'UP': 'P15056'})
mek = Agent('MAP2K1', db_refs={'HGNC': '6840', 'UP': 'Q02750'})
evidence = Evidence(source_api='test', text='evidence text', pmid='1234', epistemics={
'dummy': ['a', 'b'],
'scalar': 'yes',
'missing': None,
})
stmt = Phosphorylation(braf_kin, mek, 'S', '218', evidence=evidence)
pba = pa.PybelAssembler([stmt])
belgraph = pba.make_model()
assert belgraph.number_of_nodes() == 3, belgraph.number_of_nodes()
assert braf_dsl in belgraph
map2k1_mod_dsl = map2k1_dsl.with_variants(phos_dsl)
assert map2k1_mod_dsl in belgraph
assert belgraph.number_of_edges() == 2
edge_data = get_edge_data(belgraph, braf_dsl, map2k1_mod_dsl)
assert edge_data.get(pc.SUBJECT) == activity('kin')
assert edge_data[pc.RELATION] == pc.INCREASES
assert edge_data.get(pc.EVIDENCE) == 'evidence text', edge_data
assert edge_data[pc.CITATION] == {
pc.CITATION_DB: pc.CITATION_TYPE_PUBMED,
pc.CITATION_IDENTIFIER: '1234',
}
assert 'source_api' in edge_data[pc.ANNOTATIONS]
assert 'test' in edge_data[pc.ANNOTATIONS]['source_api']
assert 'source_id' not in edge_data[pc.ANNOTATIONS]
assert 'source_hash' in edge_data[pc.ANNOTATIONS]
assert 'dummy' in edge_data[pc.ANNOTATIONS]
assert 'a' in edge_data[pc.ANNOTATIONS]['dummy']
assert 'b' in edge_data[pc.ANNOTATIONS]['dummy']
assert 'scalar' in edge_data[pc.ANNOTATIONS]
assert 'yes' in edge_data[pc.ANNOTATIONS]['scalar']
assert 'missing' not in edge_data[pc.ANNOTATIONS]
def test_modification_with_mutation():
braf = Agent('BRAF', mutations=[MutCondition('600', 'V', 'E')],
db_refs={'HGNC': '1097', 'UP': 'P15056'})
mek = Agent('MAP2K1', db_refs={'HGNC': '6840', 'UP': 'Q02750'})
stmt = Phosphorylation(braf, mek, 'S', '218')
pba = pa.PybelAssembler([stmt])
belgraph = pba.make_model()
# Adds in the base protein nodes as well as the variants (so 4 nodes)
assert belgraph.number_of_nodes() == 4, belgraph.number_of_nodes()
braf_mut_dsl = braf_dsl.with_variants(hgvs('p.Val600Glu'))
assert braf_mut_dsl in belgraph
def test_activation():
braf_no_act = Agent('BRAF', db_refs={'HGNC': '1097', 'UP': 'P15056'})
braf_kin = Agent('BRAF', activity=ActivityCondition('kinase', True),
db_refs={'HGNC': '1097', 'UP': 'P15056'})
mek = Agent('MAP2K1', db_refs={'HGNC': '6840', 'UP': 'Q02750'})
stmt1 = Activation(braf_no_act, mek)
stmt2 = Activation(braf_kin, mek, 'kinase')
hash1 = stmt1.get_hash(refresh=True)
hash2 = stmt2.get_hash(refresh=True)
edge1 = {
pc.RELATION: pc.INCREASES,
pc.OBJECT: activity(),
pc.ANNOTATIONS: {
'stmt_hash': {hash1: True},
'uuid': {stmt1.uuid: True},
'belief': {stmt1.belief: True},
},
}
edge2 = {
pc.RELATION: pc.INCREASES,
pc.SUBJECT: activity('kin'),
pc.OBJECT: activity('kin'),
pc.ANNOTATIONS: {
'stmt_hash': {hash2: True},
'uuid': {stmt2.uuid: True},
'belief': {stmt2.belief: True},
},
}
for stmt, edge in ((stmt1, edge1), (stmt2, edge2)):
pba = pa.PybelAssembler([stmt])
belgraph = pba.make_model()
assert belgraph.number_of_nodes() == 2, belgraph.number_of_nodes()
assert braf_dsl in belgraph
assert map2k1_dsl in belgraph
assert belgraph.number_of_edges() == 1
edge_data = get_first_edge_data(belgraph)
assert edge_data == edge, edge_data
def test_direct_activation():
braf_no_act = Agent('BRAF', db_refs={'HGNC': '1097', 'UP': 'P15056'})
braf_kin = Agent('BRAF', activity=ActivityCondition('kinase', True),
db_refs={'HGNC': '1097', 'UP': 'P15056'})
mek = Agent('MAP2K1', db_refs={'HGNC': '6840', 'UP': 'Q02750'})
stmt1_ev = Evidence(
pmid='1234',
epistemics={'direct': True},
)
stmt1 = Activation(braf_no_act, mek, evidence=stmt1_ev)
stmt2 = Activation(braf_kin, mek, 'kinase', evidence=stmt1_ev)
hash1 = stmt1.get_hash(refresh=True)
hash2 = stmt2.get_hash(refresh=True)
edge1 = {
pc.RELATION: pc.DIRECTLY_INCREASES,
pc.OBJECT: activity(),
pc.EVIDENCE: 'No evidence text.',
pc.CITATION: {
pc.CITATION_DB: pc.CITATION_TYPE_PUBMED,
pc.CITATION_IDENTIFIER: '1234',
},
pc.ANNOTATIONS: {
'stmt_hash': {hash1: True},
'source_hash': {stmt1_ev.get_source_hash(): True},
'uuid': {stmt1.uuid: True},
'belief': {stmt1.belief: True},
},
}
edge2 = {
pc.RELATION: pc.DIRECTLY_INCREASES,
pc.SUBJECT: activity('kin'),
pc.OBJECT: activity('kin'),
pc.EVIDENCE: 'No evidence text.',
pc.CITATION: {
pc.CITATION_DB: pc.CITATION_TYPE_PUBMED,
pc.CITATION_IDENTIFIER: '1234',
},
pc.ANNOTATIONS: {
'stmt_hash': {hash2: True},
'source_hash': {stmt1_ev.get_source_hash(): True},
'uuid': {stmt2.uuid: True},
'belief': {stmt2.belief: True},
},
}
for stmt, expected_edge in ((stmt1, edge1), (stmt2, edge2)):
pba = pa.PybelAssembler([stmt])
belgraph = pba.make_model()
assert belgraph.number_of_nodes() == 2, belgraph.number_of_nodes()
assert braf_dsl in belgraph
assert map2k1_dsl in belgraph
assert belgraph.number_of_edges() == 1
edge_data = get_first_edge_data(belgraph)
assert expected_edge == edge_data, json.dumps(edge_data, indent=1)
def test_inhibition():
braf_kin = Agent('BRAF', activity=ActivityCondition('kinase', True),
db_refs={'HGNC': '1097', 'UP': 'P15056'})
mek = Agent('MAP2K1', db_refs={'HGNC': '6840', 'UP': 'Q02750'})
stmt = Inhibition(braf_kin, mek, 'kinase')
stmt_hash = stmt.get_hash(refresh=True)
edge = {
pc.RELATION: pc.DECREASES,
pc.SUBJECT: activity('kin'),
pc.OBJECT: activity('kin'),
pc.ANNOTATIONS: {
'stmt_hash': {stmt_hash: True},
'uuid': {stmt.uuid: True},
'belief': {stmt.belief: True},
},
}
pba = pa.PybelAssembler([stmt])
belgraph = pba.make_model()
assert belgraph.number_of_nodes() == 2, belgraph.number_of_nodes()
assert braf_dsl in belgraph
assert map2k1_dsl in belgraph
assert belgraph.number_of_edges() == 1
edge_data = get_first_edge_data(belgraph)
assert edge_data == edge, edge_data
def test_increase_amount():
tp53 = Agent('TP53', db_refs={'HGNC': '11998'})
mdm2 = Agent('MDM2', db_refs={'HGNC': '6973'})
stmt = IncreaseAmount(tp53, mdm2)
pba = pa.PybelAssembler([stmt])
belgraph = pba.make_model()
assert belgraph.number_of_nodes() == 2, belgraph.number_of_nodes()
assert mdm2_dsl in belgraph
assert tp53_dsl in belgraph
assert belgraph.number_of_edges() == 1
edge_data = get_first_edge_data(belgraph)
assert edge_data[pc.RELATION] == pc.INCREASES
def test_increase_amount_tscript():
tp53 = Agent('TP53', activity=ActivityCondition('transcription', True),
db_refs={'HGNC': '11998'})
mdm2 = Agent('MDM2', db_refs={'HGNC': '6973'})
stmt = IncreaseAmount(tp53, mdm2)
pba = pa.PybelAssembler([stmt])
belgraph = pba.make_model()
assert belgraph.number_of_nodes() == 2, belgraph.number_of_nodes()
assert mdm2_dsl in belgraph
assert tp53_dsl in belgraph
assert belgraph.number_of_edges() == 1
edge_data = get_first_edge_data(belgraph)
assert edge_data[pc.RELATION] == pc.INCREASES
assert edge_data[pc.SUBJECT] == activity('tscript')
def test_gef():
gef = Agent('SOS1', mods=[ModCondition('phosphorylation')],
db_refs={'HGNC': '11187'})
ras = Agent('KRAS', db_refs={'HGNC': '6407'})
stmt = Gef(gef, ras)
stmt_hash = stmt.get_hash(refresh=True)
pba = pa.PybelAssembler([stmt])
belgraph = pba.make_model()
assert len(belgraph) == 3
assert belgraph.number_of_edges() == 2
gef_reference_node = protein(
namespace='HGNC', name='SOS1', identifier='11187')
gef_node = gef_reference_node.with_variants(pmod('Ph'))
assert gef_reference_node in belgraph
assert gef_node in belgraph
assert kras_node in belgraph
edge_data = get_edge_data(belgraph, gef_node, kras_node)
edge = {
pc.RELATION: pc.DIRECTLY_INCREASES,
pc.SUBJECT: activity('gef'),
pc.OBJECT: activity('gtp'),
pc.ANNOTATIONS: {
'stmt_hash': {stmt_hash: True},
'uuid': {stmt.uuid: True},
'belief': {stmt.belief: True},
},
}
assert edge_data == edge, edge_data
def test_gap():
gap = Agent('RASA1', mods=[ModCondition('phosphorylation')],
db_refs={'HGNC': '9871'})
ras = Agent('KRAS', db_refs={'HGNC': '6407'})
stmt = Gap(gap, ras)
stmt_hash = stmt.get_hash(refresh=True)
pba = pa.PybelAssembler([stmt])
belgraph = pba.make_model()
assert len(belgraph) == 3
assert belgraph.number_of_edges() == 2
gap_reference_node = protein(
namespace='HGNC', name='RASA1', identifier='9871')
gap_node = gap_reference_node.with_variants(pmod('Ph'))
ras_node = protein(namespace='HGNC', name='KRAS', identifier='6407')
assert gap_reference_node in belgraph
assert gap_node in belgraph
assert ras_node in belgraph
edge_data = get_edge_data(belgraph, gap_node, ras_node)
edge = {
pc.RELATION: pc.DIRECTLY_DECREASES,
pc.SUBJECT: activity('gap'),
pc.OBJECT: activity('gtp'),
pc.ANNOTATIONS: {
'stmt_hash': {stmt_hash: True},
'uuid': {stmt.uuid: True},
'belief': {stmt.belief: True},
},
}
assert edge_data == edge, edge_data
def test_active_form():
ras = Agent('KRAS', mutations=[MutCondition('12', 'G', 'V')],
db_refs={'HGNC': '6407'})
mapk1_p = Agent('MAP2K1',
mods=[ModCondition('phosphorylation', 'T', '185')],
db_refs={'HGNC': hgnc_client.get_hgnc_id('MAP2K1')})
mapk1_pp = Agent('MAP2K1',
mods=[ModCondition('phosphorylation', 'T', '185'),
ModCondition('phosphorylation', 'Y', '187')],
db_refs={'HGNC': hgnc_client.get_hgnc_id('MAP2K1')})
stmt1 = ActiveForm(ras, 'gtpbound', True)
stmt2 = ActiveForm(mapk1_p, 'kinase', True)
stmt3 = ActiveForm(mapk1_pp, 'kinase', True)
for i, stmt in enumerate((stmt1, stmt2, stmt3)):
pba = pa.PybelAssembler([stmt])
belgraph = pba.make_model()
if i == 2:
assert len(belgraph) == 3, len(belgraph)
else:
assert len(belgraph) == 2, len(belgraph)
def test_complex():
egfr = Agent('EGFR', db_refs={'HGNC': id('EGFR')})
grb2 = Agent('GRB2', db_refs={'HGNC': id('GRB2')})
sos = Agent('SOS1', db_refs={'HGNC': id('SOS1')})
stmt = Complex([egfr, grb2, sos])
pba = pa.PybelAssembler([stmt])
belgraph = pba.make_model()
# The graph should contain the node for the complex as well as nodes
# for all of the members
assert len(belgraph) == 4
assert egfr_grb2_sos1_complex_dsl in belgraph
for member in egfr_grb2_sos1_complex_dsl.members:
assert member in belgraph
def test_rxn_no_controller():
glu = Agent('D-glucose', db_refs={'CHEBI': 'CHEBI:17634'})
g6p = Agent('D-glucopyranose 6-phosphate', db_refs={'CHEBI': 'CHEBI:4170'})
stmt = Conversion(None, [glu], [g6p])
pba = pa.PybelAssembler([stmt])
belgraph = pba.make_model()
# The graph should contain the node for the reaction as well as nodes
# for all of the members
assert chebi_17534_to_4170 in belgraph
for reactant in chebi_17534_to_4170.reactants:
assert reactant in belgraph
assert belgraph.number_of_nodes() == 3, belgraph.number_of_nodes()
# TODO check edge chebi_17534_to_4170 hasReactant chebi_17534
for product in chebi_17534_to_4170.products:
assert product in belgraph
# TODO check edge chebi_17534_to_4170 hasProduct chebi_4170
def test_rxn_with_controller():
hk1 = Agent('HK1', db_refs={'HGNC': id('HK1')})
glu = Agent('D-glucose', db_refs={'CHEBI': 'CHEBI:17634'})
g6p = Agent('D-glucopyranose 6-phosphate', db_refs={'CHEBI': 'CHEBI:4170'})
stmt = Conversion(hk1, [glu], [g6p])
pba = pa.PybelAssembler([stmt])
belgraph = pba.make_model()
# check the catalyst makes it
assert protein(namespace='HGNC', name='HK1', identifier=id('HK1')) \
in belgraph
# The reaction data should be the same as before
assert chebi_17534 in belgraph
assert chebi_4170 in belgraph
assert chebi_17534_to_4170 in belgraph
# The graph should contain the node for the reaction as well as nodes
# for all of the members
assert belgraph.number_of_nodes() == 4, belgraph.number_of_nodes()
def test_autophosphorylation():
egfr = Agent('EGFR', db_refs={'HGNC': id('EGFR')})
stmt = Autophosphorylation(egfr, 'Y', '1173')
stmt_hash = stmt.get_hash(refresh=True)
pba = pa.PybelAssembler([stmt])
belgraph = pba.make_model()
assert len(belgraph) == 2
assert egfr_dsl in belgraph
egfr_phos_node = egfr_dsl.with_variants(egfr_phos_dsl)
assert egfr_dsl in belgraph
assert egfr_phos_node in belgraph
assert belgraph.number_of_nodes() == 2
assert belgraph.number_of_edges() == 2
# There will be two edges between these nodes
edge_dicts = list(belgraph.get_edge_data(egfr_dsl,
egfr_phos_node).values())
assert {pc.RELATION: pc.DIRECTLY_INCREASES,
pc.ANNOTATIONS: {
'stmt_hash': {stmt_hash: True},
'uuid': {stmt.uuid: True},
'belief': {stmt.belief: True},
}} in edge_dicts
# Test an autophosphorylation with a bound condition
tab1 = Agent('TAB1', db_refs={'HGNC': id('TAB1')})
p38_tab1 = Agent('MAPK14', bound_conditions=[BoundCondition(tab1)],
db_refs={'HGNC': id('MAPK14')})
stmt = Autophosphorylation(p38_tab1, 'Y', '100')
pba = pa.PybelAssembler([stmt])
belgraph = pba.make_model()
assert belgraph.number_of_nodes() == 4
assert belgraph.number_of_edges() == 4
def test_bound_condition():
egfr = Agent('EGFR', db_refs={'HGNC': id('EGFR')})
grb2 = Agent('GRB2', db_refs={'HGNC': id('GRB2')})
ras = Agent('KRAS', db_refs={'HGNC': '6407'})
sos1_bound = Agent(
'SOS1', mods=[ModCondition('phosphorylation')],
bound_conditions=[BoundCondition(egfr), BoundCondition(grb2)],
db_refs={'HGNC': id('SOS1')}
)
stmt = Gef(sos1_bound, ras)
stmt_hash = stmt.get_hash(refresh=True)
pba = pa.PybelAssembler([stmt])
belgraph = pba.make_model()
assert len(belgraph) == 6
assert belgraph.number_of_edges() == 5
# Don't bother to check the tuple, which is now generated by
# PyBEL directly, but check the node data
assert egfr_grb2_sos1_phos_complex_dsl in belgraph
assert kras_node in belgraph
assert (egfr_grb2_sos1_phos_complex_dsl, kras_node) in belgraph.edges()
edge_data = (
egfr_grb2_sos1_phos_complex_dsl,
kras_node,
{
pc.RELATION: pc.DIRECTLY_INCREASES,
pc.OBJECT: activity('gtp'),
pc.ANNOTATIONS: {
'stmt_hash': {stmt_hash: True},
'uuid': {stmt.uuid: True},
'belief': {stmt.belief: True},
},
},
)
belgraph_edges = belgraph.edges(data=True)
assert edge_data in belgraph_edges, belgraph_edges
def test_transphosphorylation():
egfr = Agent('EGFR', db_refs={'HGNC': id('EGFR')})
egfr_dimer = Agent('EGFR', bound_conditions=[BoundCondition(egfr)],
db_refs={'HGNC': id('EGFR')})
stmt = Transphosphorylation(egfr_dimer, 'Y', '1173')
stmt_hash = stmt.get_hash(refresh=True)
pba = pa.PybelAssembler([stmt])
belgraph = pba.make_model()
assert belgraph.number_of_nodes() == 3
assert belgraph.number_of_edges() == 3
egfr_dimer_node = complex_abundance([egfr_dsl, egfr_dsl])
egfr_phos_node = egfr_dsl.with_variants(pmod('Ph', 'Tyr', 1173))
edge_data = get_edge_data(belgraph, egfr_dimer_node, egfr_phos_node)
assert edge_data == {
pc.RELATION: pc.DIRECTLY_INCREASES,
pc.ANNOTATIONS: {
'stmt_hash': {stmt_hash: True},
'uuid': {stmt.uuid: True},
'belief': {stmt.belief: True},
},
}, edge_data
"""
def test_translocation():
foxo = Agent('FOXO1', db_refs={'HGNC': id('FOXO1')})
stmt = Translocation(foxo, 'cytoplasm', 'nucleus')
nuc_go = 'GO:0005634'
cyto_go = 'GO:0005737'
pba = pa.PybelAssembler([stmt])
belgraph = pba.make_model()
assert len(belgraph) == 1
"""
def test_complex_with_pmod():
sos1_phos = Agent('SOS1',
mods=[ModCondition('phosphorylation', 'Y', '100')],
db_refs={'HGNC': id('SOS1')})
grb2 = Agent('GRB2', db_refs={'HGNC': id('GRB2')})
egfr = Agent('EGFR', db_refs={'HGNC': id('EGFR')})
stmt = Complex([sos1_phos, grb2, egfr])
pba = pa.PybelAssembler([stmt])
belgraph = pba.make_model()
assert belgraph.number_of_nodes() == 5
assert belgraph.number_of_edges() == 4
egfr_grb2_sos_phos_tyr_100 = complex_abundance([
egfr_dsl,
grb2_dsl,
sos1_dsl.with_variants(pmod('Ph', 'Tyr', 100))
])
assert sos1_dsl in belgraph
assert egfr_grb2_sos_phos_tyr_100 in belgraph
for member in egfr_grb2_sos_phos_tyr_100.members:
assert member in belgraph
def test_complex_with_complex():
grb2 = Agent('GRB2', db_refs={'HGNC': id('GRB2')})
egfr_grb2 = Agent('EGFR', db_refs={'HGNC': id('EGFR')},
bound_conditions=[BoundCondition(grb2)])
sos1_phos = Agent('SOS1',
mods=[ModCondition('phosphorylation', 'Y', '100')],
db_refs={'HGNC': id('SOS1')})
stmt = Complex([sos1_phos, egfr_grb2])
pba = pa.PybelAssembler([stmt])
belgraph = pba.make_model()
assert len(belgraph) == 6
assert belgraph.number_of_edges() == 5
egfr_grb2_complex = complex_abundance([egfr_dsl, grb2_dsl])
egfr_grb2_complex_sos1_phos_complex = complex_abundance([
egfr_grb2_complex,
sos1_dsl.with_variants(pmod('Ph', 'Tyr', 100))
])
assert egfr_grb2_complex in belgraph
for member in egfr_grb2_complex.members:
assert member in belgraph
assert egfr_grb2_complex_sos1_phos_complex in belgraph
for member in egfr_grb2_complex_sos1_phos_complex.members:
assert member in belgraph
def test_no_activity_on_bioprocess():
yfg_agent = Agent('PPP1R13L', db_refs={'HGNC': id('PPP1R13L')})
apoptosis_agent = Agent('apoptotic process', db_refs={'GO': 'GO:0006915'})
stmt = Activation(yfg_agent, apoptosis_agent)
pba = pa.PybelAssembler([stmt])
belgraph = pba.make_model()
assert len(belgraph) == 2
assert belgraph.number_of_edges() == 1
yfg_pybel = protein('HGNC', 'PPP1R13L', identifier='18838')
apoptosis_pybel = bioprocess('GO', name='apoptotic process',
identifier='GO:0006915')
assert yfg_pybel in belgraph
assert apoptosis_pybel in belgraph
_, _, e = list(belgraph.edges(data=True))[0]
assert pc.OBJECT not in e
def test_belgraph_to_signed_graph():
braf_no_act = Agent('BRAF', db_refs={'HGNC': '1097', 'UP': 'P15056'})
mek = Agent('MAP2K1', db_refs={'HGNC': '6840', 'UP': 'Q02750'})
stmt = Activation(braf_no_act, mek)
hsh = stmt.get_hash(refresh=True)
pba = pa.PybelAssembler([stmt])
belgraph = pba.make_model()
pb_seg = pa.belgraph_to_signed_graph(belgraph, propagate_annotations=True)
assert len(pb_seg.edges) == 1
edge = (braf_dsl, map2k1_dsl, 0)
assert edge in pb_seg.edges
edge_dict = pb_seg.edges.get(edge)
assert edge_dict
assert 'stmt_hash' in edge_dict
assert isinstance(edge_dict['stmt_hash'], int)
assert hsh == edge_dict['stmt_hash']
assert 'uuid' in edge_dict
assert isinstance(edge_dict['uuid'], str)
assert stmt.uuid == edge_dict['uuid']
assert 'belief' in edge_dict
assert isinstance(edge_dict['belief'], (float, int))
assert stmt.belief == edge_dict['belief']
def test_context_and_annotations():
braf_no_act = Agent('BRAF', db_refs={'HGNC': '1097', 'UP': 'P15056'})
mek = Agent('MAP2K1', db_refs={'HGNC': '6840', 'UP': 'Q02750'})
ev = Evidence(source_api='reach',
annotations={'string_val': 'x',
'int_val': 5,
'dict_val': {'x': 5},
'list_val': ['a', 'b']},
context=BioContext(
cell_type=RefContext('HCC366',
db_refs={'EFO': '0003131'})))
stmt = Activation(braf_no_act, mek, evidence=[ev])
pba = pa.PybelAssembler([stmt])
belgraph = pba.make_model()
_, _, data = list(belgraph.edges(data=True))[0]
assert data['annotations']['cell_type'] == {'EFO:0003131': True}
assert 'string_val' not in data['annotations']
pba = pa.PybelAssembler([stmt], annotations_to_include=['string_val',
'int_val',
'dict_val',
'list_val'])
belgraph = pba.make_model()
_, _, data = list(belgraph.edges(data=True))[0]
assert data['annotations']['cell_type'] == {'EFO:0003131': True}
assert data['annotations']['string_val'] == {'x': True}
assert data['annotations']['int_val'] == {5: True}
assert 'dict_val' not in data['annotations']
assert data['annotations']['list_val'] == {'a': True, 'b': True}
|
{
"content_hash": "1b063aff2356e797b210e3ff06c3632a",
"timestamp": "",
"source": "github",
"line_count": 687,
"max_line_length": 90,
"avg_line_length": 37.71033478893741,
"alnum_prop": 0.5971359092137261,
"repo_name": "sorgerlab/indra",
"id": "6fd8cbcd3efacd0426a304a793cffe93e635cfc9",
"size": "25932",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "indra/tests/test_pybel_assembler.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "169"
},
{
"name": "Dockerfile",
"bytes": "1710"
},
{
"name": "HTML",
"bytes": "28917"
},
{
"name": "JavaScript",
"bytes": "13276"
},
{
"name": "Python",
"bytes": "3520769"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import pytz
from django.contrib.postgres.fields.jsonb import JSONField
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from trello_reporter.charting.constants import INITIAL_COLUMNS, SPRINT_COMMITMENT_COLUMNS
class TrelloUser(models.Model):
""" contains data about trello users """
username = models.CharField(max_length=255, db_index=True, unique=True)
full_name = models.CharField(max_length=255, blank=True, null=True)
trello_id = models.CharField(max_length=32, db_index=True)
timezone = models.CharField(max_length=63,
choices=zip(pytz.common_timezones, pytz.common_timezones),
default="UTC")
# we DO NOT store user's token persistently, ever
last_login = models.DateTimeField(blank=True, null=True)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=True) # FIXME: in prod
USERNAME_FIELD = "username"
REQUIRED_FIELDS = ["trello_id"]
def __unicode__(self):
return "%s (%s)" % (self.username, self.full_name)
# User API
@property
def is_anonymous(self):
"""
Always return False. This is a way of comparing User objects to
anonymous users.
"""
return False
@property
def is_authenticated(self):
"""
Always return True. This is a way to tell if the user has been
authenticated in templates.
"""
return True
def has_module_perms(self, app_label):
return True
def has_perm(self, perm):
return True
@classmethod
def get_or_create(cls, trello_id, username, full_name=None):
obj, created = cls.objects.get_or_create(trello_id=trello_id, username=username)
if full_name and obj.full_name != full_name:
obj.full_name = full_name
obj.save()
return obj
class KeyValQuerySet(models.QuerySet):
def for_key(self, key):
return self.filter(key=key)
def for_user(self, user_id):
return self.filter(value__user_id=user_id)
def for_board(self, board_id):
return self.filter(value__board_id=board_id)
def get_or_create_setting(self, key, user_id=None, board_id=None, default=None):
q = {"key": key}
if user_id:
q["value__user_id"] = user_id
if board_id:
q["value__board_id"] = board_id
try:
return self.get(**q)
except ObjectDoesNotExist:
value = {}
            if user_id:
value["user_id"] = user_id
if board_id:
value["board_id"] = board_id
if default:
value.update(default)
return self.create(key=key, value=value)
class KeyValManager(models.Manager):
def displayed_cols_in_board_detail(self, user, board):
return self.get_or_create_setting(
KeyVal.DISPLAYED_COLS_IN_BOARD_DETAIL, user_id=user.id, board_id=board.id,
default={"columns": INITIAL_COLUMNS}
)
def sprint_commitment_columns(self, board):
return self.get_or_create_setting(
KeyVal.SPRINT_COMMITMENT_COLS, board_id=board.id,
default={"columns": SPRINT_COMMITMENT_COLUMNS}
)
def board_messages(self, board):
"""
{
"messages": [
{"message": "..."}
]
}
:param board:
:return:
"""
return self.get_or_create_setting(
KeyVal.BOARD_MESSAGES, board_id=board.id,
default={"messages": []}
)
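# Illustrative usage sketch (not part of the original module), assuming `board`
# is a saved Board instance and the KeyVal model defined below is migrated:
#     setting = KeyVal.objects.board_messages(board)
#     setting.value["messages"].append({"message": "Sprint starts Monday"})
#     setting.save()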
class KeyVal(models.Model):
""" key & value table """
key = models.CharField(max_length=63, db_index=True)
value = JSONField()
objects = KeyValManager.from_queryset(KeyValQuerySet)()
def __unicode__(self):
return "%s: %s" % (self.key, self.value)
DISPLAYED_COLS_IN_BOARD_DETAIL = "DISPLAYED_COLS_IN_BOARD_DETAIL"
SPRINT_COMMITMENT_COLS = "SPRINT_COMMITMENT_COLS"
BOARD_MESSAGES = "BOARD_MESSAGES"
|
{
"content_hash": "4ea9a629cc24a4bc67f4f298c66f0639",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 90,
"avg_line_length": 30.607407407407408,
"alnum_prop": 0.600919651500484,
"repo_name": "TomasTomecek/trello-reporter",
"id": "0ba10ab10614236f007014544e8a8443512020af",
"size": "4132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trello_reporter/authentication/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "419"
},
{
"name": "HTML",
"bytes": "30644"
},
{
"name": "JavaScript",
"bytes": "11694"
},
{
"name": "Makefile",
"bytes": "618"
},
{
"name": "Python",
"bytes": "129269"
},
{
"name": "Shell",
"bytes": "483"
}
],
"symlink_target": ""
}
|
""" Creating datasets and models associated to a cluster
"""
from .world import world, setup_module, teardown_module
from . import create_source_steps as source_create
from . import create_dataset_steps as dataset_create
from . import create_model_steps as model_create
from . import create_cluster_steps as cluster_create
from . import compare_predictions_steps as prediction_compare
class TestClusterDerived(object):
def setup(self):
"""
Debug information
"""
print("\n-------------------\nTests in: %s\n" % __name__)
def teardown(self):
"""
Debug information
"""
print("\nEnd of tests in: %s\n-------------------\n" % __name__)
def test_scenario1(self):
"""
Scenario: Successfully creating datasets for first centroid of a cluster:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create a cluster
And I wait until the cluster is ready less than <time_3> secs
When I create a dataset associated to centroid "<centroid_id>"
And I wait until the dataset is ready less than <time_4> secs
Then the dataset is associated to the centroid "<centroid_id>" of the cluster
Examples:
| data | time_1 | time_2 | time_3 | centroid_id | time_4 |
| ../data/iris.csv | 10 | 10 | 40 | 000001 | 10 |
"""
print(self.test_scenario1.__doc__)
examples = [
['data/iris.csv', '10', '10', '40', '000001', '10']]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
cluster_create.i_create_a_cluster(self)
cluster_create.the_cluster_is_finished_in_less_than(self, example[3])
dataset_create.i_create_a_dataset_from_cluster(self, example[4])
dataset_create.the_dataset_is_finished_in_less_than(self, example[5])
dataset_create.is_associated_to_centroid_id(self, example[4])
def test_scenario2(self):
"""
Scenario: Successfully creating models for first centroid of a cluster:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create a cluster with options "<options>"
And I wait until the cluster is ready less than <time_3> secs
When I create a model associated to centroid "<centroid_id>"
And I wait until the model is ready less than <time_4> secs
Then the model is associated to the centroid "<centroid_id>" of the cluster
Examples:
| data | time_1 | time_2 | time_3 | centroid_id | time_4 |
| ../data/iris.csv | 10 | 10 | 40 | 000001 | 10 |
"""
print(self.test_scenario2.__doc__)
examples = [
['data/iris.csv', '10', '10', '40', '000001', '10', '{"model_clusters": true}']]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
cluster_create.i_create_a_cluster_with_options(self, example[6])
cluster_create.the_cluster_is_finished_in_less_than(self, example[3])
model_create.i_create_a_model_from_cluster(self, example[4])
model_create.the_model_is_finished_in_less_than(self, example[5])
model_create.is_associated_to_centroid_id(self, example[4])
def test_scenario3(self):
"""
Scenario: Successfully getting the closest point in a cluster:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create a cluster
And I wait until the cluster is ready less than <time_3> secs
And I create a local cluster
Then the data point in the cluster closest to "<reference>" is "<closest>"
Examples:
| data | time_1 | time_2 | time_3 | reference | closest |
"""
print(self.test_scenario3.__doc__)
examples = [
['data/iris.csv', '10', '10', '40',
'{"petal length": 1.4, "petal width": 0.2,'
' "sepal width": 3.0, "sepal length": 4.89,'
' "species": "Iris-setosa"}',
'{"distance": 0.001894153207990619, "data":'
' {"petal length": "1.4", "petal width": "0.2",'
' "sepal width": "3.0", "sepal length": "4.9",'
' "species": "Iris-setosa"}}'],
['data/spam_4w.csv', '10', '10', '40',
'{"Message": "mobile"}',
'{"distance": 0.0, "data":'
' {"Message": "mobile", "Type": "spam"}}']]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
cluster_create.i_create_a_cluster(self)
cluster_create.the_cluster_is_finished_in_less_than(self, example[3])
prediction_compare.i_create_a_local_cluster(self)
cluster_create.closest_in_cluster(self, example[4], example[5])
def test_scenario4(self):
"""
Scenario: Successfully getting the closest centroid in a cluster:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create a cluster
And I wait until the cluster is ready less than <time_3> secs
And I create a local cluster
Then the centroid in the cluster closest to "<reference>" is "<closest>"
Examples:
| data | time_1 | time_2 | time_3 | reference | closest |
"""
print(self.test_scenario4.__doc__)
examples = [
['data/spam_4w.csv', '10', '10', '40',
'{"Message": "free"}',
'000005']]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
cluster_create.i_create_a_cluster(self)
cluster_create.the_cluster_is_finished_in_less_than(self, example[3])
prediction_compare.i_create_a_local_cluster(self)
cluster_create.closest_centroid_in_cluster(self, example[4], example[5])
|
{
"content_hash": "2fa0a76215e0bd48639f2ae618f3bf05",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 115,
"avg_line_length": 49.80368098159509,
"alnum_prop": 0.5546932742054693,
"repo_name": "mmerce/python",
"id": "3f6480662d16fa7227d40d0fbef1aff38d8c7b3f",
"size": "8720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bigml/tests/test_24_cluster_derived.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1531559"
}
],
"symlink_target": ""
}
|
import pytest
from sockeye_contrib import rouge
test_cases = [(
["this is a test", "another test case"], ["this is a test case", "another test case"], 0.9444444394753087,
0.928571423622449, 0.9338624338620563),
(["this is a single test case"], ["this is a single test case"], 0.999999995, 0.999999995,
0.9999999999995),
(["single test case"], ["another single test case"], 0.8571428522448981, 0.7999999952000001,
0.8241758241756372),
(["no overlap between sentences"], ["this is another test case"], 0.0, 0.0, 0.0),
(["exact match in the test case", "another exact match"],
["exact match in the test case", "another exact match"], 0.999999995, 0.999999995, 0.9999999999995)]
@pytest.mark.parametrize("hypotheses, references, rouge1_score, rouge2_score, rougel_score", test_cases)
def test_rouge_1(hypotheses, references, rouge1_score, rouge2_score, rougel_score):
rouge_score = rouge.rouge_1(hypotheses, references)
assert rouge_score == rouge1_score
@pytest.mark.parametrize("hypotheses, references, rouge1_score, rouge2_score, rougel_score", test_cases)
def test_rouge_2(hypotheses, references, rouge1_score, rouge2_score, rougel_score):
rouge_score = rouge.rouge_2(hypotheses, references)
assert rouge_score == rouge2_score
@pytest.mark.parametrize("hypotheses, references, rouge1_score, rouge2_score, rougel_score", test_cases)
def test_rouge_l(hypotheses, references, rouge1_score, rouge2_score, rougel_score):
rouge_score = rouge.rouge_l(hypotheses, references)
assert rouge_score == rougel_score
|
{
"content_hash": "5ecdc03f4df59c06cb8b0f5c645b0f3c",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 120,
"avg_line_length": 52,
"alnum_prop": 0.6820913461538461,
"repo_name": "awslabs/sockeye",
"id": "104d73c632f4e007933871847a13a63ee31674c5",
"size": "2236",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test/unit/test_rouge.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "824"
},
{
"name": "Dockerfile",
"bytes": "1028"
},
{
"name": "JavaScript",
"bytes": "4196"
},
{
"name": "Python",
"bytes": "1058983"
},
{
"name": "Shell",
"bytes": "2912"
},
{
"name": "TeX",
"bytes": "2847"
}
],
"symlink_target": ""
}
|
"""This example displays country type names, codes, and whether the country
supports a secure server.
Tags: spotlight.getCountriesByCriteria
"""
__author__ = 'api.jdilallo@gmail.com (Joseph DiLallo)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfaClient
def main(client):
# Initialize appropriate service.
spotlight_service = client.GetSpotlightService(
'https://advertisersapitest.doubleclick.net', 'v1.20')
# Set search criteria.
country_search_criteria = {
'secure': 'false'
}
# Get countries.
results = spotlight_service.GetCountriesByCriteria(country_search_criteria)[0]
# Display country names, codes and secure server support information.
if results:
for country in results:
print ('Country with name \'%s\', country code \'%s\', and supports a'
' secure server? \'%s\'.'
% (country['name'], country['countryCode'], country['secure']))
else:
print 'No countries found for your criteria.'
if __name__ == '__main__':
# Initialize client object.
client = DfaClient(path=os.path.join('..', '..', '..', '..'))
main(client)
|
{
"content_hash": "d02f7b41271772171faf505daa339095",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 80,
"avg_line_length": 28.511627906976745,
"alnum_prop": 0.6655791190864601,
"repo_name": "donspaulding/adspygoogle",
"id": "988fb810f365da987170e835b005221befba9541",
"size": "1844",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/adspygoogle/dfa/v1_20/get_countries.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3734067"
},
{
"name": "Shell",
"bytes": "603"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/dungeon/corellian_corvette/shared_corvette_search_neutral_destroy_03.iff"
result.attribute_template_id = -1
result.stfName("frn_n","frn_dented_drum")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "b28dff1a75dcd2ee3e800295d5321609",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 109,
"avg_line_length": 26.23076923076923,
"alnum_prop": 0.7126099706744868,
"repo_name": "anhstudios/swganh",
"id": "5eeb9d673f6769bec352db6288ed6fc3aea8ec51",
"size": "486",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/dungeon/corellian_corvette/shared_corvette_search_neutral_destroy_03.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1ClusterRoleList(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, api_version=None, items=None, kind=None, metadata=None):
"""
V1beta1ClusterRoleList - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'api_version': 'str',
'items': 'list[V1beta1ClusterRole]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
self.attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
self._api_version = api_version
self._items = items
self._kind = kind
self._metadata = metadata
@property
def api_version(self):
"""
Gets the api_version of this V1beta1ClusterRoleList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources
:return: The api_version of this V1beta1ClusterRoleList.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1beta1ClusterRoleList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources
:param api_version: The api_version of this V1beta1ClusterRoleList.
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""
Gets the items of this V1beta1ClusterRoleList.
Items is a list of ClusterRoles
:return: The items of this V1beta1ClusterRoleList.
:rtype: list[V1beta1ClusterRole]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this V1beta1ClusterRoleList.
Items is a list of ClusterRoles
:param items: The items of this V1beta1ClusterRoleList.
:type: list[V1beta1ClusterRole]
"""
if items is None:
raise ValueError("Invalid value for `items`, must not be `None`")
self._items = items
@property
def kind(self):
"""
Gets the kind of this V1beta1ClusterRoleList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
:return: The kind of this V1beta1ClusterRoleList.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1beta1ClusterRoleList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1beta1ClusterRoleList.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1beta1ClusterRoleList.
Standard object's metadata.
:return: The metadata of this V1beta1ClusterRoleList.
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1beta1ClusterRoleList.
Standard object's metadata.
:param metadata: The metadata of this V1beta1ClusterRoleList.
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
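# Illustrative only (not part of the generated client): a minimal sketch of
# constructing and serializing the model, assuming V1beta1ClusterRole and
# V1ListMeta are importable from the same generated package.
#     role_list = V1beta1ClusterRoleList(api_version='rbac.authorization.k8s.io/v1beta1',
#                                        kind='ClusterRoleList', items=[])
#     print(role_list.to_dict())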
|
{
"content_hash": "88cd68bbe3ae800fbbd08cece98844fa",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 272,
"avg_line_length": 31.838541666666668,
"alnum_prop": 0.5918534271225258,
"repo_name": "skuda/client-python",
"id": "82150a5ee76e254aedc9e5ab815bef904c5b0bac",
"size": "6130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubernetes/client/models/v1beta1_cluster_role_list.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5907789"
},
{
"name": "Shell",
"bytes": "8195"
}
],
"symlink_target": ""
}
|
from google.cloud import aiplatform_v1
def sample_update_model_deployment_monitoring_job():
# Create a client
client = aiplatform_v1.JobServiceClient()
# Initialize request argument(s)
model_deployment_monitoring_job = aiplatform_v1.ModelDeploymentMonitoringJob()
model_deployment_monitoring_job.display_name = "display_name_value"
model_deployment_monitoring_job.endpoint = "endpoint_value"
request = aiplatform_v1.UpdateModelDeploymentMonitoringJobRequest(
model_deployment_monitoring_job=model_deployment_monitoring_job,
)
# Make the request
operation = client.update_model_deployment_monitoring_job(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
# [END aiplatform_v1_generated_JobService_UpdateModelDeploymentMonitoringJob_sync]
|
{
"content_hash": "89108d6394a07ac582ff74572b3d22c0",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 82,
"avg_line_length": 32.96296296296296,
"alnum_prop": 0.7561797752808989,
"repo_name": "googleapis/python-aiplatform",
"id": "321b8f8b2ceab62f3ab47e69d5fb78ee0efab80a",
"size": "2320",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/aiplatform_v1_generated_job_service_update_model_deployment_monitoring_job_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "23977004"
},
{
"name": "Shell",
"bytes": "30668"
}
],
"symlink_target": ""
}
|
"""
XML token classes representing mark up and text.
The property idiom used in this module is discussed here:
http://docs.python.org/release/3.2/library/functions.html#property
"""
import re
import sys
from collections import OrderedDict
from .rex import XML_SPE_, ElemTagRE_, AttRE_
__all__ = [
'Cdata', 'Comment', 'Doctype', 'Empty', 'End', 'Error', 'PI', 'Start', 'StartOrEmpty', 'Tag',
'Text', 'Token', 'XmlDecl',
'tokenize',
'RexlibError', 'MarkupError', 'WellformednessError', 'SecondaryParsingError'
]
#
# Token Classes
#
class Token(object):
"""
Abstract superclass for all token classes.
"""
__slots__ = ['xml']
template = NotImplemented
# TODO: Move encoding to tokenizer function(s).
encoding = sys.getdefaultencoding()
MAX_REPR_WIDTH = 60
def __repr__(self):
"""
        Tokens longer than MAX_REPR_WIDTH will be sliced (with an ellipsis
added to indicate that the whole token is not being displayed). This
is useful for keeping the display of Text tokens (which can be very
        long) manageable.
To change the slice size used for all tokens, set the class variable
Token.MAX_REPR_WIDTH. Setting it to None will cause the full token
to be displayed; the usual Python convention,
eval(repr(token)) == token, then holds.
"""
text = self.xml
MAX_REPR_WIDTH = self.MAX_REPR_WIDTH
if MAX_REPR_WIDTH is not None and len(text) > MAX_REPR_WIDTH:
text = '{0}...'.format(text[:MAX_REPR_WIDTH])
return '{self.__class__.__name__}({text!r})'.format(
self=self, text=text)
def is_a(self, token_class, *_not_used):
"""
Check whether the current token is an instance of class token_class.
token.is_a(Start) reads as "token is a Start?"
Positional arguments are used by some token classes (Tag: *names,
PI: *targets).
"""
return isinstance(self, token_class)
def reserialize(self):
"""
Update self.xml based on internal state.
"""
raise NotImplementedError
class Text(Token):
"""
Plain text: a run of text not containing the "<" character.
"""
__slots__ = []
def __init__(self, xml):
self.xml = xml
@property
def isspace(self):
"""isspace property: token is whitespace"""
return self.xml.isspace()
class Tag(Token):
"""
Abstract superclass for Start, End, and Empty.
"""
__slots__ = ['_name']
def is_a(self, token_class, *names):
return (isinstance(self, token_class)
and (not names or self.name in names))
@property
def name(self):
"""name property: the tag name"""
return self._name
@name.setter
def name(self, name):
self._name = name
self.reserialize()
# TODO: add basic namespace extraction support for attributes?
@property
def ns_prefix(self):
"""ns_prefix property: namespace prefix of qualified tag name"""
qname = self._name
return ':' in qname and qname.split(':')[0] or ''
@ns_prefix.setter
def ns_prefix(self, prefix):
qname = self._name
if ':' in qname:
old_prefix, name = qname.split(':', 1)
else:
old_prefix, name = '', qname
if old_prefix != prefix:
# Don't reserialize needlessly.
if prefix:
self._name = '{prefix}:{name}'.format(**locals())
else:
self._name = name
self.reserialize()
class StartOrEmpty(Tag):
"""
Abstract superclass for Start and Empty
"""
__slots__ = ['attributes']
def __init__(self, xml):
self.xml = xml
# Parse element name and attributes.
m = ElemTagRE_.search(xml)
self._name = m.group('name')
self.attributes = attributes = AttributeDict(token=self)
for m in AttRE_.finditer(m.group('attributes')):
attributes[m.group('attribute_name')] = m.group('attribute_value')[1:-1]
def __getitem__(self, attribute_name):
return self.attributes.get(attribute_name)
def __setitem__(self, attribute_name, xml):
self.attributes[attribute_name] = xml
def __delitem__(self, attribute_name):
del self.attributes[attribute_name]
def __contains__(self, attribute_name):
return attribute_name in self.attributes
def delete_attribute(self, attribute_name):
if attribute_name in self.attributes:
del self.attributes[attribute_name]
def set_attribute_order(self, attribute_order=[], sort=False):
"""
Re-order attributes based on attribute_order list. Any attributes
listed in attribute_order will appear first (and in that order); any
remaining attributes will follow in original order. If sort is set
to true, remaining attributes will appear in case-insensitive sorted
order.
"""
self.attributes.set_attribute_order(attribute_order, sort)
def reserialize(self):
self.xml = self.template.format(self=self)
class Start(StartOrEmpty):
"""
A start tag: <tag> or <tag att="val">
"""
__slots__ = []
template = '<{self.name}{self.attributes.to_xml}>'
def __init__(self, xml):
super(Start, self).__init__(xml)
class Empty(StartOrEmpty):
"""
An empty tag: <tag/> or <tag att="val"/>
"""
__slots__ = []
template = '<{self.name}{self.attributes.to_xml}/>'
def __init__(self, xml):
super(Empty, self).__init__(xml)
class End(Tag):
"""
An end tag: </tag>
"""
__slots__ = []
template = '</{self.name}>'
def __init__(self, xml):
self.xml = xml
self._name = xml.split('/')[1][:-1].strip()
def reserialize(self):
self.xml = self.template.format(self=self)
class Comment(Token):
"""
A comment: <!-- comment -->
"""
__slots__ = ['_content']
template = '<!--{self.content}-->'
def __init__(self, xml):
self.xml = xml
self._content = xml[4:-3]
def reserialize(self):
self.xml = self.template.format(self=self)
@property
def content(self):
"""content property: the content of the comment"""
return self._content
@content.setter
def content(self, s):
self._content = s
self.reserialize()
class PI(Token):
"""
A processing instruction: <?target instruction?>
"""
__slots__ = ['_target', '_instruction', '_pseudoattributes']
template = '<?{self.target}{self.space}{self.instruction}?>'
def __init__(self, xml):
self.xml = xml
self._pseudoattributes = None
# Parse PI into target and instruction
# XML: <?target instruction?> (endslice -> -2 for xml)
# SGML: <?target instruction> (endslice -> -1 for sgml)
endslice = -2 if xml.endswith('?>') else -1
try:
self._target, self._instruction = xml[2:endslice].split(None, 1)
except ValueError:
# The PI has a target but no instruction.
self._target = xml[2:endslice]
self._instruction = ''
self._target = self._target.strip()
self._instruction = self._instruction.strip()
def __getitem__(self, attribute_name):
"""
Wait to parse instruction for pseudoattributes until first attribute
lookup.
"""
if not self._pseudoattributes:
self._parse_pseudoattributes()
return self._pseudoattributes.get(attribute_name)
def __setitem__(self, attribute_name, value):
"""
Replace a pseudoattribute if it exists; otherwise append it to the
end of the instruction.
"""
if not self._pseudoattributes:
self._parse_pseudoattributes()
self._pseudoattributes[attribute_name] = value
span = self._pseudoattributes.spans.get(attribute_name)
if span:
i, j = span
l = list(self._instruction)
l[i:j] = ' {attribute_name}="{value}"'.format(**locals())
self._instruction = ''.join(l)
else:
self._instruction += ' {attribute_name}="{value}"'.format(**locals())
self._locate_pseudoattributes()
self.reserialize()
def __delitem__(self, attribute_name):
if not self._pseudoattributes:
self._parse_pseudoattributes()
del self._pseudoattributes[attribute_name]
span = self._pseudoattributes.spans[attribute_name]
i, j = span
l = list(self._instruction)
del l[i:j]
self._instruction = ''.join(l)
self._locate_pseudoattributes()
self.reserialize()
def __contains__(self, attribute_name):
if self._pseudoattributes is not None:
return attribute_name in self._pseudoattributes
else:
return False
def _parse_pseudoattributes(self):
"""
Find anything attribute-like in the PI instruction and store as
attributes.
"""
self._pseudoattributes = AttributeDict(token=self)
# Add a spans attribute to store the offsets of pseudoattributes.
self._pseudoattributes.spans = {}
self._locate_pseudoattributes()
def _locate_pseudoattributes(self):
"""
Find the offsets of pseudoattributes within self._instruction.
This method is called whenever a pseudoattribute is updated
or deleted.
"""
spans = self._pseudoattributes.spans
pseudoattributes = self._pseudoattributes
if pseudoattributes:
# Clear any previous values.
pseudoattributes.clear()
spans.clear()
# Regex AttRE_, requires initial whitespace to match, hence the added
# ' ', below.
for m in AttRE_.finditer(' ' + self._instruction):
attribute_name = m.group('attribute_name')
            pseudoattributes[attribute_name] = m.group('attribute_value')[1:-1]  # strip delimiters
# Get the span for the attribute using the 'attribute' named group,
# which includes the preceding whitespace.
i, j = m.span('attribute')
# Compensate span for initial space added above.
if i - 1 < 0:
# avoid negative slices
spans[attribute_name] = (0, j - 1)
else:
spans[attribute_name] = (i - 1, j - 1)
def reserialize(self):
"""
Normalization note: instruction will be normalized to remove initial
whitespace.
"""
self._instruction = self._instruction.lstrip()
self.xml = self.template.format(self=self)
def is_a(self, token_class, *targets):
return (isinstance(self, token_class)
and (not targets or self.target in targets))
@property
def target(self):
"""target property: the PI target"""
return self._target
@target.setter
def target(self, val):
self._target = val
self.reserialize()
@property
def instruction(self):
"""instruction property: the PI instruction"""
return self._instruction
@instruction.setter
def instruction(self, val):
self._instruction = val
self._pseudoattributes = None
self.reserialize()
@property
def space(self):
"""
space property: space necessary to separate target and instruction
        (' ' if instruction is not empty, otherwise '').
"""
return ' ' if self.instruction.lstrip() else ''
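# Illustrative only (not part of the original module): a minimal sketch of the
# pseudoattribute handling documented above, assuming AttRE_ matches
# name="value" pairs inside the instruction.
def _example_pi_pseudoattributes():
    pi = PI('<?xml-stylesheet type="text/xsl" href="style.xsl"?>')
    pi['href'] = 'other.xsl'  # replaces the existing pseudoattribute in place
    del pi['type']            # removes a pseudoattribute from the instruction
    return pi.xml             # e.g. '<?xml-stylesheet href="other.xsl"?>'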
class XmlDecl(PI):
"""
An XML Declaration: <?xml version="1.0" encoding="utf-8" ...?>
"""
__slots__ = []
def __init__(self, xml):
super(XmlDecl, self).__init__(xml)
encoding = self['encoding'] # the XmlDecl encoding pseudoattribute
if encoding:
Token.encoding = encoding
doctype_parser_ = re.compile("""\
(?xs)
<!DOCTYPE\s+(?P<document_element>\S+)
(?:(?:\s+(?P<id_type>SYSTEM|PUBLIC))(?:\s+(?P<delim>["'])
(?P<id_value>.*?)(?P=delim))?)?
(?:\s*\[(?P<internal_subset>.*)\])?
\s*>
""")
class Doctype(Token):
"""
A DOCTYPE declaration: <!DOCTYPE tag ...>
For the following example:
<!DOCTYPE x:body SYSTEM "/S:/xml/dtd/xhtml1-strict-prefixed.dtd"
[<!ENTITY abc "xyz">]>
self.document_element -> 'x:body'
self.id_type -> 'SYSTEM'
self.id_value -> '/S:/xml/dtd/xhtml1-strict-prefixed.dtd'
self.internal_subset -> '<!ENTITY abc "xyz">'
"""
__slots__ = ['_document_element', '_id_type', '_id_value',
'_internal_subset']
template = '<!DOCTYPE {0}>'
def __init__(self, xml):
self.xml = xml
m = doctype_parser_.search(xml)
if m:
d = m.groupdict()
self._document_element = d['document_element']
self._id_type = d['id_type'] or ''
self._id_value = d['id_value'] or ''
self._internal_subset = d['internal_subset'] or ''
else:
raise SecondaryParsingError(
'unexpected DOCTYPE found: {self.xml}'
.format(self=self)
)
def reserialize(self):
l = [self.document_element]
if self._id_type:
l.append(self._id_type)
if self._id_value:
l.append('"{self._id_value}"'.format(self=self))
if self._internal_subset:
l.append('[{self._internal_subset}]'.format(self=self))
self.xml = self.template.format(' '.join(l))
@property
def document_element(self):
"""document_element property: the document element name"""
return self._document_element
@document_element.setter
def document_element(self, val):
self._document_element = val
self.reserialize()
@property
def id_type(self):
"""id_type property: either "PUBLIC" or "SYSTEM" or """""
return self._id_type
@id_type.setter
def id_type(self, val):
self._id_type = val
self.reserialize()
@property
def id_value(self):
"""id_value property: a public URI or system path"""
return self._id_value
@id_value.setter
def id_value(self, val):
self._id_value = val
self.reserialize()
@property
def internal_subset(self):
"""internal_subset property: the internal DTD subset"""
return self._internal_subset
@internal_subset.setter
def internal_subset(self, val):
self._internal_subset = val
self.reserialize()
class Cdata(Token):
"""
A CDATA section: <![CDATA[ literal <markup/> ]]>
"""
__slots__ = ['_content']
template = '<![CDATA[{self.content}]]>'
def __init__(self, xml):
self.xml = xml
self._content = self.xml[9:-3]
def reserialize(self):
self.xml = self.template.format(self=self)
@property
def content(self):
return self._content
@content.setter
def content(self, content):
self._content = content
self.reserialize()
@property
def escaped_content(self):
        return self._content.replace('&', '&amp;').replace('<', '&lt;')
def to_text_token(self):
"""
Escape markup characters and remove CDATA section delimiters, returning
a Text token.
"""
return Text(self.escaped_content)
class Error(Token):
"""
A markup error: Token starts with '<' but does not end with '>'.
"""
__slots__ = ['span', 'line', 'column']
def __init__(self, xml, span, line=None, column=None):
self.xml = xml
self.span = span # (start, end) position of token in original string
# TODO: Adjust tokenizer to add line number and column when desired.
# (Tokenizer option? Tokenizer subclass? Only calculate when/after an
# error is encountered?).
def reserialize(self):
pass
#
# Utility classes
#
class AttributeDict(OrderedDict):
"""
A dictionary that preserves the order in which attributes are added.
If the constructor is passed a dictionary with attributes, the order
for those attributes will be random; however, attributes added
subsequently will be ordered following the initial population of
attributes.
self.token is a reference back to the Start or Empty token that
instantiated the AttributeDict; it's used to trigger re-serialization
in the token when an attribute is changed via token.attributes.
"""
def __init__(self, d=None, token=None):
self.token = token
if d is None:
d = {}
OrderedDict.__init__(self, d)
def __setitem__(self, key, item):
OrderedDict.__setitem__(self, key, item)
if self.token:
self.token.reserialize()
def __missing__(self, key):
"""Set a default for missing key, rather than raising an exception."""
return ''
def __delitem__(self, key):
"""Remove items without raising exceptions."""
if key in self:
OrderedDict.__delitem__(self, key)
if self.token:
self.token.reserialize()
def set_attribute_order(self, attribute_order=None, sort=False):
"""
Re-order attributes based on attribute_order list. Any attributes
listed in attribute_order will appear first (and in that order); any
remaining attributes will follow in original order. If sort is set
to true, remaining attributes will appear in case-insensitive sorted
order.
"""
d = OrderedDict(self)
self.clear()
if attribute_order:
for attribute_name in attribute_order:
if attribute_name in d:
self[attribute_name] = d[attribute_name]
d.pop(attribute_name)
if sort and d:
# Do a case-insensitive sort on remaining attributes.
for key in sorted(d, key=str.lower):
self[key] = d[key]
elif d:
# If there are any remaining attribute names in d, add them now.
for key in d:
self[key] = d[key]
del d
if self.token:
self.token.reserialize()
@property
def to_xml(self):
"""
Serialize attribute dict to a string of attributes in the form
' attr1="value 1" attr2="value 2"'.
Normalization note: Attribute value delimiters will be normalized to
double quotes. Any double quotes appearing in attribute values are
        escaped as &quot;.
"""
try:
return ''.join(
' {attribute_name}="{attribute_value}"'
.format(
attribute_name=attribute_name,
                    attribute_value=attribute_value.replace('"', '&quot;')
)
for attribute_name, attribute_value in self.items()
)
except AttributeError:
raise RexlibError(
'Attribute value was not a string: {self}'
.format(self=self)
)
def has_key_nocase(self, key):
"""A case-insensitive version of 'attribute_name' in self."""
return key.lower() in [k.lower() for k in self]
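# Illustrative only (not part of the original module): a minimal sketch of the
# attribute re-ordering described above, assuming the .rex regexes parse start
# tags as documented.
def _example_attribute_order():
    start = Start('<p class="x" id="y" a="z">')
    start.attributes.set_attribute_order(['id'], sort=True)
    return start.xml  # e.g. '<p id="y" a="z" class="x">'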
#
# Exceptions
#
class RexlibError(Exception):
"""Superclass for all rexlib exceptions."""
def __init__(self, val):
self.val = val
def __str__(self):
return self.val
class MarkupError(RexlibError):
"""Used for syntax errors in markup."""
def __str__(self):
return 'Syntax error in markup: "{self.val}"'.format(self=self)
class WellformednessError(RexlibError):
"""Used for tag-nesting errors."""
def __str__(self):
return 'Wellformedness error: "{self.val}"'.format(self=self)
class SecondaryParsingError(RexlibError):
"""Used to indicate errors during secondary parsing."""
def __str__(self):
return 'Secondary parsing error: "{self.val}"'.format(self=self)
#
# The tokenizer
#
def tokenize(input, SPE_=XML_SPE_, error_stream=sys.stderr):
"""
A generator function for classifying each token matched by the REX shallow
parsing expression.
Set SPE_=SGML_SPE_ to tokenize SGML.
"""
tokenizer = SPE_.finditer
for m in tokenizer(input):
xml = m.group(0)
if xml[0] != '<':
# Token is text
yield Text(xml)
else:
if xml[-1] == '>':
# Token is markup
c = xml[1]
if c not in '/!?':
if xml[-2] == '/':
yield Empty(xml)
else:
yield Start(xml)
elif c == '/':
yield End(xml)
elif c == '!':
if xml.startswith('<!--'):
yield Comment(xml)
elif xml[2] == '[':
yield Cdata(xml)
elif xml.startswith('<!DOCTYPE'):
yield Doctype(xml)
elif c == '?':
if xml.startswith('<?xml '):
yield XmlDecl(xml)
else:
yield PI(xml)
else:
# REX's error condition (a markup item not ending with '>').
yield Error(xml, span=m.span())
if error_stream:
error_stream.write(
pprint_error_context(m, 'Syntax error in markup'))
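# Illustrative only (not part of the original module): a minimal round-trip
# sketch, assuming the .rex shallow parsing expression matches the input; the
# concatenated token.xml strings should reproduce the original document.
def _example_roundtrip(xml_string='<doc a="1">text<!-- note --></doc>'):
    return ''.join(token.xml for token in tokenize(xml_string, error_stream=None))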
#def stream_tokenizer(fin, SPE_=XML_SPE_):
# """
# Tokenize a steam to match objects.
# - one token lookahead
# - allows strings to be split into multiple tokens (so that really
# long strings don't accumulate in memory)
#
# TODO: There's a bug in the code below that I haven't gone back to find
# yet, the symptom being overlaping tokens.
#
# """
# m_prev = None
# for s in stream_reader(fin):
# if m_prev:
# xml = m_prev.group(0)
# if xml.startswith('<'):
# if xml.endswith('>'):
# yield m_prev
# else:
# # Incomplete markup; prepend to next buffer.
# s = '%s%s' % (xml, s)
# else:
# # Allowing text to be yielded as multiple tokens.
# yield m_prev
# m_prev = None
#
# for m in SPE_.finditer(s):
# xml = m.group(0)
# if m_prev:
# yield m_prev
# m_prev = m
# if m_prev:
# yield m_prev
#
# Utility functions
#
def pprint_error_context(m, msg, context_size=30):
"""
Prettyprint a markup error's context.
"""
s = m.string
end = m.end()
start_ellipsis, end_ellipsis = '', ''
if end >= context_size:
start = end - context_size
if end != context_size:
start_ellipsis = '...'
else:
# Start must not be negative due to the special meaning of negative
# slice indexes.
start = 0
if end + context_size < len(s):
end_ellipsis = '...'
before = repr(
'{0}"{1}'.format(start_ellipsis, s[start:end])
)[1:-1]
after = repr(
'{0}"{1}' .format(s[end:end + context_size], end_ellipsis)
)[1:-1]
indent = ' ' * len(before)
return (
'\n {msg}:\n {before}\n {indent}{after}\n'
.format(**locals())
)
|
{
"content_hash": "bd86d27ce83abb802ebf80ea390e9e80",
"timestamp": "",
"source": "github",
"line_count": 833,
"max_line_length": 99,
"avg_line_length": 28.4921968787515,
"alnum_prop": 0.5606303193730513,
"repo_name": "jdnier/rexlib",
"id": "10b6d90d4d3aa21e220ee41e14722f86002414ea",
"size": "23734",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tokens.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31156"
}
],
"symlink_target": ""
}
|
from pifpaf import drivers
class S3rverDriver(drivers.Driver):
DEFAULT_PORT = 4568
def __init__(self, port=DEFAULT_PORT, **kwargs):
"""Create a new s3rver instance."""
super(S3rverDriver, self).__init__(**kwargs)
self.port = port
@classmethod
def get_options(cls):
return [
{"param_decls": ["--port"],
"type": int,
"default": cls.DEFAULT_PORT,
"help": "port to use for s3rver"},
]
def _setUp(self):
super(S3rverDriver, self)._setUp()
c, _ = self._exec(
["s3rver",
"--directory", self.tempdir,
"--port", str(self.port)],
wait_for_line="(now listening on host|S3rver listening on)")
self.putenv("S3RVER_PORT", str(self.port))
self.putenv("URL", "s3://localhost:%d" % self.port)
self.putenv("HTTP_URL", "http://localhost:%d" % self.port)
|
{
"content_hash": "8c9c709ddc36358ca93977c4c4536c58",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 72,
"avg_line_length": 28.606060606060606,
"alnum_prop": 0.5307203389830508,
"repo_name": "jd/pifpaf",
"id": "1d855bfa983bd4d41a749417ab769b27a60f1f7c",
"size": "1490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pifpaf/drivers/s3rver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1689"
},
{
"name": "NASL",
"bytes": "1007"
},
{
"name": "Pascal",
"bytes": "681"
},
{
"name": "Python",
"bytes": "132049"
},
{
"name": "Shell",
"bytes": "426"
}
],
"symlink_target": ""
}
|
import numpy as np
from numpy import fromfile as np_ff
class head:
"""object version of 'header' function. Holds info about # of
particles, mass of each particle type, time and redshift, and any
flags in the header
contents:
self.npart
self.mass
self.time
self.flags
self.npartall
"""
def __init__(self, fname, byteswap=False):
"""read in the header.
        fname -- either a string containing the file name or an open file object
byteswap=False -- if True, swap the endian-ness on read-in (on little
endian machines, will assume file is big endian and vice versa)"""
if byteswap:
import sys
# check system byte-ordering.
byteord=sys.byteorder
if byteord == 'little':
bo_mark='<'
else:
bo_mark='>'
else:
# use native byte-ordering
bo_mark='='
import types
# start by reading in header:
if type(fname) is types.StringType:
f=open(fname, 'rb')
elif type(fname) is not types.FileType:
raise TypeError('argument must either be an open file or ' +
'a string containing a file name')
else:
f=fname
pad=np.fromfile(f, count=1, dtype=bo_mark+'i4')
npart=np.fromfile(f, count=6, dtype=bo_mark+'i4')
massarr=np.fromfile(f, count=6, dtype=bo_mark+'f8')
time_reds=np.fromfile(f, count=2, dtype=bo_mark+'f8')
flag=np.fromfile(f, count=2, dtype=bo_mark+'i4')
npartall=np.fromfile(f, count=6, dtype=bo_mark+'i4')
# moreFlags[1] should hold number of files over which snapshot is split
moreFlags=np.fromfile(f, count=2, dtype=bo_mark+'i4')
cosmoParams=np.fromfile(f, count=4, dtype=bo_mark+'f8')
# header is 256 bytes, rest is empty space:
empty=np.fromfile(f, count=24, dtype=bo_mark+'i4')
# done reading header; read f77 record for end of header
pad=np.fromfile(f, count=1, dtype=bo_mark+'i4')
if type(fname) is types.StringType: f.close()
self.npart=npart
self.mass=massarr
self.time=time_reds
self.flags=flag
self.npartall=npartall
self.numFiles=moreFlags[1]
self.cosmoParams=cosmoParams
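# Illustrative only (not part of the original module): a minimal sketch of
# reading a snapshot header, assuming fname points at an existing GADGET file.
def _example_read_header(fname='snap_000.0'):
    h = head(fname)
    return h.npart, h.time  # particle counts per type and (time, redshift)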
def read_snapshot(fname, byteswap=False, longIDs=True, outdat=None):
"""
get data from a GADGET snapshot.
INPUTS:
fname -- name of GADGET snapshot file
OPTIONAL INPUTS:
byteswap=False -- if True, swap the endian-ness on read-in (on little
endian machines, will assume file is big endian and vice versa)
    longIDs=True -- if True, assume ids are 8-byte ints, not 4-byte ints
    outdat=None -- optional name of a single block to return; one of
        ['pos', 'vel', 'ids', 'mass', 'hsml', 'dens', 'disp']
OUTPUTS
=======
    Returns the requested block (if outdat is given) or a dictionary.
"""
    if outdat is not None:
if outdat not in ['pos', 'vel', 'ids', 'mass', 'hsml', 'dens', 'disp']:
raise ValueError('outdat must be one of ' +
'[pos, vel, ids, mass, hsml, dens, disp]')
    # reshuffled snapshots carry 'subidorder' in their file name
    reshuffled = 'subidorder' in fname.split('_')
if longIDs:
myIDtype='i8'
myIDlen=8
else:
myIDtype='i4'
myIDlen=4
if byteswap:
import sys
# check system byte-ordering.
byteord=sys.byteorder
if byteord == 'little':
bo_mark='<'
else:
bo_mark='>'
else:
# use native byte-ordering
bo_mark='='
f=open(fname, 'rb')
# start by reading in header:
ghead=head(f, byteswap=byteswap)
npt=ghead.npart.sum()
if outdat == 'mass':
if ghead.mass.max() > 0:
raise TypeError('there is no mass block! Exiting')
f.seek(4, 1)
pos=np.fromfile(f, count=npt*3, dtype=bo_mark + 'f4').reshape((npt, 3))
f.seek(8, 1)
vel=np.fromfile(f, count=npt*3, dtype=bo_mark + 'f4').reshape((npt, 3))
f.seek(8, 1)
ids=np.fromfile(f, count=npt, dtype=bo_mark + myIDtype)
if not reshuffled:
f.close()
if outdat is not None:
return locals()[outdat]
else:
#return pos, vel, ids
snap = {'pos': pos, 'vel': vel, 'ids': ids, 'numpart': ghead.npart}
return snap
else:
f.seek(8, 1)
hsml=np.fromfile(f, count=npt, dtype=bo_mark + 'f4')
f.seek(8, 1)
dens=np.fromfile(f, count=npt, dtype=bo_mark + 'f4')
if reshuffled:
f.seek(8, 1)
disp=np.fromfile(f, count=npt, dtype=bo_mark + 'f4')
f.close()
if outdat is not None:
return locals()[outdat]
else:
#return pos, vel, ids, hsml, dens, disp
snap = {'pos': pos, 'vel': vel, 'ids': ids,
'numpart': ghead.npart, 'dens': dens,
'smooth_lenght': hsml}
return snap
def read_subtab_file(snapnum, fileNum, longIDs=True,
flag_group_velDisp=False,
only_groups=False, only_subhalos=False,
snapbase="",
return_info=False, quiet=False):
"""
use for reading in one individual subhalo_tab file
OUTPUTS
=======
Returns a dictionary.
"""
# use dictionaries:
group={}
subhalo={}
if longIDs:
id_type='int64'
else:
id_type='int32'
# if there are more than 1000 snapshots, need to change 3 -> 4
dirnum='%03d' % snapnum
# fname='%s/groups_%s/subhalo_tab_%s.%d' % (snapbase, dirnum,
# dirnum, fileNum)
fname = 'subhalo_tab_067.999'
f=open(fname, 'rb')
nGr=np_ff(f, dtype='i', count=1)[0]
tot_nGr=np_ff(f, dtype='i', count=1)[0]
nIDs=np_ff(f, dtype='i', count=1)[0]
tot_nIDs=np_ff(f, dtype='int64', count=1)[0]
nTask=np_ff(f, dtype='i', count=1)[0]
nSubs=np_ff(f, dtype='i', count=1)[0]
tot_nSubs=np_ff(f, dtype='i', count=1)[0]
if return_info:
f.close()
info = {'tot_fof_in_file': nGr, 'tot_fof_in_snap': tot_nGr,
'particles_in_fof_in_ID': nIDs, 'particles_in_fof_in_snap': tot_nIDs,
'tot_files_in_snap': nTask, 'sh_in file':nSubs, 'sh_in_snap': tot_nSubs}
return info
if nGr > 0:
group['len'] = np_ff(f, 'i', nGr)
group['offset'] = np_ff(f, 'u4', nGr)
group['mass'] = np_ff(f, 'f', nGr)
group['pos'] = np_ff(f, 'f', nGr*3).reshape(nGr, 3)
group['m_mean200'] = np_ff(f, 'f', nGr)
group['r_mean200'] = np_ff(f, 'f', nGr)
group['m_crit200'] = np_ff(f, 'f', nGr)
group['r_crit200'] = np_ff(f, 'f', nGr)
group['m_tophat'] = np_ff(f, 'f', nGr)
group['r_tophat'] = np_ff(f, 'f', nGr)
if flag_group_velDisp:
group['velDisp_mean200'] = np_ff(f, 'f', nGr)
group['velDisp_crit200'] = np_ff(f, 'f', nGr)
group['velDisp_tophat'] = np_ff(f, 'f', nGr)
group['contaminationCount'] = np_ff(f, 'i', nGr)
group['contaminationMass'] = np_ff(f, 'f', nGr)
group['n_subs'] = np_ff(f, 'i', nGr)
group['firstSub'] = np_ff(f, 'i', nGr)
if not only_groups:
if nSubs > 0:
subhalo['len'] = np_ff(f, 'i', nSubs)
subhalo['offset'] = np_ff(f, 'u4', nSubs)
subhalo['parent'] = np_ff(f, 'i', nSubs)
subhalo['mass'] = np_ff(f, 'f', nSubs)
subhalo['pos'] = np_ff(f, 'f', nSubs*3).reshape((nSubs, 3))
subhalo['vel'] = np_ff(f, 'f', nSubs*3).reshape((nSubs, 3))
subhalo['CM'] = np_ff(f, 'f', nSubs*3).reshape((nSubs, 3))
subhalo['spin'] = np_ff(f, 'f', nSubs*3).reshape((nSubs, 3))
subhalo['velDisp'] = np_ff(f, 'f', nSubs)
subhalo['vMax'] = np_ff(f, 'f', nSubs)
subhalo['rMax'] = np_ff(f, 'f', nSubs)
subhalo['halfMassRad'] = np_ff(f, 'f', nSubs)
subhalo['mostBoundID'] = np_ff(f, id_type, nSubs)
subhalo['GrNr'] = np_ff(f, 'i', nSubs)
f.close()
if not quiet:
print
print "Total num of groups =", tot_nGr
print "Total num of subgroups =", tot_nSubs, '\n'
print "Number of groups in file %d: %d" % (fileNum, nGr)
print "Number of subgroups in file %d: %d\n" % (fileNum, nSubs)
if only_groups:
return group
elif only_subhalos:
return subhalo
else:
out = {'group': group, 'subhalo': subhalo}
return out
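# Illustrative usage sketch (not part of the original module), assuming the
# hard-coded subhalo_tab file opened above ('subhalo_tab_067.999') is present
# in the working directory:
#     info = read_subtab_file(67, 0, return_info=True, quiet=True)
#     data = read_subtab_file(67, 0, quiet=True)
#     print data['group']['mass'][:10]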
def read_reshuffled_group(snapnum, groupNum, vel=False, longIDs=True,
outdatType='pos',
sfrFlag=False, bhFlag=False,
snapbase='',
snapname='snap_newMillen'):
"""
get the positions or velocities from a reshuffled snapshot file.
Note: need to work on subtab files (NOT grouptab files) throughout
OUTPUT
======
    Returns an array with the requested quantity (outdatType) for the group's particles.
"""
#import gadget as g
# cumulative number of groups read:
cumNgroups=0
cumNids=0
cumNpcls=0
# tells how many IDs have come before target group, will be used for
# correcting overflow.
target_nIDs=0
# how many particles are in target halo?
target_np=0
fileNum=0
# offsets overflow because of u4:
maxOffset=2**32
generalData=read_subtab_file(snapnum, fileNum, longIDs=longIDs,
snapbase=snapbase,
quiet=True, return_info=True)
    # number of files:
    nTasks=generalData['tot_files_in_snap']
# will hold cumulative number of groups for each group_tab file
lenArr=np.zeros(nTasks, dtype='int32')
# will hold cumulative number of ids for each snapshot_subidorder file
idArr=np.zeros(nTasks, dtype='int64')
for i in range(nTasks):
groupData=read_subtab_file(snapnum, i, longIDs=longIDs,
snapbase=snapbase,
quiet=True, return_info=True)
        nGroups=groupData['tot_fof_in_file']
        nIDs=groupData['particles_in_fof_in_ID']
if cumNgroups <= groupNum:
            nPcls=read_subtab_file(snapnum, i, longIDs=longIDs,
                                   snapbase=snapbase, quiet=True)['group']['len']
nPcls_file=nPcls.astype('int64').sum()
# in this case, target group is in the current file
if nGroups + cumNgroups > groupNum:
sum1=nPcls[:groupNum-cumNgroups].astype('int64').sum()
target_nIDs=cumNpcls + sum1
target_np=nPcls[groupNum-cumNgroups]
cumNpcls += nPcls_file
lenArr[i] = cumNgroups
idArr[i] = cumNids
cumNgroups += nGroups
cumNids += nIDs.astype('int64')
    if cumNgroups != generalData['tot_fof_in_snap']:
        raise ValueError('problem with number of groups!')
    if cumNids != generalData['particles_in_fof_in_snap']:
        raise ValueError('problem with number of IDs!')
subFile=lenArr.searchsorted(groupNum, 'right')-1
# find offset entry of relevant group:
    offset=read_subtab_file(snapnum, subFile, longIDs=longIDs,
                            snapbase=snapbase,
                            quiet=True)['group']['offset'][groupNum-lenArr[subFile]]
# correct offset for overflow
offset += (target_nIDs // maxOffset) * maxOffset
numSnapFiles=head(snapbase + 'snapdir_%03d/%s_subidorder_%03d.%d'
% (snapnum, snapname, snapnum, 0)).numFiles
npArr=np.zeros(numSnapFiles, dtype='int64')
for i in range(numSnapFiles):
npArr[i]=head(snapbase + 'snapdir_%03d/%s_subidorder_%03d.%d'
% (snapnum, snapname, snapnum, i)).npart[1]
npArr=npArr.cumsum()
snapFile1=npArr.searchsorted(offset, 'right')
snapFile2=npArr.searchsorted(offset + target_np, 'right')
if outdatType not in ['pos', 'vel', 'ids', 'mass', 'hsml', 'dens', 'disp']:
raise ValueError('outdat must be one of ' +
'[pos, vel, ids, mass, hsml, dens, disp]')
# handle first file differently from others:
if offset < npArr[0]:
startId=offset
else:
        startId=offset - npArr[snapFile1-1]
# startId=offset - npArr[snapFile1]
# in this case, all IDs are in one file:
# mysnap='%ssnapdir_%03d/%s_subidorder_%03d.%d' % (snapbase, snapnum, snapname,
# snapnum, snapFile1)
    mysnap='%ssnapdir_%03d/%s_subidorder_%03d.%d' % (snapbase, snapnum, snapname,
                                                     snapnum, snapFile1)
print target_np
if snapFile1 == snapFile2:
outdat=read_snapshot(mysnap, longIDs=longIDs,
outdat=outdatType)[startId:startId+target_np]
else:
# Handles groups split over multiple snapshots; should be working.
print 'using snapshot files %d-%d' % (snapFile1, snapFile2)
outdat=np.zeros((target_np,3), dtype='float32')
firstNum=npArr[snapFile1]-offset
outdat[:firstNum]=read_snapshot(mysnap, longIDs=longIDs,
outdat=outdatType)[startId:]
counter=firstNum
fileToDo=1
while (fileToDo < snapFile2-snapFile1):
mysnap='%ssnapdir_%03d/%s_subidorder_%03d.%d' % (snapbase, snapnum,
snapname, snapnum,
snapFile1+fileToDo)
numInFile=npArr[snapFile1+fileToDo]-npArr[snapFile1+fileToDo-1]
outdat[counter:counter+numInFile]=read_snapshot(mysnap,
longIDs=longIDs,
outdat=outdatType)
counter += numInFile
fileToDo += 1
mysnap='%ssnapdir_%03d/%s_subidorder_%03d.%d' % (snapbase, snapnum,
snapname,
snapnum, snapFile2)
outdat[counter:]=read_snapshot(mysnap, longIDs=longIDs,
outdat=outdatType)[:target_np-counter]
return outdat
|
{
"content_hash": "66586c789894f37de39bd64ef1c99d4c",
"timestamp": "",
"source": "github",
"line_count": 394,
"max_line_length": 95,
"avg_line_length": 36.870558375634516,
"alnum_prop": 0.5378949542231707,
"repo_name": "brunetto/MasterThesisCode",
"id": "1af4901a5388064694298a24dd075f05716e327b",
"size": "14527",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "master_code/otherMiscCode/tools_da_riordinare/read_fofGroups.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4029"
},
{
"name": "FORTRAN",
"bytes": "1130"
},
{
"name": "Python",
"bytes": "297674"
},
{
"name": "Shell",
"bytes": "267"
}
],
"symlink_target": ""
}
|
from plugin import Plugin
import re
class React(Plugin):
"""
Adds Slack emoji reactions to messages based on certain keywords
"""
def on_event(self, bot, event, response):
text = event['text']
response.update(timestamp=event['ts'])
# Add a cloud emoji if anyone mentions Overcast Network
if re.search(r'(overcast|ocn)', text, re.IGNORECASE):
response.update(name='cloud')
# Add more reactions here!
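        # For example, a hypothetical extra reaction could look like this
        # (keyword and emoji name are purely illustrative, not part of this plugin):
        # if re.search(r'(coffee)', text, re.IGNORECASE):
        #     response.update(name='coffee')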
# Post reaction if we have an emoji set
if response.get('name'):
bot.sc.api_call('reactions.add', **response)
|
{
"content_hash": "2056185dc84e19a259084947f7b9dd00",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 68,
"avg_line_length": 27.772727272727273,
"alnum_prop": 0.6202945990180033,
"repo_name": "Electroid/nimbus",
"id": "9b8ce0a5a9b508338303a8b560e2716ac795c313",
"size": "611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/react.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25987"
}
],
"symlink_target": ""
}
|
from docutils import core, nodes
from docutils.parsers.rst import directives, Directive
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name, TextLexer
class Pygments(Directive):
""" Source code syntax hightlighting for ReST syntax."""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {
'linenos': directives.flag,
'emphasize-lines': directives.unchanged_required,
}
has_content = True
def run(self):
self.assert_has_content()
try:
lexer = get_lexer_by_name(self.arguments[0])
except ValueError:
# no lexer found - use the text one instead of an exception
lexer = TextLexer()
args = {'noclasses': False}
if 'linenos' in self.options:
args['linenos'] = 'table'
if 'emphasize-lines' in self.options:
args['hl_lines'] = self.options['emphasize-lines'].split(',')
formatter = HtmlFormatter(**args)
parsed = highlight(u'\n'.join(self.content), lexer, formatter)
return [nodes.raw('', parsed, format='html')]
directives.register_directive('sourcecode', Pygments)
directives.register_directive('code-block', Pygments)
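# Example reST usage of the directive registered above (illustrative only;
# any lexer name known to Pygments can be given as the argument):
#
#   .. sourcecode:: python
#      :linenos:
#
#      print("hello world")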
def html_parts(input_string, source_path=None, destination_path=None,
input_encoding='unicode', doctitle=1, initial_header_level=1):
"""
Given an input string, returns a dictionary of HTML document parts.
Dictionary keys are the names of parts, and values are Unicode strings;
encoding is up to the client.
Parameters:
- `input_string`: A multi-line text string; required.
- `source_path`: Path to the source file or object. Optional, but useful
for diagnostic output (system messages).
- `destination_path`: Path to the file or object which will receive the
output; optional. Used for determining relative paths (stylesheets,
source links, etc.).
- `input_encoding`: The encoding of `input_string`. If it is an encoded
8-bit string, provide the correct encoding. If it is a Unicode string,
use "unicode", the default.
- `doctitle`: Disable the promotion of a lone top-level section title to
document title (and subsequent section title to document subtitle
promotion); enabled by default.
- `initial_header_level`: The initial level for header elements (e.g. 1
for "<h1>").
"""
overrides = {
'input_encoding': input_encoding,
'doctitle_xform': doctitle,
'initial_header_level': initial_header_level,
'report_level': 5
}
parts = core.publish_parts(
source=input_string, source_path=source_path,
destination_path=destination_path,
writer_name='html', settings_overrides=overrides)
return parts
def html_body(input_string, source_path=None, destination_path=None,
input_encoding='unicode', doctitle=1, initial_header_level=1):
"""
Given an input string, returns an HTML fragment as a string.
The return value is the contents of the <body> element.
Parameters (see `html_parts()` for the remainder):
- `output_encoding`: The desired encoding of the output. If a Unicode
string is desired, use the default value of "unicode" .
"""
parts = html_parts(
input_string=input_string, source_path=source_path,
destination_path=destination_path,
input_encoding=input_encoding, doctitle=doctitle,
initial_header_level=initial_header_level)
fragment = parts['html_body']
return fragment
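# -----------------------------------------------------------------------------
# Minimal usage sketch (illustrative; the reST snippet is made up):
#
#   fragment = html_body("Title\n=====\n\nSome *emphasised* reST text.")
#   # 'fragment' holds the rendered contents of the <body> element as a string.
# -----------------------------------------------------------------------------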
|
{
"content_hash": "afc5116c7980de34656d9a0455c988b8",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 77,
"avg_line_length": 37.45918367346939,
"alnum_prop": 0.6676654862435304,
"repo_name": "exciteresearch/landslide",
"id": "1bcdcdfefbfd35d766ad71b97e6c49ba563e1668",
"size": "3696",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "landslide/rst.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "82877"
},
{
"name": "HTML",
"bytes": "13733"
},
{
"name": "JavaScript",
"bytes": "48966"
},
{
"name": "Python",
"bytes": "53542"
}
],
"symlink_target": ""
}
|
import datetime
import traceback
from sqlalchemy.exc import IntegrityError, ConcurrentModificationError
from voodoo.typechecker import typecheck
from weblab.core.coordinator.sql.model import PostReservationRetrievedData
import weblab.core.coordinator.status as WSS
class PostReservationDataManager(object):
def __init__(self, session_maker, time_provider):
self._session_maker = session_maker
self.time_provider = time_provider
@typecheck(basestring, datetime.datetime, datetime.datetime, basestring)
def create(self, reservation_id, date, expiration_date, initial_data):
session = self._session_maker()
try:
registry = PostReservationRetrievedData(reservation_id = reservation_id, finished = False, date = date, expiration_date = expiration_date, initial_data = initial_data, end_data = None)
session.add(registry)
session.commit()
finally:
session.close()
def delete(self, reservation_id):
session = self._session_maker()
try:
reservation = session.query(PostReservationRetrievedData).filter(PostReservationRetrievedData.reservation_id == reservation_id).first()
if reservation is None:
return
session.delete(reservation)
session.commit()
finally:
session.close()
def finish(self, reservation_id, end_data):
session = self._session_maker()
try:
reservation = session.query(PostReservationRetrievedData).filter(PostReservationRetrievedData.reservation_id == reservation_id).first()
if reservation is None:
return
reservation.finished = True
reservation.end_data = end_data
session.add(reservation)
session.commit()
finally:
session.close()
def find(self, reservation_id):
session = self._session_maker()
try:
reservation = session.query(PostReservationRetrievedData).filter(PostReservationRetrievedData.reservation_id == reservation_id).first()
if reservation is None:
return None
return WSS.PostReservationStatus(reservation_id, reservation.finished, reservation.initial_data, reservation.end_data)
finally:
session.close()
##############################################################
#
# Clean expired PostReservationRetrievedData
#
def clean_expired(self):
session = self._session_maker()
try:
found = False
for expired_data in session.query(PostReservationRetrievedData).filter(PostReservationRetrievedData.expiration_date < self.time_provider.get_datetime()).all():
session.delete(expired_data)
found = True
if found:
try:
session.commit()
except (ConcurrentModificationError, IntegrityError):
# Somebody else did it
traceback.print_exc()
finally:
session.close()
def _clean(self):
session = self._session_maker()
try:
for registry in session.query(PostReservationRetrievedData).all():
session.delete(registry)
session.commit()
finally:
session.close()
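# -----------------------------------------------------------------------------
# Usage sketch (illustrative only; `session_maker` and `time_provider` are
# assumed to be supplied by the surrounding coordinator code):
#
#   manager = PostReservationDataManager(session_maker, time_provider)
#   now = datetime.datetime.utcnow()
#   manager.create('reservation-1', now, now + datetime.timedelta(hours=1), '{}')
#   manager.finish('reservation-1', end_data='{"result": "ok"}')
#   status = manager.find('reservation-1')  # -> WSS.PostReservationStatus or None
# -----------------------------------------------------------------------------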
|
{
"content_hash": "6222c5cae3b954ee875dd41a4e8513be",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 196,
"avg_line_length": 37.30769230769231,
"alnum_prop": 0.617378497790869,
"repo_name": "ganeshgore/myremolab",
"id": "99120953666e1dc1685b47a1842a10a604e163b4",
"size": "3783",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "server/src/weblab/core/coordinator/sql/post_reservation.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ASP",
"bytes": "4785"
},
{
"name": "C#",
"bytes": "265761"
},
{
"name": "CSS",
"bytes": "39653"
},
{
"name": "Java",
"bytes": "689284"
},
{
"name": "JavaScript",
"bytes": "74198"
},
{
"name": "PHP",
"bytes": "97324"
},
{
"name": "Python",
"bytes": "5335681"
},
{
"name": "Shell",
"bytes": "794"
},
{
"name": "VHDL",
"bytes": "1372"
}
],
"symlink_target": ""
}
|
from flask import Flask, request, render_template, redirect, url_for, jsonify
import requests
import json
import os
import xmltodict
app = Flask(__name__)
@app.route("/")
def form():
logs = sorted(os.listdir("/home/ubuntu/log"),reverse=True)
eventTime = []
alarmCategory = []
acquisitionMethod = []
emissionType = []
usage = []
threshold = []
nsfName = []
severity = []
for log in logs:
s = open("/home/ubuntu/log/"+log,"rb")
data = xmltodict.parse(s)
eventTime.append(data["notification"]["eventTime"])
alarmCategory.append(data["notification"]["i2nsf-system-detection-alarm"]["alarm-category"]["#text"])
acquisitionMethod.append(data["notification"]["i2nsf-system-detection-alarm"]["acquisition-method"]["#text"])
emissionType.append(data["notification"]["i2nsf-system-detection-alarm"]["emission-type"]["#text"])
usage.append(data["notification"]["i2nsf-system-detection-alarm"]["usage"])
threshold.append(data["notification"]["i2nsf-system-detection-alarm"]["threshold"])
nsfName.append(data["notification"]["i2nsf-system-detection-alarm"]["nsf-name"])
severity.append(data["notification"]["i2nsf-system-detection-alarm"]["severity"])
return render_template('notifDetail.html',
eventTime=eventTime,
alarmCategory=alarmCategory,
acquisitionMethod=acquisitionMethod,
emissionType=emissionType,
usage=usage,
threshold=threshold,
nsfName=nsfName,
severity=severity
)
# return render_template('notification.html',notification_list=log)
@app.route("/<log>")
def getData(log):
s = open("/home/ubuntu/log/"+log,"rb")
data = xmltodict.parse(s)
return render_template('notifDetail.html',
eventTime=data["notification"]["eventTime"],
alarmCategory=data["notification"]["i2nsf-system-detection-alarm"]["alarm-category"]["#text"],
acquisitionMethod=data["notification"]["i2nsf-system-detection-alarm"]["acquisition-method"]["#text"],
emissionType=data["notification"]["i2nsf-system-detection-alarm"]["emission-type"]["#text"],
usage=data["notification"]["i2nsf-system-detection-alarm"]["usage"],
threshold=data["notification"]["i2nsf-system-detection-alarm"]["threshold"],
nsfName=data["notification"]["i2nsf-system-detection-alarm"]["nsf-name"],
severity=data["notification"]["i2nsf-system-detection-alarm"]["severity"]
)
#@app.route("/", methods=['POST'])
#def my_form_post():
# text = request.form['text']
# return redirect(url_for('get_interface',iface=text))
@app.route("/empty")
def not_found():
return ("ERROR NOT FOUND")
if __name__ == '__main__':
app.run(host='10.0.0.14',port=8000)
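# -----------------------------------------------------------------------------
# Sketch of the notification XML the handlers above expect to find under
# /home/ubuntu/log (element values are illustrative; in the real messages the
# alarm fields carry XML attributes, which is why xmltodict exposes their text
# under the "#text" key):
#
#   <notification>
#     <eventTime>2022-01-01T00:00:00Z</eventTime>
#     <i2nsf-system-detection-alarm>
#       <alarm-category>memory-alarm</alarm-category>
#       <acquisition-method>subscription</acquisition-method>
#       <emission-type>on-change</emission-type>
#       <usage>90</usage>
#       <threshold>80</threshold>
#       <nsf-name>firewall</nsf-name>
#       <severity>high</severity>
#     </i2nsf-system-detection-alarm>
#   </notification>
# -----------------------------------------------------------------------------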
|
{
"content_hash": "56663476c7e6024e8b02ebafb228b5f9",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 111,
"avg_line_length": 37.9264705882353,
"alnum_prop": 0.7099651027530051,
"repo_name": "kimjinyong/i2nsf-framework",
"id": "d5239176d675b3f26b8b510737cb80dbc0617e3b",
"size": "2579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Hackathon-112/analyzer/web.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "4396520"
},
{
"name": "C++",
"bytes": "9389"
},
{
"name": "CSS",
"bytes": "51736"
},
{
"name": "Dockerfile",
"bytes": "3839"
},
{
"name": "Emacs Lisp",
"bytes": "24812"
},
{
"name": "Erlang",
"bytes": "1364078"
},
{
"name": "HTML",
"bytes": "42486541"
},
{
"name": "Hack",
"bytes": "6349"
},
{
"name": "Java",
"bytes": "7976"
},
{
"name": "JavaScript",
"bytes": "533000"
},
{
"name": "Makefile",
"bytes": "401170"
},
{
"name": "PHP",
"bytes": "164007"
},
{
"name": "Perl",
"bytes": "2188"
},
{
"name": "Python",
"bytes": "3004949"
},
{
"name": "QMake",
"bytes": "360"
},
{
"name": "Roff",
"bytes": "3906372"
},
{
"name": "Shell",
"bytes": "83872"
},
{
"name": "XSLT",
"bytes": "167018"
}
],
"symlink_target": ""
}
|
__author__ = 'max'
import theano.tensor as T
from lasagne import init
from lasagne.layers import MergeLayer
__all__ = [
"ChainCRFLayer",
"TreeAffineCRFLayer",
"TreeBiAffineCRFLayer",
]
class ChainCRFLayer(MergeLayer):
"""
ChainCRFLayer(incoming, num_labels, mask_input=None, W=init.GlorotUniform(), b=init.Constant(0.), **kwargs)
Parameters
----------
incoming : a :class:`lasagne.layers.Layer` instance or a tuple
The layer feeding into this layer, or the expected input shape.
The output of this layer should be a 3D tensor with shape
``(batch_size, input_length, num_input_features)``
num_labels : int
The number of labels of the crf layer
mask_input : :class:`lasagne.layers.Layer`
Layer which allows for a sequence mask to be input, for when sequences
are of variable length. Default `None`, which means no mask will be
supplied (i.e. all sequences are of the same length).
    W : Theano shared variable, expression, numpy array or callable
        Initial value, expression or initializer for the transition weights.
        These should be a tensor with shape
        ``(num_inputs, num_labels + 1, num_labels + 1)``, where ``num_inputs``
        is the size of the last (feature) dimension of the input and the extra
        label is an internal padding label.
        See :func:`lasagne.utils.create_param` for more information.
    b : Theano shared variable, expression, numpy array, callable or ``None``
        Initial value, expression or initializer for the biases. If set to
        ``None``, the layer will have no biases. Otherwise, biases should be
        a matrix with shape ``(num_labels + 1, num_labels + 1)``.
"""
def __init__(self, incoming, num_labels, mask_input=None, W=init.GlorotUniform(), b=init.Constant(0.), **kwargs):
# This layer inherits from a MergeLayer, because it can have two
# inputs - the layer input, and the mask.
# We will just provide the layer input as incomings, unless a mask input was provided.
self.input_shape = incoming.output_shape
incomings = [incoming]
self.mask_incoming_index = -1
if mask_input is not None:
incomings.append(mask_input)
self.mask_incoming_index = 1
super(ChainCRFLayer, self).__init__(incomings, **kwargs)
self.num_labels = num_labels + 1
self.pad_label_index = num_labels
num_inputs = self.input_shape[2]
self.W = self.add_param(W, (num_inputs, self.num_labels, self.num_labels), name="W")
if b is None:
self.b = None
else:
self.b = self.add_param(b, (self.num_labels, self.num_labels), name="b", regularizable=False)
def get_output_shape_for(self, input_shapes):
input_shape = input_shapes[0]
return input_shape[0], input_shape[1], self.num_labels, self.num_labels
def get_output_for(self, inputs, **kwargs):
"""
Compute this layer's output function given a symbolic input variable.
Parameters
----------
:param inputs: list of theano.TensorType
`inputs[0]` should always be the symbolic input variable. When
this layer has a mask input (i.e. was instantiated with
`mask_input != None`, indicating that the lengths of sequences in
each batch vary), `inputs` should have length 2, where `inputs[1]`
is the `mask`. The `mask` should be supplied as a Theano variable
denoting whether each time step in each sequence in the batch is
part of the sequence or not. `mask` should be a matrix of shape
``(n_batch, n_time_steps)`` where ``mask[i, j] = 1`` when ``j <=
(length of sequence i)`` and ``mask[i, j] = 0`` when ``j > (length
of sequence i)``.
:return: theano.TensorType
Symbolic output variable.
"""
input = inputs[0]
mask = None
if self.mask_incoming_index > 0:
mask = inputs[self.mask_incoming_index]
# compute out by tensor dot ([batch, length, input] * [input, num_label, num_label]
# the shape of out should be [batch, length, num_label, num_label]
out = T.tensordot(input, self.W, axes=[[2], [0]])
if self.b is not None:
b_shuffled = self.b.dimshuffle('x', 'x', 0, 1)
out = out + b_shuffled
if mask is not None:
mask_shuffled = mask.dimshuffle(0, 1, 'x', 'x')
out = out * mask_shuffled
return out
class TreeAffineCRFLayer(MergeLayer):
"""
TreeAffineCRFLayer(incoming, num_labels, mask_input=None, W_h=init.GlorotUniform(), W_c=init.GlorotUniform(),
b=init.Constant(0.), **kwargs)
"""
def __init__(self, incoming, num_labels, mask_input=None, W_h=init.GlorotUniform(), W_c=init.GlorotUniform(),
b=init.Constant(0.), **kwargs):
# This layer inherits from a MergeLayer, because it can have two
# inputs - the layer input, and the mask.
# We will just provide the layer input as incomings, unless a mask input was provided.
self.input_shape = incoming.output_shape
incomings = [incoming]
self.mask_incoming_index = -1
if mask_input is not None:
incomings.append(mask_input)
self.mask_incoming_index = 1
super(TreeAffineCRFLayer, self).__init__(incomings, **kwargs)
self.num_labels = num_labels
dim_inputs = self.input_shape[2]
# add parameters
self.W_h = self.add_param(W_h, (dim_inputs, self.num_labels), name='W_h')
self.W_c = self.add_param(W_c, (dim_inputs, self.num_labels), name='W_c')
if b is None:
self.b = None
else:
self.b = self.add_param(b, (self.num_labels,), name='b', regularizable=False)
def get_output_shape_for(self, input_shapes):
"""
:param input_shapes:
:return: the shape of output [batch_size, length, length, num_labels]
"""
input_shape = input_shapes[0]
return input_shape[0], input_shape[1], input_shape[1], self.num_labels
def get_output_for(self, inputs, **kwargs):
"""
:param inputs: inputs: list of theano.TensorType
`inputs[0]` should always be the symbolic input variable. When
this layer has a mask input (i.e. was instantiated with
`mask_input != None`, indicating that the lengths of sequences in
each batch vary), `inputs` should have length 2, where `inputs[1]`
is the `mask`. The `mask` should be supplied as a Theano variable
denoting whether each time step in each sequence in the batch is
part of the sequence or not. `mask` should be a matrix of shape
``(n_batch, n_time_steps)`` where ``mask[i, j] = 1`` when ``j <=
(length of sequence i)`` and ``mask[i, j] = 0`` when ``j > (length
of sequence i)``.
:return: theano.TensorType
Symbolic output variable.
"""
input = inputs[0]
mask = None
if self.mask_incoming_index > 0:
mask = inputs[self.mask_incoming_index]
# compute head part by tensor dot ([batch, length, dim] * [dim, num_label]
# the shape of s_h should be [batch, length, num_label]
s_h = T.tensordot(input, self.W_h, axes=[[2], [0]])
if self.b is not None:
b_shuffled = self.b.dimshuffle('x', 'x', 0)
s_h = s_h + b_shuffled
# compute child part by tensor dot ([batch, length, dim] * [dim, num_label]
# the shape of s_c should be [batch, length, num_label]
s_c = T.tensordot(input, self.W_c, axes=[[2], [0]])
# compute out
input_shape = input.shape
# output shape = [batch, length, length, num_label]
out = T.cast(T.alloc(0.0, input_shape[0], input_shape[1], input_shape[1], self.num_labels), 'floatX')
out = out + s_h.dimshuffle(0, 1, 'x', 2)
out = out + s_c.dimshuffle(0, 'x', 1, 2)
if mask is not None:
mask_shuffled = mask.dimshuffle(0, 1, 'x', 'x')
out = out * mask_shuffled
mask_shuffled = mask.dimshuffle(0, 'x', 1, 'x')
out = out * mask_shuffled
return out
class TreeBiAffineCRFLayer(MergeLayer):
"""
    TreeBiAffineCRFLayer(incoming, num_labels, mask_input=None, U=init.GlorotUniform(), W_h=init.GlorotUniform(),
                         W_c=init.GlorotUniform(), b=init.Constant(0.), **kwargs)
"""
def __init__(self, incoming, num_labels, mask_input=None, U=init.GlorotUniform(), W_h=init.GlorotUniform(),
W_c=init.GlorotUniform(), b=init.Constant(0.), **kwargs):
# This layer inherits from a MergeLayer, because it can have two
# inputs - the layer input, and the mask.
# We will just provide the layer input as incomings, unless a mask input was provided.
self.input_shape = incoming.output_shape
incomings = [incoming]
self.mask_incoming_index = -1
if mask_input is not None:
incomings.append(mask_input)
self.mask_incoming_index = 1
super(TreeBiAffineCRFLayer, self).__init__(incomings, **kwargs)
self.num_labels = num_labels
dim_inputs = self.input_shape[2]
# add parameters
self.U = self.add_param(U, (dim_inputs, dim_inputs, self.num_labels), name='U')
self.W_h = None if W_h is None else self.add_param(W_h, (dim_inputs, self.num_labels), name='W_h')
self.W_c = None if W_c is None else self.add_param(W_c, (dim_inputs, self.num_labels), name='W_c')
if b is None:
self.b = None
else:
self.b = self.add_param(b, (self.num_labels,), name='b', regularizable=False)
def get_output_shape_for(self, input_shapes):
"""
:param input_shapes:
:return: the shape of output [batch_size, length, length, num_labels]
"""
input_shape = input_shapes[0]
return input_shape[0], input_shape[1], input_shape[1], self.num_labels
def get_output_for(self, inputs, **kwargs):
"""
:param inputs: inputs: list of theano.TensorType
`inputs[0]` should always be the symbolic input variable. When
this layer has a mask input (i.e. was instantiated with
`mask_input != None`, indicating that the lengths of sequences in
each batch vary), `inputs` should have length 2, where `inputs[1]`
is the `mask`. The `mask` should be supplied as a Theano variable
denoting whether each time step in each sequence in the batch is
part of the sequence or not. `mask` should be a matrix of shape
``(n_batch, n_time_steps)`` where ``mask[i, j] = 1`` when ``j <=
(length of sequence i)`` and ``mask[i, j] = 0`` when ``j > (length
of sequence i)``.
:return: theano.TensorType
Symbolic output variable.
"""
input = inputs[0]
mask = None
if self.mask_incoming_index > 0:
mask = inputs[self.mask_incoming_index]
# compute the bi-affine part
# first via tensor dot ([batch, length, dim] * [dim, dim, num_label])
# output shape = [batch, length, dim, num_label]
out = T.tensordot(input, self.U, axes=[[2], [0]])
# second via tensor dot ([batch, length, dim, num_label] * [batch, dim, length)
# output shape = [batch, length, length, num_label]
out = T.batched_tensordot(out, input.dimshuffle(0, 2, 1), axes=([2], [1]))
out = out.dimshuffle(0, 1, 3, 2)
# compute head bias part by tensor dot ([batch, length, dim] * [dim, num_label])
# the shape of s_h should be [batch, length, num_label]
if self.W_h is not None:
s_h = T.tensordot(input, self.W_h, axes=[[2], [0]])
out = out + s_h.dimshuffle(0, 1, 'x', 2)
# compute child part by tensor dot ([batch, length, dim] * [dim, num_label]
# the shape of s_c should be [batch, length, num_label]
if self.W_c is not None:
s_c = T.tensordot(input, self.W_c, axes=[[2], [0]])
out = out + s_c.dimshuffle(0, 'x', 1, 2)
# add bias part.
if self.b is not None:
out = out + self.b.dimshuffle('x', 'x', 'x', 0)
if mask is not None:
mask_shuffled = mask.dimshuffle(0, 1, 'x', 'x')
out = out * mask_shuffled
mask_shuffled = mask.dimshuffle(0, 'x', 1, 'x')
out = out * mask_shuffled
return out
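# -----------------------------------------------------------------------------
# Minimal usage sketch for ChainCRFLayer (illustrative; shapes are assumptions):
#
#   from lasagne.layers import InputLayer
#   l_in = InputLayer(shape=(None, None, 100))   # [batch, length, num_features]
#   l_mask = InputLayer(shape=(None, None))      # [batch, length]
#   l_crf = ChainCRFLayer(l_in, num_labels=10, mask_input=l_mask)
#   # lasagne.layers.get_output(l_crf) then has shape
#   # [batch, length, num_labels + 1, num_labels + 1].
# -----------------------------------------------------------------------------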
|
{
"content_hash": "6c8613685db700f3d82b83b1215f1ac9",
"timestamp": "",
"source": "github",
"line_count": 292,
"max_line_length": 117,
"avg_line_length": 43.12328767123287,
"alnum_prop": 0.5901365946632783,
"repo_name": "XuezheMax/NeuroNLP",
"id": "9446861a16ea1714680912a4007512128b7a871b",
"size": "12592",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neuronlp/layers/crf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "345428"
},
{
"name": "Shell",
"bytes": "2143"
}
],
"symlink_target": ""
}
|
import expresso.pycas as pc
import rule_symbols as s
ordered_types = (pc.Types.Boolean,pc.Types.Natural,pc.Types.Integer,pc.Types.Rational,pc.Types.Real,pc.Types.Complex)
evaluator = pc.RewriteEvaluator(recursive=True, split_binary=True)
from .logic_evaluator import is_explicit_natural,is_function_type
evaluator.add_rule(pc.DominantType(s.x),s.x)
for i in range(len(ordered_types)):
evaluator.add_rule(pc.DominantType(pc.Types.Imaginary,ordered_types[i]),pc.Types.Complex)
evaluator.add_rule(pc.Type(ordered_types[i]),pc.Types.Type)
for j in range(i):
evaluator.add_rule(pc.DominantType(ordered_types[j],ordered_types[i]),ordered_types[i])
def eval_type_equality(m):
tx = m[s.x]
ty = m[s.y]
if tx in ordered_types and ty in ordered_types:
m[s.z] = tx == ty
return True
return False
evaluator.add_rule(pc.equal(s.x,s.y),False,eval_type_equality)
evaluator.add_rule(pc.Type(pc.Types.Imaginary*pc.Types.Complex),pc.Types.Complex)
evaluator.add_rule(pc.DominantType(s.x,s.x),s.x)
evaluator.add_rule(pc.Type(pc.Types.Imaginary),pc.Types.Type)
evaluator.add_rule(pc.Type(pc.Type(s.x)),pc.Types.Type)
evaluator.add_rule(pc.Type(True),pc.Types.Boolean)
evaluator.add_rule(pc.Type(False),pc.Types.Boolean)
evaluator.add_rule(pc.Type(s.x),pc.Types.Natural,condition=is_explicit_natural(s.x))
evaluator.add_rule(pc.Type(1/s.x),pc.DominantType(pc.Types.Rational,pc.Type(s.x)))
evaluator.add_rule(pc.Type(s.x**s.y),pc.OperationType(pc.Type(s.x)**pc.Type(s.y)))
evaluator.add_rule(pc.Type(s.x*s.y),pc.OperationType(pc.Type(s.x)*pc.Type(s.y)))
for t in (pc.Types.Natural,pc.Types.Integer,pc.Types.Rational,pc.Types.Real):
evaluator.add_rule(pc.OperationType(pc.Types.Imaginary*t),pc.Types.Imaginary)
evaluator.add_rule(pc.OperationType(pc.Types.Imaginary*pc.Types.Imaginary),pc.Types.Real)
evaluator.add_rule(pc.OperationType(s.x**pc.Types.Natural),s.x)
evaluator.add_rule(pc.OperationType(s.x**pc.Types.Integer),pc.DominantType(s.x,pc.Types.Rational))
evaluator.add_rule(pc.OperationType(pc.Types.Natural**pc.Types.Rational),pc.Types.Real)
evaluator.add_rule(pc.OperationType(s.x**s.y),pc.Types.Complex)
evaluator.add_rule(pc.OperationType(s.x*s.y),pc.DominantType(s.x,s.y),condition=pc.Not(pc.Or(is_function_type(s.x,pc.Type),is_function_type(s.y,pc.Type))))
evaluator.add_rule(pc.OperationType(s.x)**s.y,pc.DominantType(s.x,s.y),condition=pc.Not(pc.Or(is_function_type(s.x,pc.Type),is_function_type(s.y,pc.Type))))
evaluator.add_rule(pc.Type(s.x+s.y),pc.DominantType(pc.Type(s.x),pc.Type(s.y)))
evaluator.add_rule(pc.Type(-s.x),pc.DominantType(pc.Types.Integer,pc.Type(s.x)))
evaluator.add_rule(pc.Type(pc.pi),pc.Types.Real)
evaluator.add_rule(pc.Type(pc.e),pc.Types.Real)
evaluator.add_rule(pc.Type(pc.I),pc.Types.Imaginary)
evaluator.add_rule(pc.Type(pc.factorial(s.x)),pc.Types.Natural)
evaluator.add_rule(pc.Type(pc.sign(s.x)),pc.Types.Integer)
evaluator.add_rule(pc.Type(pc.floor(s.x)),pc.Types.Integer)
evaluator.add_rule(pc.Type(pc.ceil(s.x)),pc.Types.Integer)
evaluator.add_rule(pc.Type(pc.round(s.x)),pc.Types.Integer)
evaluator.add_rule(pc.Type(pc.mod(s.x,s.y)),pc.Types.Integer)
evaluator.add_rule(pc.Type(pc.Abs(s.x)),pc.OperationType(pc.Abs(pc.Type(s.x))))
evaluator.add_rule(pc.OperationType(pc.Abs(pc.Types.Complex)),pc.Types.Real)
evaluator.add_rule(pc.OperationType(pc.Abs(pc.Types.Imaginary)),pc.Types.Real)
evaluator.add_rule(pc.OperationType(pc.Abs(pc.Types.Real)),pc.Types.Real)
evaluator.add_rule(pc.OperationType(pc.Abs(pc.Types.Rational)),pc.Types.Rational)
evaluator.add_rule(pc.OperationType(pc.Abs(pc.Types.Integer)),pc.Types.Natural)
evaluator.add_rule(pc.OperationType(pc.Abs(pc.Types.Natural)),pc.Types.Natural)
evaluator.add_rule(pc.Type(pc.real(s.x)),pc.Types.Real)
evaluator.add_rule(pc.Type(pc.imag(s.x)),pc.Types.Real)
evaluator.add_rule(pc.OperationType(abs(s.x)),pc.Types.Real)
#evaluator.add_rule(pc.Type(pc.conjugate(s.x)),pc.Type(s.x))
evaluator.add_rule(pc.Type(pc.Indicator(s.x)),pc.Types.Natural)
evaluator.add_rule(pc.Type(pc.OuterPiecewise(s.x)),pc.Type(s.x))
evaluator.add_rule(pc.Type(pc.InnerPiecewise((s.a,s.b),s.x)),pc.DominantType(pc.Type(s.a),pc.Type(pc.InnerPiecewise(s.x))))
evaluator.add_rule(pc.Type(pc.InnerPiecewise((s.a,s.b))),pc.Type(s.a))
evaluator.add_rule(pc.Type(pc.derivative(s.x,s.y)),pc.Type(s.x))
evaluator.add_rule(pc.Type(pc.evaluated_at(s.x,s.y,s.z)),pc.DominantType(pc.Type(s.x),pc.Type(s.z)))
evaluator.add_rule(pc.Type(pc.tmp(s.x)),pc.Type(s.x))
evaluator.add_rule(pc.Type(pc.sqrt(s.x)),pc.Type(s.x**(1/pc.S(2))))
evaluator.add_rule(pc.Type(pc.atan2(s.x,s.y)),pc.DominantType(pc.Type(s.x),pc.Type(s.y),pc.Types.Rational))
for f in [pc.exp,pc.log,pc.sin,pc.cos,pc.asin ,pc.acos,pc.tan,pc.atan,pc.cot,pc.acot,pc.sinh,pc.cosh,pc.asinh,pc.acosh,pc.tanh,pc.atanh,pc.coth,pc.acoth]:
evaluator.add_rule(pc.Type(f(s.x)),pc.DominantType(pc.Type(s.x),pc.Types.Rational))
def issubtype(x,t):
return pc.equal(pc.DominantType(pc.Type(x),t),t)
from .logic_evaluator import is_mpmath
from mpmath import mp
def mpmath_type_evaluator(m):
v = m[s.x].value
if isinstance(v,mp.mpf):
m[s.y] = pc.Types.Real
elif isinstance(v,mp.mpc):
m[s.y] = pc.Types.Complex
else:
raise AttributeError('unknown mpmath type')
evaluator.add_rule(pc.Type(s.x),s.y,mpmath_type_evaluator,condition=is_mpmath(s.x))
from .canonical_form import canonical_form
from .logic_evaluator import logic_evaluator
type_evaluator = pc.MultiEvaluator(recursive=True, split_binary=True)
type_evaluator.add_evaluator(canonical_form)
type_evaluator.add_evaluator(logic_evaluator)
type_evaluator.add_evaluator(evaluator)
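# -----------------------------------------------------------------------------
# Worked example of how the rules above compose (illustrative trace only):
#
#   pc.Type(pc.I * pc.pi)
#     -> pc.OperationType(pc.Type(pc.I) * pc.Type(pc.pi))       # product rule
#     -> pc.OperationType(pc.Types.Imaginary * pc.Types.Real)   # Type(I), Type(pi)
#     -> pc.Types.Imaginary                                     # Imaginary * Real rule
# -----------------------------------------------------------------------------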
|
{
"content_hash": "735d9b38e8c4d8bc9581da5918e1bf2f",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 156,
"avg_line_length": 39.30555555555556,
"alnum_prop": 0.7351590106007068,
"repo_name": "TheLartians/Expresso",
"id": "84046f4692347c626318861d027039d651bc432a",
"size": "5661",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "expresso/pycas/evaluators/type_evaluator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "133473"
},
{
"name": "CMake",
"bytes": "2310"
},
{
"name": "Python",
"bytes": "142681"
}
],
"symlink_target": ""
}
|
"""
WSGI config for invigorate project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "invigorate.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
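# Deployment sketch (illustrative; the WSGI server choice and bind address are
# assumptions, not part of this project):
#   gunicorn invigorate.wsgi:application --bind 0.0.0.0:8000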
|
{
"content_hash": "d7c42e110481475d7cf5f02af2e68c8f",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 78,
"avg_line_length": 27.166666666666668,
"alnum_prop": 0.7934560327198364,
"repo_name": "asheahan/invigorate",
"id": "3646adde37c0e3aa71ed936581200cbb5b6f4eb6",
"size": "489",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "invigorate/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "24"
},
{
"name": "HTML",
"bytes": "2694"
},
{
"name": "Python",
"bytes": "18310"
}
],
"symlink_target": ""
}
|
""" Support import export formats."""
from __future__ import absolute_import as _abs
from .... import symbol
from .... import ndarray as nd
from ....base import string_types
from ._import_helper import _convert_map as convert_map
class GraphProto(object): # pylint: disable=too-few-public-methods
"""A helper class for handling mxnet symbol copying from pb2.GraphProto.
Definition: https://github.com/onnx/onnx/blob/master/onnx/onnx.proto
"""
def __init__(self):
self._nodes = {}
self._params = {}
self._num_input = 0
self._num_param = 0
self.aux_dict = {}
self.arg_dict = {}
self.model_metadata = {}
def _convert_operator(self, node_name, op_name, attrs, inputs):
"""Convert from onnx operator to mxnet operator.
        The converter must specify conversions explicitly for incompatible names, and
apply handlers to operator attributes.
Parameters
----------
:param node_name : str
name of the node to be translated.
:param op_name : str
Operator name, such as Convolution, FullyConnected
:param attrs : dict
Dict of operator attributes
:param inputs: list
list of inputs to the operator
Returns
-------
:return mxnet_sym
Converted mxnet symbol
"""
if op_name in convert_map:
op_name, new_attrs, inputs = convert_map[op_name](attrs, inputs, self)
else:
raise NotImplementedError("Operator {} not implemented.".format(op_name))
if isinstance(op_name, string_types):
new_op = getattr(symbol, op_name, None)
if not new_op:
raise RuntimeError("Unable to map op_name {} to sym".format(op_name))
if node_name is None:
mxnet_sym = new_op(*inputs, **new_attrs)
else:
mxnet_sym = new_op(name=node_name, *inputs, **new_attrs)
return mxnet_sym
return op_name
def from_onnx(self, graph):
"""Construct symbol from onnx graph.
Parameters
----------
graph : onnx protobuf object
The loaded onnx graph
Returns
-------
sym :symbol.Symbol
The returned mxnet symbol
params : dict
A dict of name: nd.array pairs, used as pretrained weights
"""
#get input, output shapes
self.model_metadata = self.get_graph_metadata(graph)
# parse network inputs, aka parameters
for init_tensor in graph.initializer:
if not init_tensor.name.strip():
raise ValueError("Tensor's name is required.")
self._params[init_tensor.name] = self._parse_array(init_tensor)
# converting GraphProto message
for i in graph.input:
if i.name in self._params:
# i is a param instead of input
self._nodes[i.name] = symbol.Variable(name=i.name,
shape=self._params[i.name].shape)
else:
self._nodes[i.name] = symbol.Variable(name=i.name)
# constructing nodes, nodes are stored as directed acyclic graph
# converting NodeProto message
for node in graph.node:
op_name = node.op_type
node_name = node.name.strip()
node_name = node_name if node_name else None
onnx_attr = self._parse_attr(node.attribute)
inputs = [self._nodes[i] for i in node.input]
mxnet_sym = self._convert_operator(node_name, op_name, onnx_attr, inputs)
for k, i in zip(list(node.output), range(len(mxnet_sym.list_outputs()))):
self._nodes[k] = mxnet_sym[i]
# splitting params into args and aux params
for args in mxnet_sym.list_arguments():
if args in self._params:
self.arg_dict.update({args: nd.array(self._params[args])})
for aux in mxnet_sym.list_auxiliary_states():
if aux in self._params:
self.aux_dict.update({aux: nd.array(self._params[aux])})
# now return the outputs
out = [self._nodes[i.name] for i in graph.output]
if len(out) > 1:
out = symbol.Group(out)
else:
out = out[0]
return out, self.arg_dict, self.aux_dict
def get_graph_metadata(self, graph):
"""
Get the model metadata from a given onnx graph.
"""
_params = set()
for tensor_vals in graph.initializer:
_params.add(tensor_vals.name)
input_data = []
for graph_input in graph.input:
if graph_input.name not in _params:
shape = [val.dim_value for val in graph_input.type.tensor_type.shape.dim]
input_data.append((graph_input.name, tuple(shape)))
output_data = []
for graph_out in graph.output:
shape = [val.dim_value for val in graph_out.type.tensor_type.shape.dim]
output_data.append((graph_out.name, tuple(shape)))
metadata = {'input_tensor_data' : input_data,
'output_tensor_data' : output_data
}
return metadata
def graph_to_gluon(self, graph, ctx):
"""Construct SymbolBlock from onnx graph.
Parameters
----------
graph : onnx protobuf object
The loaded onnx graph
ctx : Context or list of Context
Loads the model into one or many context(s).
Returns
-------
sym_block :gluon.nn.SymbolBlock
The returned gluon SymbolBlock
"""
sym, arg_params, aux_params = self.from_onnx(graph)
metadata = self.get_graph_metadata(graph)
data_names = [input_tensor[0] for input_tensor in metadata['input_tensor_data']]
data_inputs = [symbol.var(data_name) for data_name in data_names]
from ....gluon import SymbolBlock
net = SymbolBlock(outputs=sym, inputs=data_inputs)
net_params = net.collect_params()
for param in arg_params:
if param in net_params:
net_params[param].shape = arg_params[param].shape
net_params[param]._load_init(arg_params[param], ctx=ctx)
for param in aux_params:
if param in net_params:
net_params[param].shape = aux_params[param].shape
net_params[param]._load_init(aux_params[param], ctx=ctx)
return net
def _parse_array(self, tensor_proto):
"""Grab data in TensorProto and convert to numpy array."""
try:
from onnx.numpy_helper import to_array
except ImportError:
raise ImportError("Onnx and protobuf need to be installed. "
+ "Instructions to install - https://github.com/onnx/onnx")
np_array = to_array(tensor_proto).reshape(tuple(tensor_proto.dims))
return nd.array(np_array)
def _parse_attr(self, attr_proto):
"""Convert a list of AttributeProto to a dict, with names as keys."""
attrs = {}
for a in attr_proto:
for f in ['f', 'i', 's']:
if a.HasField(f):
attrs[a.name] = getattr(a, f)
# Needed for supporting python version > 3.5
if isinstance(attrs[a.name], bytes):
attrs[a.name] = attrs[a.name].decode(encoding='utf-8')
for f in ['floats', 'ints', 'strings']:
if list(getattr(a, f)):
assert a.name not in attrs, "Only one type of attr is allowed"
attrs[a.name] = tuple(getattr(a, f))
for f in ['t', 'g']:
if a.HasField(f):
attrs[a.name] = getattr(a, f)
for f in ['tensors', 'graphs']:
if list(getattr(a, f)):
raise NotImplementedError("Filed {} is not supported in mxnet.".format(f))
if a.name not in attrs:
raise ValueError("Cannot parse attribute: \n{}\n.".format(a))
return attrs
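# -----------------------------------------------------------------------------
# Minimal usage sketch (illustrative; 'model.onnx' is a placeholder path):
#
#   import onnx
#   model = onnx.load('model.onnx')
#   sym, arg_params, aux_params = GraphProto().from_onnx(model.graph)
# -----------------------------------------------------------------------------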
|
{
"content_hash": "267b994235bca111abd6b66b046cc3e8",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 94,
"avg_line_length": 40.229268292682924,
"alnum_prop": 0.5549896932217776,
"repo_name": "precedenceguo/mxnet",
"id": "3af196f8b091c6b2925ba16980a23e44181c6bcd",
"size": "9108",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "python/mxnet/contrib/onnx/onnx2mx/import_onnx.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Batchfile",
"bytes": "13130"
},
{
"name": "C",
"bytes": "122218"
},
{
"name": "C++",
"bytes": "5364005"
},
{
"name": "CMake",
"bytes": "83993"
},
{
"name": "Cuda",
"bytes": "925835"
},
{
"name": "Groovy",
"bytes": "2673"
},
{
"name": "Java",
"bytes": "122297"
},
{
"name": "Jupyter Notebook",
"bytes": "1275293"
},
{
"name": "Makefile",
"bytes": "62361"
},
{
"name": "Matlab",
"bytes": "34903"
},
{
"name": "Perl",
"bytes": "1275063"
},
{
"name": "Perl 6",
"bytes": "7280"
},
{
"name": "Python",
"bytes": "5583154"
},
{
"name": "R",
"bytes": "311543"
},
{
"name": "Scala",
"bytes": "1012427"
},
{
"name": "Shell",
"bytes": "263635"
},
{
"name": "Smalltalk",
"bytes": "43774"
}
],
"symlink_target": ""
}
|
"""
====================================================================
pipeline_chiptools - homer and deeptools implementation
====================================================================
Overview
========
The aim of this pipeline is to create peaklists in :term:`bed` files from
aligned reads in :term:`bam` files that can then be taken on to downstream
analysis (e.g., quantification of peaks etc.). The pipeline wraps up
homer and deeptools software for ATAC and ChIP-seq analysis.
This pipeline also performs motif analysis and basic QC analysis
(i.e., basic tag information, read length distribution, clonal tag distribution
(clonal read depth), autocorrelation analysis (distribution of distances
between adjacent reads in the genome) and sequence bias analysis).
Principal targets
-----------------
standard
perform all workflows minus the motif generation
full
run all chiptools tasks
Functionality
=============
- Takes paired-end or single end :term:`Bam` files you want to call peaks on
(e.g. ChIP-Seq or ATAC-Seq samples and their appropriate 'input' controls).
- Creates Tag directories for ChIP and Input :term:`Bam` files
- Runs homer peakcaller (findPeaks)
- Produces peak lists in bed files to take forward for downstream analysis.
- Performs motif discovery analysis
- Performs peak annotation
- Finds differential and common peaks between replicates (reproducibility)
and between samples (differential binding)
Usage
=====
See :ref:`PipelineSettingUp` and :ref:`PipelineRunning` on general
information how to use CGAT pipelines.
Configuration
-------------
For homer to work with your desired genome please follow the instructions:
perl {path_to_cgat_pipelines}/conda-install/envs/py36-v1/share/homer-4.9.1-5/configureHomer.pl -install hg19
This will download the genome and you can now use homer to process your samples.
The pipeline requires a configured :file:`pipeline.ini` file.
CGATReport report requires a :file:`conf.py` and optionally a
:file:`cgatreport.ini` file (see :ref:`PipelineReporting`).
Default configuration files can be generated by executing:
python <srcdir>/pipeline_chiptools.py config
Input files
-----------
The pipeline requires a sample `bam` file and an optional input `bam` file to
perform background evaluation. The bam file should be indexed.
The pipeline requires a pipeline.ini file that needs to be populated with information
that will allow the pipeline to execute correctly. This can be generated using the
command:
cgatflow chiptools config
A file called design.tsv, which is a tab-separated file, also needs to be supplied
for running both homer and deeptools. The file needs to contain the following
headers:
SampleID Tissue Factor Condition Treatment Replicate bamReads ControlID bamControl
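For example (values purely illustrative), a single-replicate design row might be:
chip1 liver H3K4me3 WT none 1 chip1.bam input1 input1.bam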
For running deeptools you also need to specify a bed file of interested regions. Please
see the pipeline.ini under the deep options for more details.
Code
====
"""
from ruffus import *
import sys
import os
import sqlite3
import CGAT.Experiment as E
import CGATPipelines.Pipeline as P
import CGATPipelines.PipelinePeakcalling as PipelinePeakcalling
import CGAT.BamTools as Bamtools
import CGAT.IOTools as IOTools
import matplotlib.pyplot as plt
import pandas as pd
# load options from the config file
PARAMS = P.getParameters(
["%s/pipeline.ini" % os.path.splitext(__file__)[0],
"../pipeline.ini",
"pipeline.ini"])
#######################################################################
# Check for design file & Match ChIP/ATAC-Seq Bams with Inputs ########
#######################################################################
# This section checks for the design table and generates:
# 1. A dictionary, inputD, linking each input file and each of the various
# IDR subfiles to the appropriate input, as specified in the design table
# 2. A pandas dataframe, df, containing the information from the
# design table.
# 3. INPUTBAMS: a list of control (input) bam files to use as background for
# peakcalling.
# 4. CHIPBAMS: a list of experimental bam files on which to call peaks on.
# if the design table is missing, set the input and chip bams to empty lists.
# This gets round the import tests
if os.path.exists("design.tsv"):
df, inputD = PipelinePeakcalling.readDesignTable("design.tsv", "none")
INPUTBAMS = list(df['bamControl'].values)
CHIPBAMS = list(df['bamReads'].values)
TOTALBAMS = INPUTBAMS + CHIPBAMS
    # I have defined a dict of the samples so I can pass the correct
    # inputs into bamCompare
SAMPLE_DICT = {}
for chip, inputs in zip(CHIPBAMS, INPUTBAMS):
key = chip
value = inputs
SAMPLE_DICT[key] = value
else:
E.warn("design.tsv is not located within the folder")
INPUTBAMS = []
CHIPBAMS = []
TOTALBAMS = []
SAMPLE_DICT = {}
#########################################################################
# Connect to database
#########################################################################
def connect():
'''
Setup a connection to an sqlite database
'''
dbh = sqlite3.connect(PARAMS['database'])
return dbh
###########################################################################
# start of pipelined tasks
# 1) Preprocessing Steps - Filter bam files & generate bam stats
###########################################################################
@transform("design.tsv", suffix(".tsv"), ".load")
def loadDesignTable(infile, outfile):
''' load design.tsv to database '''
P.load(infile, outfile)
#####################################################
# makeTagDirectory Inputs
#####################################################
@active_if(PARAMS['homer'])
@follows(mkdir("homer"))
@follows(mkdir("homer/Tag.dir"))
@follows(loadDesignTable)
@transform(INPUTBAMS, regex("(.*).bam"),
r"homer/Tag.dir/\1/\1.txt")
def makeTagDirectoryInput(infile, outfile):
'''
This will create a tag file for each bam file
for a CHIP-seq experiment
'''
    # use splitext rather than str.strip(".bam"), which strips a character set
    # and can mangle sample names ending in 'a', 'b' or 'm'
    bamstrip = os.path.splitext(infile)[0]
    samfile = bamstrip + ".sam"
statement = '''
samtools view %(infile)s > homer/Tag.dir/%(samfile)s;
cd homer/Tag.dir/ ;
makeTagDirectory %(bamstrip)s
%(samfile)s
-genome %(homer_maketagdir_genome)s -checkGC
&> %(bamstrip)s.makeTagInput.log;
touch %(bamstrip)s/%(bamstrip)s.txt &&
sleep 60'''
P.run()
#####################################################
# makeTagDirectory ChIPs
#####################################################
@active_if(PARAMS['homer'])
@follows(mkdir("homer"))
@follows(mkdir("homer/Tag.dir"))
@follows(makeTagDirectoryInput)
@transform(CHIPBAMS, regex("(.*).bam"),
r"homer/Tag.dir/\1/\1.txt")
def makeTagDirectoryChips(infile, outfile):
'''
This will create a tag file for each bam file
for a CHIP-seq experiment
'''
    bamstrip = os.path.splitext(infile)[0]
    samfile = bamstrip + ".sam"
statement = '''
samtools view %(infile)s > homer/Tag.dir/%(samfile)s &&
cd homer/Tag.dir/ &&
makeTagDirectory %(bamstrip)s
%(samfile)s
-genome %(homer_maketagdir_genome)s -checkGC
&> %(bamstrip)s.makeTagChip.log &&
touch %(bamstrip)s/%(bamstrip)s.txt &&
sleep 60'''
P.run()
@active_if(PARAMS['homer'])
@transform((makeTagDirectoryChips),
regex("homer/Tag.dir/(.*)/(.*).txt"),
r"homer/Tag.dir/\1/regions.txt")
def findPeaks(infile, outfile):
'''
This function will find peaks in your samples.
Arguments
---------
infiles : string
this is a list of tag directories
directory: string
This is the directory where the tag file will be placed
'''
directory = infile.strip(".txt")
_, _, directory, _ = directory.split("/")
bamfile = directory + ".bam"
df_slice = df[df['bamReads'] == bamfile]
input_bam = df_slice['bamControl'].values[0]
    input_bam = os.path.splitext(input_bam)[0]
statement = '''cd homer/Tag.dir/ &&
findPeaks %(directory)s -style %(homer_findpeaks_style)s -o %(homer_findpeaks_output)s
%(homer_findpeaks_options)s -i %(input_bam)s &> %(directory)s.findpeaks.log'''
P.run()
@active_if(PARAMS['homer'])
@transform(findPeaks,
regex("homer/Tag.dir/(.*)/regions.txt"),
r"homer/Tag.dir/\1/\1.bed")
def bedConversion(infile, outfile):
'''
The peaks identified will be converted to a bed file for further downstream
processing outside of this pipeline.
'''
statement = '''pos2bed.pl %(homer_bed_options)s %(infile)s > %(outfile)s'''
P.run()
@active_if(PARAMS['homer'])
@transform(findPeaks,
regex("homer/Tag.dir/(.*)/regions.txt"),
r"homer/Tag.dir/\1/annotate.txt")
def annotatePeaks(infile, outfile):
'''
The peaks identified in your tag directory will be annotated according
to the specified genome.
'''
statement = '''annotatePeaks.pl %(infile)s %(homer_annotatepeaks_genome)s &> Annotate.log > %(outfile)s'''
P.run()
@active_if(PARAMS['homer'])
@follows(mkdir("homer/motif.dir"))
@transform(findPeaks,
regex("homer/Tag.dir/(.*)/regions.txt"),
r"homer/motif.dir/\1/motifs.txt")
def findMotifs(infile, outfile):
'''
    This will find known motifs enriched in your peak lists using the
    JASPAR database.
'''
_, _, directory, _ = infile.split("/")
statement = '''findMotifsGenome.pl %(infile)s %(homer_motif_genome)s homer/motif.dir/%(directory)s -size %(homer_motif_size)i
&> homer/motif.dir/%(directory)s.log'''
P.run()
@active_if(PARAMS['homer'])
@active_if(PARAMS['homer_diffannotat_raw'])
@follows(mkdir("homer/raw_annotate.dir"))
@merge(makeTagDirectoryChips, "homer/raw_annotate.dir/countTable.peaks.txt")
def annotatePeaksRaw(infiles, outfile):
'''
This function will annotate peaks according to the genome that is specified in the
pipeline.ini section. It will take an unprocessed Tag directory as an input.
'''
directories = []
for infile in infiles:
directory = infile.split("/")[2]
directories.append("homer/Tag.dir/" + directory + "/")
directories = " ".join(directories)
statement = '''annotatePeaks.pl %(homer_annotate_raw_region)s %(homer_annotate_raw_genome)s
-d %(directories)s > homer/raw_annotate.dir/countTable.peaks.txt'''
P.run()
@active_if(PARAMS['homer'])
@active_if(PARAMS['homer_diff_expr'])
@follows(mkdir("homer/diffExprs.dir"))
@transform(annotatePeaksRaw,
suffix(".peaks.txt"),
r"homer/raw_annotate.dir/\1.diffexprs.txt")
def getDiffExprs(infile, outfile):
'''
Once the peaks have been annotated and the reads counted, differential
expression is then performed.
'''
# in the future this should be read from the design file AC 11/12/2017
statement = '''getDiffExpression.pl %(infile)s
%(homer_diff_expr_options)s %(homer_diff_expr_group)s
> homer/diffExprs.dir/diffOutput.txt'''
P.run()
@active_if(PARAMS['homer'])
@active_if(PARAMS['homer_diff_repeats'])
@follows(mkdir("homer/Replicates.dir"))
@follows(makeTagDirectoryChips)
@originate("homer/Replicates.dir/outputPeaks.txt")
def getDiffPeaksReplicates(outfile):
'''
    The function will determine the statistically enriched peaks across
    replicates.
    The output of the function is a homer peak file that contains several
    columns of annotation, normalised read counts and differential enrichment
    statistics from DESeq2.
'''
replicates = set(df["Replicate"])
for x in replicates:
subdf = df[df["Replicate"] == x]
bams = subdf["bamReads"].values
bam_strip = []
for bam in bams:
            bam = os.path.splitext(bam)[0] + "/"
bam_strip.append(bam)
bam_strip = " ".join(bam_strip)
inputs = subdf["bamControl"].values
input_strip = []
for inp in inputs:
            inp = os.path.splitext(inp)[0] + "/"
input_strip.append(inp)
input_strip = " ".join(input_strip)
statement = '''getDifferentialPeaksReplicates.pl -t %(bam_strip)s
-i %(input_strip)s -genome %(homer_diff_repeats_genome)s %(homer_diff_repeats_options)s>
homer/Replicates.dir/Repeat-%(x)s.outputPeaks.txt'''
P.run()
##################################################################################################
# This is the section where the deeptool (http://deeptools.readthedocs.io/en/latest/index.html#)
# deepTools is a suite of python tools particularly developed for the efficient analysis
# of high-throughput sequencing data, such as ChIP-seq, RNA-seq or MNase-seq
# Functions are specified
##################################################################################################
@active_if(PARAMS['deeptools'])
@follows(mkdir("deepTools/Plot.dir/Coverage.dir"))
@follows(loadDesignTable)
@merge([CHIPBAMS, INPUTBAMS], "deepTools/Plot.dir/Coverage.dir/coverage_plot.eps")
def coverage_plot(infiles, outfile):
'''
This tool is useful to assess the sequencing depth of a given sample.
It samples 1 million bp, counts the number of overlapping reads and
can report a histogram that tells you how many bases are covered how
many times. Multiple BAM files are accepted, but they all should
correspond to the same genome assembly.
'''
infile = [item for sublist in infiles for item in sublist]
infile = " ".join(infile)
if PARAMS['deep_ignore_dups']:
duplicates = "--ignoreDuplicates"
elif not PARAMS['deep_ignore_dups']:
duplicates = ""
else:
raise ValueError('''Please set a ignore_dups value in the
pipeline.ini''')
statement = '''plotCoverage -b %(infile)s
--plotFile %(outfile)s
--plotTitle "coverage_plot"
--outRawCounts deepTools/Plot.dir/Coverage.dir/coverage_plot.tab
%(duplicates)s
--minMappingQuality %(deep_mapping_qual)s'''
P.run()
@follows(coverage_plot)
@active_if(PARAMS['deeptools'])
@follows(mkdir("deepTools/Plot.dir/Fingerprint.dir"))
@follows(loadDesignTable)
@merge([CHIPBAMS, INPUTBAMS],
"deepTools/Plot.dir/Fingerprint.dir/fingerprints.eps")
def fingerprint_plot(infiles, outfile):
'''
This quality control will most likely be of interest for you
if you are dealing with ChIP-seq samples as a pressing question
in ChIP-seq experiments is Did my ChIP work?, i.e. did the
antibody-treatment enrich sufficiently so that the ChIP signal
can be separated from the background signal? (After all, around
90% of all DNA fragments in a ChIP experiment will represent
the genomic background).
This tool samples indexed BAM files and plots a profile of
cumulative read coverages for each. All reads overlapping a
window (bin) of the specified length are counted; these counts
are sorted and the cumulative sum is finally plotted.
'''
infile = [item for sublist in infiles for item in sublist]
infile = " ".join(infile)
if PARAMS['deep_ignore_dups']:
duplicates = "--ignoreDuplicates"
elif not PARAMS['deep_ignore_dups']:
duplicates = ""
else:
raise ValueError('''Please set a ignore_dups value in the
pipeline.ini''')
statement = '''plotFingerprint -b %(infile)s
--plotFile %(outfile)s
--plotTitle "Fingerprints of samples"
--outRawCounts deepTools/Plot.dir/Fingerprint.dir/fingerprints_plot.tab
%(duplicates)s
--minMappingQuality %(deep_mapping_qual)s'''
P.run()
@follows(fingerprint_plot)
@active_if(PARAMS['deeptools'])
@active_if(PARAMS['deep_paired_end'])
@follows(mkdir("deepTools/Plot.dir/FragmentSize.dir"))
@follows(loadDesignTable)
@merge([CHIPBAMS, INPUTBAMS],
"deepTools/Plot.dir/FragmentSize.dir/FragmentSize.png")
def fragment_size(infiles, outfile):
'''
This tool calculates the fragment sizes for read pairs
    given a BAM file from paired-end sequencing. Several regions
    are sampled depending on the size of the genome and number
    of processors to estimate the summary statistics on the fragment
lengths. Properly paired reads are preferred for computation,
i.e., it will only use discordant pairs if no concordant
alignments overlap with a given region. The default setting
simply prints the summary statistics to the screen.
'''
infile = [item for sublist in infiles for item in sublist]
infile = " ".join(infile)
if PARAMS['deep_logscale']:
logscale = ("--logScale %s") % (PARAMS['deep_logscale'])
else:
logscale = ""
statement = '''bamPEFragmentSize -b %(infile)s
--histogram %(outfile)s
%(logscale)s &&
sleep 60'''
P.run()
@follows(fragment_size)
@active_if(PARAMS['deeptools'])
@active_if(PARAMS['deep_bam_coverage'])
@follows(mkdir("deepTools/Bwfiles.dir/bamCoverage.dir"))
@transform(TOTALBAMS, regex("(.*).bam"),
r"deepTools/Bwfiles.dir/bamCoverage.dir/\1.bw")
def bamCoverage(infiles, outfile):
'''
This tool takes an alignment of reads or fragments as
input (BAM file) and generates a coverage track (bigWig
or bedGraph) as output. The coverage is calculated as
the number of reads per bin, where bins are short
consecutive counting windows of a defined size. It is
    possible to extend the length of the reads to better
reflect the actual fragment length. bamCoverage offers
normalization by scaling factor, Reads Per Kilobase per
Million mapped reads (RPKM), and 1x depth (reads per
genome coverage, RPGC).
'''
    if PARAMS['deep_ignore_norm'] != "":
normalise = '--ignoreForNormalization '
norm_value = PARAMS['deep_ignore_norm']
else:
normalise = ''
norm_value = ''
if PARAMS['deep_extendreads']:
extend = '--extendReads'
elif not PARAMS['deep_extendreads']:
extend = ''
else:
raise ValueError('''Please set the extendreads to a value 0 or 1''')
statement = '''bamCoverage --bam %(infiles)s
-o %(outfile)s
-of bigwig
--binSize %(deep_binsize)s
%(normalise)s %(norm_value)s
%(extend)s
%(deep_bamcoverage_options)s &&
sleep 60'''
P.run()
@follows(fragment_size)
@active_if(PARAMS['deeptools'])
@active_if(PARAMS['deep_bam_compare'])
@follows(loadDesignTable)
@follows(mkdir("deepTools/Bwfiles.dir/bamCompare.dir"))
@transform(CHIPBAMS,
suffix('.bam'),
add_inputs(SAMPLE_DICT),
r"deepTools/Bwfiles.dir/bamCompare.dir/\1.bw")
def bamCompare(infiles, outfile):
'''
This tool compares two BAM files based on the number of
mapped reads. To compare the BAM files, the genome is
partitioned into bins of equal size, then the number of reads
found in each bin is counted per file, and finally a summary
value is reported. This value can be the ratio of the number
of reads per bin, the log2 of the ratio, or the difference.
This tool can normalize the number of reads in each BAM file
using the SES method proposed by Diaz et al. (2012) Normalization,
bias correction, and peak calling for ChIP-seq. Statistical
Applications in Genetics and Molecular Biology, 11(3).
Normalization based on read counts is also available. The
output is either a bedgraph or bigWig file containing the
bin location and the resulting comparison value. By default,
if reads are paired, the fragment length reported in the
BAM file is used. Each mate, however, is treated independently
to avoid a bias when a mixture of concordant and discordant
pairs is present. This means that each end will be extended
to match the fragment length.
'''
chipbam = infiles[0]
inputbams = infiles[1]
inputbam = inputbams[chipbam]
statement = '''
bamCompare -b1 %(chipbam)s
-b2 %(inputbam)s
-o %(outfile)s
-of bigwig
%(deep_bamcompare_options)s &&
sleep 60'''
P.run()
@follows(bamCompare)
@active_if(PARAMS['deeptools'])
@follows(loadDesignTable)
@follows(mkdir("deepTools/Summary.dir"))
@merge([CHIPBAMS, INPUTBAMS], "deepTools/Summary.dir/Bam_Summary.npz")
def multiBamSummary(infiles, outfile):
'''
multiBamSummary computes the read coverages for genomic regions
for typically two or more BAM files. The analysis can be
performed for the entire genome by running the program in
bins mode. If you want to count the read coverage for
specific regions only, use the BED-file mode instead.
The standard output of multiBamSummary is a compressed
numpy array (.npz). It can be directly used to calculate
and visualize pairwise correlation values between the read
coverages using the tool plotCorrelation. Similarly,
plotPCA can be used for principal component analysis of
the read coverages using the .npz file. Note that using
a single bigWig file is only recommended if you want to
produce a bedGraph file (i.e., with the --outRawCounts
option; the default output file cannot be used by ANY
deepTools program if only a single file was supplied!).
'''
infile = [item for sublist in infiles for item in sublist]
infile = " ".join(infile)
if PARAMS['deep_mode_setting'] == 'None':
mode_set = 'bins'
mode_region = ''
else:
mode_set = 'BED-file --BED '
mode_region = PARAMS['deep_mode_setting']
if PARAMS['deep_ignore_dups']:
duplicates = "--ignoreDuplicates"
elif not PARAMS['deep_ignore_dups']:
duplicates = ""
else:
        raise ValueError('''Please set an ignore_dups value in the
pipeline.ini''')
statement = '''
multiBamSummary %(mode_set)s %(mode_region)s
-b %(infile)s
-o %(outfile)s
--outRawCounts deepTools/Summary.dir/Bam_Summary.tab
--minMappingQuality %(deep_mapping_qual)s
%(deep_summary_options)s &&
sleep 60'''
P.run()
@active_if(PARAMS['deeptools'])
@merge([bamCoverage, bamCompare], "deepTools/Summary.dir/bw_summary.npz")
def multiBwSummary(infiles, outfile):
'''
    This performs a summary across all of the bigWig files
    and is similar to that implemented for multiBamSummary.
'''
infiles = " ".join(infiles)
if PARAMS['deep_mode_setting'] == 'None':
mode_set = 'bins'
mode_region = ''
else:
mode_set = 'BED-file --BED '
mode_region = PARAMS['deep_mode_setting']
statement = '''multiBigwigSummary %(mode_set)s %(mode_region)s
-b %(infiles)s
-out %(outfile)s
--outRawCounts deepTools/Summary.dir/Bw_Summary.tab
%(deep_summary_options)s &&
sleep 60'''
P.run()
@active_if(PARAMS['deeptools'])
@follows(mkdir("deepTools/Plot.dir/Summary.dir/"))
@transform((multiBamSummary, multiBwSummary),
regex("\S+/(\S+).npz"),
r"deepTools/Plot.dir/Summary.dir/\1corr")
def plotCorrelation(infiles, outfile):
'''
Tool for the analysis and visualization of sample
correlations based on the output of multiBamSummary
or multiBigwigSummary. Pearson or Spearman methods
are available to compute correlation coefficients.
Results can be saved as multiple scatter plots depicting
the pairwise correlations or as a clustered heatmap,
where the colors represent the correlation coefficients
and the clusters are joined using the Nearest Point
Algorithm (also known as single). Optionally,
the values can be saved as tables, too.
'''
if PARAMS['deep_plot'] == 'heatmap':
colormap = ("--colorMap %s") % (PARAMS['deep_colormap'])
else:
colormap = ""
statement = '''plotCorrelation -in %(infiles)s -o %(outfile)s
--corMethod %(deep_cormethod)s -p %(deep_plot)s
%(colormap)s
--plotFileFormat %(deep_filetype)s
--skipZeros %(deep_plot_options)s &&
sleep 60'''
P.run()
@active_if(PARAMS['deeptools'])
@transform((multiBamSummary, multiBwSummary),
regex("\S+/(\S+).npz"),
r"deepTools/Plot.dir/Summary.dir/\1PCA")
def plotPCA(infiles, outfile):
'''
This will plot a PCA of the samples.
'''
statement = '''plotPCA -in %(infiles)s -o %(outfile)s
--plotFileFormat %(deep_filetype)s
%(deep_plot_options)s'''
P.run()
@active_if(PARAMS['deeptools'])
@follows(mkdir("deepTools/Plot.dir/matrix.dir/"))
@merge([bamCoverage, bamCompare],
"deepTools/Plot.dir/matrix.dir/matrix.gz")
def computeMatrix(infile, outfile):
'''
This computes a count matrix for downstream processing of the data.
'''
infile = " ".join(infile)
if 'reference-point' in PARAMS['deep_startfactor']:
reference_point = '--referencePoint'
regions = PARAMS['deep_regions']
region_length = " "
elif "scale-regions" in PARAMS['deep_startfactor']:
        reference_point = '--regionBodyLength'
regions = " "
region_length = PARAMS['deep_region_length']
else:
        raise ValueError("Please supply a valid startfactor")
if ".gz" in PARAMS['deep_bedfile']:
infile = PARAMS['deep_bedfile']
bedfile = IOTools.openFile(infile, "r")
else:
bedfile = PARAMS['deep_bedfile']
    if PARAMS['deep_brslength'] != "":
        upstream = "--upstream %s" % PARAMS['deep_brslength']
    else:
        upstream = ""
    if PARAMS['deep_arslength'] != "":
        downstream = "--downstream %s" % PARAMS['deep_arslength']
    else:
        downstream = ""
    if PARAMS['deep_matrix_bin_size'] != "":
        binsize = "--binSize %s" % PARAMS['deep_matrix_bin_size']
    else:
        binsize = ""
    if PARAMS['deep_out_namematrix'] != "":
        outmatrix = "--outFileNameMatrix %s" % PARAMS['deep_out_namematrix']
    else:
        outmatrix = ""
    if PARAMS['deep_out_sorted'] != "":
        sortedfile = "--outFileSortedRegions %s" % PARAMS['deep_out_sorted']
    else:
        sortedfile = ""
statement = '''computeMatrix %(deep_startfactor)s -S %(infile)s
-R %(bedfile)s
%(reference_point)s %(regions)s %(region_length)s
%(upstream)s
%(downstream)s
%(binsize)s
--skipZeros
-o %(outfile)s
%(outmatrix)s
%(sortedfile)s'''
P.run()
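# Illustrative only: depending on deep_startfactor the statement above takes one
# of two forms (the numeric values here are hypothetical, not defaults), e.g.
#
#   computeMatrix reference-point -S a.bw b.bw -R regions.bed --referencePoint TSS
#       --upstream 1000 --downstream 1000 --skipZeros -o matrix.gz
#
#   computeMatrix scale-regions -S a.bw b.bw -R regions.bed --regionBodyLength 5000
#       --upstream 1000 --downstream 1000 --skipZeros -o matrix.gz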
@active_if(PARAMS['deeptools'])
@transform(computeMatrix,
regex("\S+/\S+/\S+/(\S+).gz"),
r"deepTools/Plot.dir/matrix.dir/\1_heatmap.eps")
def plotHeatmap(infile, outfile):
'''
This tool creates a heatmap for scores associated with genomic regions.
The program requires a matrix file generated by the tool computeMatrix.
'''
infile = "".join(infile)
statement = '''plotHeatmap -m %(infile)s
-o %(outfile)s
--outFileNameMatrix %(deep_out_namematrix)s
--outFileSortedRegions %(deep_out_sorted)s
--dpi %(deep_dpi)s
--colorMap %(deep_colormap)s
--kmeans %(deep_kmeans)s
--legendLocation %(deep_legendlocation)s
--refPointLabel %(deep_refpointlabel)s'''
P.run()
@active_if(PARAMS['deeptools'])
@transform(computeMatrix,
regex("\S+/\S+/\S+/(\S+).gz"),
r"deepTools/Plot.dir/matrix.dir/\1_profile.eps")
def plotProfile(infile, outfile):
'''
This tool creates a profile plot for scores over sets of genomic
regions. Typically, these regions are genes, but any other regions
    defined in a BED file will work. A matrix generated by computeMatrix
is required.
'''
infile = "".join(infile)
    if PARAMS['deep_pergroup'] != "":
pergroup = ("--perGroup %s") % (PARAMS['deep_pergroup'])
else:
pergroup = ""
statement = '''plotProfile -m %(infile)s
-o %(outfile)s
--kmeans %(deep_kmeans)s
--plotType %(deep_plottype)s
--dpi %(deep_dpi)s
%(pergroup)s
--legendLocation %(deep_legendlocation)s
--refPointLabel %(deep_refpointlabel)s'''
P.run()
# ---------------------------------------------------
# Generic pipeline tasks
@follows(loadDesignTable,
bedConversion,
annotatePeaks,
annotatePeaksRaw,
getDiffExprs,
getDiffPeaksReplicates,
coverage_plot,
fingerprint_plot,
bamCompare,
bamCoverage,
multiBamSummary,
multiBwSummary,
plotCorrelation,
plotPCA,
computeMatrix,
plotHeatmap,
plotProfile)
def standard():
'''a target without the motif generation'''
pass
@follows(makeTagDirectoryInput,
loadDesignTable,
bedConversion,
annotatePeaks,
annotatePeaksRaw,
getDiffExprs,
getDiffPeaksReplicates,
coverage_plot,
fingerprint_plot,
bamCompare,
bamCoverage,
multiBamSummary,
multiBwSummary,
plotCorrelation,
plotPCA,
computeMatrix,
plotHeatmap,
plotProfile)
def full():
pass
@follows(mkdir("Jupyter_report.dir"))
def renderJupyterReport():
'''build Jupyter notebook report'''
report_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
'pipeline_docs',
'pipeline_chiptools',
'Jupyter_report'))
statement = ''' cp %(report_path)s/* Jupyter_report.dir/ ; cd Jupyter_report.dir/;
jupyter nbconvert --ExecutePreprocessor.timeout=None
--to html --execute *.ipynb;
'''
P.run()
# We will implement this when the new version of multiqc is available
@follows(mkdir("MultiQC_report.dir"))
@originate("MultiQC_report.dir/multiqc_report.html")
def renderMultiqc(outfile):
    '''build multiqc report'''
statement = '''LANG=en_GB.UTF-8 multiqc . -f;
mv multiqc_report.html MultiQC_report.dir/'''
P.run()
@follows(renderJupyterReport)
def build_report():
pass
def main(argv=None):
if argv is None:
argv = sys.argv
P.main(argv)
if __name__ == "__main__":
sys.exit(P.main(sys.argv))
|
{
"content_hash": "b7e6c218b00282f7504a08f84af7fbc4",
"timestamp": "",
"source": "github",
"line_count": 971,
"max_line_length": 129,
"avg_line_length": 32.38002059732235,
"alnum_prop": 0.6114627397347413,
"repo_name": "CGATOxford/CGATPipelines",
"id": "b21555efb7c99a43ae270cbc0c661bf538947ce8",
"size": "31441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CGATPipelines/pipeline_chiptools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4360"
},
{
"name": "HTML",
"bytes": "40732"
},
{
"name": "JavaScript",
"bytes": "302029"
},
{
"name": "Jupyter Notebook",
"bytes": "4393775"
},
{
"name": "Makefile",
"bytes": "45084"
},
{
"name": "Python",
"bytes": "5357820"
},
{
"name": "R",
"bytes": "62312"
},
{
"name": "Shell",
"bytes": "67312"
}
],
"symlink_target": ""
}
|
def truncate(self, size=None):
self.db.put(self.key, '', txn=self.txn, dlen=self.len - size, doff=size)
|
{
"content_hash": "801bf034ed212afa25f21abe540b9c38",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 76,
"avg_line_length": 54,
"alnum_prop": 0.6666666666666666,
"repo_name": "moagstar/python-uncompyle6",
"id": "189fe19da8f9fd4cf86114b4ee6f69de029d02a5",
"size": "180",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/simple_source/bug_pypy27/02_call_method.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3365"
},
{
"name": "Makefile",
"bytes": "8956"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "1690289"
},
{
"name": "Shell",
"bytes": "535"
}
],
"symlink_target": ""
}
|
"""
This is revision from 3058ab9d9d4875589638cc45e84b59e7e1d7c9c3 of
https://github.com/ojii/django-load.
ANY changes to this file, be it upstream fixes or changes for the cms *must* be
documented clearly within this file with comments.
For documentation on how to use the functions described in this file, please
refer to http://django-load.readthedocs.org/en/latest/index.html.
"""
import imp
import traceback # changed
from importlib import import_module
from django.utils.six.moves import filter, map
from .compat.dj import installed_apps
def get_module(app, modname, verbose, failfast):
"""
Internal function to load a module from a single app.
"""
module_name = '%s.%s' % (app, modname)
# the module *should* exist - raise an error if it doesn't
app_mod = import_module(app)
try:
imp.find_module(modname, app_mod.__path__ if hasattr(app_mod, '__path__') else None)
except ImportError:
# this ImportError will be due to the module not existing
# so here we can silently ignore it. But an ImportError
# when we import_module() should not be ignored
if failfast:
raise
elif verbose:
print(u"Could not find %r from %r" % (modname, app)) # changed
traceback.print_exc() # changed
return None
module = import_module(module_name)
if verbose:
print(u"Loaded %r from %r" % (modname, app))
return module
def load(modname, verbose=False, failfast=False):
"""
Loads all modules with name 'modname' from all installed apps.
If verbose is True, debug information will be printed to stdout.
    If failfast is True, import errors will not be suppressed.
"""
for app in installed_apps():
get_module(app, modname, verbose, failfast)
def iterload(modname, verbose=False, failfast=False):
"""
Loads all modules with name 'modname' from all installed apps and returns
    an iterator of those modules.
If verbose is True, debug information will be printed to stdout.
    If failfast is True, import errors will not be suppressed.
"""
return filter(None, (get_module(app, modname, verbose, failfast)
for app in installed_apps()))
def load_object(import_path):
"""
Loads an object from an 'import_path', like in MIDDLEWARE_CLASSES and the
likes.
Import paths should be: "mypackage.mymodule.MyObject". It then imports the
module up until the last dot and tries to get the attribute after that dot
from the imported module.
If the import path does not contain any dots, a TypeError is raised.
If the module cannot be imported, an ImportError is raised.
    If the attribute does not exist in the module, an AttributeError is raised.
"""
if '.' not in import_path:
raise TypeError(
"'import_path' argument to 'django_load.core.load_object' must "
"contain at least one dot."
)
module_name, object_name = import_path.rsplit('.', 1)
module = import_module(module_name)
return getattr(module, object_name)
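# Minimal usage sketch (illustrative, not part of the upstream django-load API):
# the path is split on its last dot, the left part is imported as a module and
# the right part looked up on it, so
#
#   now = load_object('django.utils.timezone.now')
#
# is equivalent to ``from django.utils.timezone import now``.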
def iterload_objects(import_paths):
"""
Load a list of objects.
"""
return map(load_object, import_paths)
def get_subclasses(c):
"""
Get all subclasses of a given class
"""
return c.__subclasses__() + sum(map(get_subclasses, c.__subclasses__()), [])
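# Example (illustrative): given ``class A: pass``, ``class B(A): pass`` and
# ``class C(B): pass``, get_subclasses(A) returns [B, C] -- B comes from
# A.__subclasses__() and C from the recursive call on B.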
def load_from_file(module_path):
"""
Load a python module from its absolute filesystem path
"""
from imp import load_module, PY_SOURCE
imported = None
if module_path:
with open(module_path, 'r') as openfile:
imported = load_module("mod", openfile, module_path, ('imported', 'r', PY_SOURCE))
return imported
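# Usage sketch (illustrative; the path is an assumption): the file is executed
# as a throw-away module named "mod", whose attributes can then be read, e.g.
#
#   settings_mod = load_from_file('/srv/project/local_settings.py')
#   debug = getattr(settings_mod, 'DEBUG', False)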
|
{
"content_hash": "964ec1940b7ef2d81aadb8fbb74c06bb",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 94,
"avg_line_length": 32.111111111111114,
"alnum_prop": 0.6656907106734097,
"repo_name": "vxsx/django-cms",
"id": "0d97c46a4b9d013b5d411cf9d79581933e9a0e30",
"size": "3781",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "cms/utils/django_load.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "133419"
},
{
"name": "HTML",
"bytes": "154109"
},
{
"name": "JavaScript",
"bytes": "1172445"
},
{
"name": "Python",
"bytes": "1996894"
},
{
"name": "Shell",
"bytes": "28"
}
],
"symlink_target": ""
}
|
import itertools
class Solution(object):
def maxIncreaseKeepingSkyline(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
row_maxes = [max(row) for row in grid]
col_maxes = [max(col) for col in itertools.izip(*grid)]
return sum(min(row_maxes[r], col_maxes[c])-val \
for r, row in enumerate(grid) \
for c, val in enumerate(row))
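        # Worked example (illustrative): for the grid
        #   [[3, 0, 8, 4],
        #    [2, 4, 5, 7],
        #    [9, 2, 6, 3],
        #    [0, 3, 1, 0]]
        # row_maxes = [8, 7, 9, 3] and col_maxes = [9, 4, 8, 7]; each cell can be
        # raised to min(row_max, col_max) without altering either skyline, for a
        # total increase of 35.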
|
{
"content_hash": "f894e831fe52955c332cb19bc47c8964",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 63,
"avg_line_length": 29.266666666666666,
"alnum_prop": 0.5421412300683371,
"repo_name": "kamyu104/LeetCode",
"id": "902b5482545902d262245d41895f24761941e118",
"size": "1903",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Python/max-increase-to-keep-city-skyline.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1008761"
},
{
"name": "Go",
"bytes": "1907"
},
{
"name": "Java",
"bytes": "8367"
},
{
"name": "Python",
"bytes": "1421980"
},
{
"name": "SQLPL",
"bytes": "822"
},
{
"name": "Shell",
"bytes": "3218"
}
],
"symlink_target": ""
}
|
"""This code example creates a new creative wrapper.
Creative wrappers must be associated with a LabelType.CREATIVE_WRAPPER label and
applied to ad units by AdUnit.appliedLabels. To determine which creative
wrappers exist, run get_all_creative_wrappers.py
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
LABEL_ID = 'INSERT_CREATIVE_WRAPPER_LABEL_ID_HERE'
def main(client, label_id):
# Initialize appropriate service.
creative_wrapper_service = client.GetService('CreativeWrapperService',
version='v202205')
# Create creative wrapper objects.
creative_wrapper = {
# A label can only be associated with one creative wrapper.
'labelId': label_id,
'ordering': 'INNER',
'htmlHeader': '<b>My creative wrapper header</b>',
'htmlFooter': '<b>My creative wrapper footer</b>'
}
# Add creative wrapper.
creative_wrappers = creative_wrapper_service.createCreativeWrappers(
[creative_wrapper])
# Display results.
for creative_wrapper in creative_wrappers:
print('Creative wrapper with ID "%s" applying to label "%s" was '
'created.' % (creative_wrapper['id'], creative_wrapper['labelId']))
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client, LABEL_ID)
|
{
"content_hash": "9fa95743dbabef8a5c9a82b75122a06b",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 80,
"avg_line_length": 35.12765957446808,
"alnum_prop": 0.7129012719563901,
"repo_name": "googleads/googleads-python-lib",
"id": "dca07888baf2b8fecc1d7862bda93778d51bd173",
"size": "2273",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/ad_manager/v202205/creative_wrapper_service/create_creative_wrappers.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "403821"
}
],
"symlink_target": ""
}
|
import logging
import os
import sys
import time
import numpy as np
import pytest
import ray
import ray.cluster_utils
from ray._private.test_utils import RayTestTimeoutException, wait_for_condition
logger = logging.getLogger(__name__)
def test_gpu_ids(shutdown_only):
num_gpus = 3
ray.init(num_cpus=num_gpus, num_gpus=num_gpus)
def get_gpu_ids(num_gpus_per_worker):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == num_gpus_per_worker
assert os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids] # noqa
)
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
f0 = ray.remote(num_gpus=0)(lambda: get_gpu_ids(0))
f1 = ray.remote(num_gpus=1)(lambda: get_gpu_ids(1))
f2 = ray.remote(num_gpus=2)(lambda: get_gpu_ids(2))
# Wait for all workers to start up.
@ray.remote
def f():
time.sleep(0.2)
return os.getpid()
start_time = time.time()
while True:
num_workers_started = len(set(ray.get([f.remote() for _ in range(num_gpus)])))
if num_workers_started == num_gpus:
break
if time.time() > start_time + 10:
raise RayTestTimeoutException(
"Timed out while waiting for workers to start up."
)
list_of_ids = ray.get([f0.remote() for _ in range(10)])
assert list_of_ids == 10 * [[]]
ray.get([f1.remote() for _ in range(10)])
ray.get([f2.remote() for _ in range(10)])
# Test that actors have CUDA_VISIBLE_DEVICES set properly.
@ray.remote
class Actor0:
def __init__(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids] # noqa
)
# Set self.x to make sure that we got here.
self.x = 1
def test(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]
)
return self.x
@ray.remote(num_gpus=1)
class Actor1:
def __init__(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]
)
# Set self.x to make sure that we got here.
self.x = 1
def test(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]
)
return self.x
a0 = Actor0.remote()
ray.get(a0.test.remote())
a1 = Actor1.remote()
ray.get(a1.test.remote())
def test_zero_cpus(shutdown_only):
ray.init(num_cpus=0)
# We should be able to execute a task that requires 0 CPU resources.
@ray.remote(num_cpus=0)
def f():
return 1
ray.get(f.remote())
# We should be able to create an actor that requires 0 CPU resources.
@ray.remote(num_cpus=0)
class Actor:
def method(self):
pass
a = Actor.remote()
x = a.method.remote()
ray.get(x)
def test_zero_cpus_actor(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=0)
valid_node = cluster.add_node(num_cpus=2)
ray.init(address=cluster.address)
@ray.remote
class Foo:
def method(self):
return ray._private.worker.global_worker.node.unique_id
# Make sure tasks and actors run on the remote raylet.
a = Foo.remote()
assert valid_node.unique_id == ray.get(a.method.remote())
def test_fractional_resources(shutdown_only):
ray.init(num_cpus=6, num_gpus=3, resources={"Custom": 1})
@ray.remote(num_gpus=0.5)
class Foo1:
def method(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
return gpu_ids[0]
foos = [Foo1.remote() for _ in range(6)]
gpu_ids = ray.get([f.method.remote() for f in foos])
for i in range(3):
assert gpu_ids.count(i) == 2
del foos
@ray.remote
class Foo2:
def method(self):
pass
# Create an actor that requires 0.7 of the custom resource.
f1 = Foo2._remote([], {}, resources={"Custom": 0.7})
ray.get(f1.method.remote())
# Make sure that we cannot create an actor that requires 0.7 of the
# custom resource. TODO(rkn): Re-enable this once ray.wait is
# implemented.
f2 = Foo2._remote([], {}, resources={"Custom": 0.7})
ready, _ = ray.wait([f2.method.remote()], timeout=0.5)
assert len(ready) == 0
    # Make sure we can start an actor that requires only 0.3 of the custom
# resource.
f3 = Foo2._remote([], {}, resources={"Custom": 0.3})
ray.get(f3.method.remote())
del f1, f3
# Make sure that we get exceptions if we submit tasks that require a
# fractional number of resources greater than 1.
@ray.remote(num_cpus=1.5)
def test():
pass
with pytest.raises(ValueError):
test.remote()
with pytest.raises(ValueError):
Foo2._remote([], {}, resources={"Custom": 1.5})
def test_fractional_memory_round_down(shutdown_only):
@ray.remote
def test():
pass
with ray.init(num_cpus=1, _memory=2):
ray.get(test.options(memory=2.9).remote(), timeout=2)
with ray.init(num_cpus=1, _memory=0.2):
ray.get(test.options(memory=0.5).remote(), timeout=2)
with ray.init(num_cpus=1, _memory=2.2):
ray.get(test.options(memory=2.9).remote(), timeout=2)
with pytest.raises(ray.exceptions.GetTimeoutError):
ray.get(test.options(memory=3.1).remote(), timeout=2)
def test_multiple_raylets(ray_start_cluster):
# This test will define a bunch of tasks that can only be assigned to
# specific raylets, and we will check that they are assigned
# to the correct raylets.
cluster = ray_start_cluster
cluster.add_node(num_cpus=11, num_gpus=0)
cluster.add_node(num_cpus=5, num_gpus=5)
cluster.add_node(num_cpus=10, num_gpus=1)
ray.init(address=cluster.address)
cluster.wait_for_nodes()
# Define a bunch of remote functions that all return the socket name of
# the plasma store. Since there is a one-to-one correspondence between
# plasma stores and raylets (at least right now), this can be
# used to identify which raylet the task was assigned to.
# This must be run on the zeroth raylet.
@ray.remote(num_cpus=11)
def run_on_0():
return ray._private.worker.global_worker.node.plasma_store_socket_name
# This must be run on the first raylet.
@ray.remote(num_gpus=2)
def run_on_1():
return ray._private.worker.global_worker.node.plasma_store_socket_name
# This must be run on the second raylet.
@ray.remote(num_cpus=6, num_gpus=1)
def run_on_2():
return ray._private.worker.global_worker.node.plasma_store_socket_name
# This can be run anywhere.
@ray.remote(num_cpus=0, num_gpus=0)
def run_on_0_1_2():
return ray._private.worker.global_worker.node.plasma_store_socket_name
# This must be run on the first or second raylet.
@ray.remote(num_gpus=1)
def run_on_1_2():
return ray._private.worker.global_worker.node.plasma_store_socket_name
# This must be run on the zeroth or second raylet.
@ray.remote(num_cpus=8)
def run_on_0_2():
return ray._private.worker.global_worker.node.plasma_store_socket_name
def run_lots_of_tasks():
names = []
results = []
for i in range(100):
index = np.random.randint(6)
if index == 0:
names.append("run_on_0")
results.append(run_on_0.remote())
elif index == 1:
names.append("run_on_1")
results.append(run_on_1.remote())
elif index == 2:
names.append("run_on_2")
results.append(run_on_2.remote())
elif index == 3:
names.append("run_on_0_1_2")
results.append(run_on_0_1_2.remote())
elif index == 4:
names.append("run_on_1_2")
results.append(run_on_1_2.remote())
elif index == 5:
names.append("run_on_0_2")
results.append(run_on_0_2.remote())
return names, results
client_table = ray.nodes()
store_names = []
store_names += [
client["ObjectStoreSocketName"]
for client in client_table
if client["Resources"].get("GPU", 0) == 0
]
store_names += [
client["ObjectStoreSocketName"]
for client in client_table
if client["Resources"].get("GPU", 0) == 5
]
store_names += [
client["ObjectStoreSocketName"]
for client in client_table
if client["Resources"].get("GPU", 0) == 1
]
assert len(store_names) == 3
def validate_names_and_results(names, results):
for name, result in zip(names, ray.get(results)):
if name == "run_on_0":
assert result in [store_names[0]]
elif name == "run_on_1":
assert result in [store_names[1]]
elif name == "run_on_2":
assert result in [store_names[2]]
elif name == "run_on_0_1_2":
assert result in [store_names[0], store_names[1], store_names[2]]
elif name == "run_on_1_2":
assert result in [store_names[1], store_names[2]]
elif name == "run_on_0_2":
assert result in [store_names[0], store_names[2]]
else:
raise Exception("This should be unreachable.")
assert set(ray.get(results)) == set(store_names)
names, results = run_lots_of_tasks()
validate_names_and_results(names, results)
# Make sure the same thing works when this is nested inside of a task.
@ray.remote
def run_nested1():
names, results = run_lots_of_tasks()
return names, results
@ray.remote
def run_nested2():
names, results = ray.get(run_nested1.remote())
return names, results
names, results = ray.get(run_nested2.remote())
validate_names_and_results(names, results)
def test_custom_resources(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=1, resources={"CustomResource": 0})
custom_resource_node = cluster.add_node(num_cpus=1, resources={"CustomResource": 1})
ray.init(address=cluster.address)
@ray.remote
def f():
return ray._private.worker.global_worker.node.unique_id
@ray.remote(resources={"CustomResource": 1})
def g():
return ray._private.worker.global_worker.node.unique_id
@ray.remote(resources={"CustomResource": 1})
def h():
ray.get([f.remote() for _ in range(5)])
return ray._private.worker.global_worker.node.unique_id
# The g tasks should be scheduled only on the second raylet.
raylet_ids = set(ray.get([g.remote() for _ in range(50)]))
assert len(raylet_ids) == 1
assert list(raylet_ids)[0] == custom_resource_node.unique_id
# Make sure that resource bookkeeping works when a task that uses a
# custom resources gets blocked.
ray.get([h.remote() for _ in range(5)])
def test_node_id_resource(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=3)
cluster.add_node(num_cpus=3)
ray.init(address=cluster.address)
local_node = ray._private.state.current_node_id()
# Note that these will have the same IP in the test cluster
assert len(ray._private.state.node_ids()) == 2
assert local_node in ray._private.state.node_ids()
@ray.remote(resources={local_node: 1})
def f():
return ray._private.state.current_node_id()
# Check the node id resource is automatically usable for scheduling.
assert ray.get(f.remote()) == ray._private.state.current_node_id()
def test_two_custom_resources(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=3, resources={"CustomResource1": 1, "CustomResource2": 2})
custom_resource_node = cluster.add_node(
num_cpus=3, resources={"CustomResource1": 3, "CustomResource2": 4}
)
ray.init(address=cluster.address)
@ray.remote
def foo():
# Sleep a while to emulate a slow operation. This is needed to make
# sure tasks are scheduled to different nodes.
time.sleep(0.1)
return ray._private.worker.global_worker.node.unique_id
# Make sure each node has at least one idle worker.
wait_for_condition(lambda: len(set(ray.get([foo.remote() for _ in range(6)]))) == 2)
# Make sure the resource view is refreshed.
time.sleep(1)
@ray.remote(resources={"CustomResource1": 1})
def f():
time.sleep(0.001)
return ray._private.worker.global_worker.node.unique_id
@ray.remote(resources={"CustomResource2": 1})
def g():
time.sleep(0.001)
return ray._private.worker.global_worker.node.unique_id
@ray.remote(resources={"CustomResource1": 1, "CustomResource2": 3})
def h():
time.sleep(0.001)
return ray._private.worker.global_worker.node.unique_id
@ray.remote(resources={"CustomResource1": 4})
def j():
time.sleep(0.001)
return ray._private.worker.global_worker.node.unique_id
@ray.remote(resources={"CustomResource3": 1})
def k():
time.sleep(0.001)
return ray._private.worker.global_worker.node.unique_id
# The f and g tasks should be scheduled on both raylets.
assert len(set(ray.get([f.remote() for _ in range(500)]))) == 2
assert len(set(ray.get([g.remote() for _ in range(500)]))) == 2
# The h tasks should be scheduled only on the second raylet.
raylet_ids = set(ray.get([h.remote() for _ in range(50)]))
assert len(raylet_ids) == 1
assert list(raylet_ids)[0] == custom_resource_node.unique_id
# Make sure that tasks with unsatisfied custom resource requirements do
# not get scheduled.
ready_ids, remaining_ids = ray.wait([j.remote(), k.remote()], timeout=0.5)
assert ready_ids == []
def test_many_custom_resources(shutdown_only):
# This eventually turns into a command line argument which on windows is
# limited to 32,767 characters.
if sys.platform == "win32":
num_custom_resources = 4000
else:
num_custom_resources = 10000
total_resources = {
str(i): np.random.randint(1, 7) for i in range(num_custom_resources) # noqa
}
ray.init(num_cpus=5, resources=total_resources)
def f():
return 1
remote_functions = []
for _ in range(20):
num_resources = np.random.randint(0, num_custom_resources + 1)
permuted_resources = np.random.permutation(num_custom_resources)[:num_resources]
random_resources = {str(i): total_resources[str(i)] for i in permuted_resources}
remote_function = ray.remote(resources=random_resources)(f)
remote_functions.append(remote_function)
remote_functions.append(ray.remote(f))
remote_functions.append(ray.remote(resources=total_resources)(f))
results = []
for remote_function in remote_functions:
results.append(remote_function.remote())
results.append(remote_function.remote())
results.append(remote_function.remote())
ray.get(results)
# TODO: 5 retry attempts may be too little for Travis and we may need to
# increase it if this test begins to be flaky on Travis.
def test_zero_capacity_deletion_semantics(shutdown_only):
ray.init(num_cpus=2, num_gpus=1, resources={"test_resource": 1})
def delete_miscellaneous_item(resources):
del resources["memory"]
del resources["object_store_memory"]
for key in list(resources.keys()):
if key.startswith("node:"):
del resources[key]
def test():
resources = ray.available_resources()
MAX_RETRY_ATTEMPTS = 5
retry_count = 0
delete_miscellaneous_item(resources)
while resources and retry_count < MAX_RETRY_ATTEMPTS:
time.sleep(0.1)
resources = ray.available_resources()
delete_miscellaneous_item(resources)
retry_count += 1
if retry_count >= MAX_RETRY_ATTEMPTS:
raise RuntimeError(
"Resources were available even after {} retries.".format(
MAX_RETRY_ATTEMPTS
),
resources,
)
return resources
function = ray.remote(num_cpus=2, num_gpus=1, resources={"test_resource": 1})(test)
cluster_resources = ray.get(function.remote())
# All cluster resources should be utilized and
# cluster_resources must be empty
assert cluster_resources == {}
if __name__ == "__main__":
if os.environ.get("PARALLEL_CI"):
sys.exit(pytest.main(["-n", "auto", "--boxed", "-vs", __file__]))
else:
sys.exit(pytest.main(["-sv", __file__]))
|
{
"content_hash": "2deae8967a6f6662b93e06ed7aefaade",
"timestamp": "",
"source": "github",
"line_count": 526,
"max_line_length": 88,
"avg_line_length": 32.90114068441065,
"alnum_prop": 0.6050502715821102,
"repo_name": "ray-project/ray",
"id": "e1086c6e83f853ce9181685ad3e6cedd4df00e64",
"size": "17322",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/tests/test_advanced_2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "37490"
},
{
"name": "C++",
"bytes": "5972422"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Cython",
"bytes": "227477"
},
{
"name": "Dockerfile",
"bytes": "20210"
},
{
"name": "HTML",
"bytes": "30382"
},
{
"name": "Java",
"bytes": "1160849"
},
{
"name": "JavaScript",
"bytes": "1128"
},
{
"name": "Jinja",
"bytes": "6371"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "PowerShell",
"bytes": "1114"
},
{
"name": "Python",
"bytes": "19539109"
},
{
"name": "Shell",
"bytes": "134583"
},
{
"name": "Starlark",
"bytes": "334862"
},
{
"name": "TypeScript",
"bytes": "190599"
}
],
"symlink_target": ""
}
|
'''
@author: Michael Vorotyntsev
@email: linkofwise@gmail.com
@github: unaxfromsibiria
'''
import codecs
import logging
import json
from enum import Enum
class ClientGroupEnum(Enum):
Service = 1
Server = 2
Manager = 3
class Configuration(dict):
_logger = None
def __init__(self, **source):
self.update(source)
self._logger = logging.getLogger(self.get('logger'))
@property
def port(self):
return self.get('port')
@property
def host(self):
return self.get('host')
@property
def logger(self):
return self._logger
@property
def is_completed(self):
return True
@property
def worker_count(self):
return int(self.get('workers') or 1)
@property
def manage_worker_count(self):
return int(self.get('manage_workers') or 1)
@property
def reconnect_delay(self):
return float(self.get('reconnect_delay') or 2)
class JsonFileConfiguration(Configuration):
def __init__(self, path):
with codecs.open(path) as conf:
            json_text = conf.read()
data = json.loads(json_text)
assert isinstance(data, dict)
super().__init__(**data)
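# Usage sketch (illustrative only; the file name and keys are assumptions):
#
#   conf = JsonFileConfiguration('/etc/roombnode/service.json')
#   conf.port, conf.worker_count
#
# where the JSON file contains something like
# {"host": "127.0.0.1", "port": 8080, "workers": 4, "logger": "service"}.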
|
{
"content_hash": "fd82c372ca9531c5185256ee33cb4448",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 60,
"avg_line_length": 19.919354838709676,
"alnum_prop": 0.6089068825910932,
"repo_name": "unaxfromsibiria/roombnode",
"id": "2c4f89caf329535077a66df324416839c7e726b0",
"size": "1235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/roombnode/common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26931"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from datetime import datetime, timedelta
import gettext
from importlib import import_module
import os
from unittest import TestCase, skipIf
try:
import pytz
except ImportError:
pytz = None
from django import forms
from django.conf import settings
from django.contrib import admin
from django.contrib.admin import widgets
from django.contrib.admin.tests import AdminSeleniumWebDriverTestCase
from django.core.files.storage import default_storage
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db.models import CharField, DateField
from django.test import TestCase as DjangoTestCase
from django.test import override_settings
from django.utils import six
from django.utils import translation
from . import models
from .widgetadmin import site as widget_admin_site
admin_static_prefix = lambda: {
'ADMIN_STATIC_PREFIX': "%sadmin/" % settings.STATIC_URL,
}
class AdminFormfieldForDBFieldTests(TestCase):
"""
Tests for correct behavior of ModelAdmin.formfield_for_dbfield
"""
def assertFormfield(self, model, fieldname, widgetclass, **admin_overrides):
"""
Helper to call formfield_for_dbfield for a given model and field name
and verify that the returned formfield is appropriate.
"""
# Override any settings on the model admin
class MyModelAdmin(admin.ModelAdmin):
pass
for k in admin_overrides:
setattr(MyModelAdmin, k, admin_overrides[k])
# Construct the admin, and ask it for a formfield
ma = MyModelAdmin(model, admin.site)
ff = ma.formfield_for_dbfield(model._meta.get_field(fieldname), request=None)
# "unwrap" the widget wrapper, if needed
if isinstance(ff.widget, widgets.RelatedFieldWidgetWrapper):
widget = ff.widget.widget
else:
widget = ff.widget
# Check that we got a field of the right type
self.assertTrue(
isinstance(widget, widgetclass),
"Wrong widget for %s.%s: expected %s, got %s" % (
model.__class__.__name__,
fieldname,
widgetclass,
type(widget),
)
)
# Return the formfield so that other tests can continue
return ff
def test_DateField(self):
self.assertFormfield(models.Event, 'start_date', widgets.AdminDateWidget)
def test_DateTimeField(self):
self.assertFormfield(models.Member, 'birthdate', widgets.AdminSplitDateTime)
def test_TimeField(self):
self.assertFormfield(models.Event, 'start_time', widgets.AdminTimeWidget)
def test_TextField(self):
self.assertFormfield(models.Event, 'description', widgets.AdminTextareaWidget)
def test_URLField(self):
self.assertFormfield(models.Event, 'link', widgets.AdminURLFieldWidget)
def test_IntegerField(self):
self.assertFormfield(models.Event, 'min_age', widgets.AdminIntegerFieldWidget)
def test_CharField(self):
self.assertFormfield(models.Member, 'name', widgets.AdminTextInputWidget)
def test_EmailField(self):
self.assertFormfield(models.Member, 'email', widgets.AdminEmailInputWidget)
def test_FileField(self):
self.assertFormfield(models.Album, 'cover_art', widgets.AdminFileWidget)
def test_ForeignKey(self):
self.assertFormfield(models.Event, 'main_band', forms.Select)
def test_raw_id_ForeignKey(self):
self.assertFormfield(models.Event, 'main_band', widgets.ForeignKeyRawIdWidget,
raw_id_fields=['main_band'])
def test_radio_fields_ForeignKey(self):
ff = self.assertFormfield(models.Event, 'main_band', widgets.AdminRadioSelect,
radio_fields={'main_band': admin.VERTICAL})
self.assertEqual(ff.empty_label, None)
def test_many_to_many(self):
self.assertFormfield(models.Band, 'members', forms.SelectMultiple)
def test_raw_id_many_to_many(self):
self.assertFormfield(models.Band, 'members', widgets.ManyToManyRawIdWidget,
raw_id_fields=['members'])
def test_filtered_many_to_many(self):
self.assertFormfield(models.Band, 'members', widgets.FilteredSelectMultiple,
filter_vertical=['members'])
def test_formfield_overrides(self):
self.assertFormfield(models.Event, 'start_date', forms.TextInput,
formfield_overrides={DateField: {'widget': forms.TextInput}})
def test_formfield_overrides_widget_instances(self):
"""
Test that widget instances in formfield_overrides are not shared between
different fields. (#19423)
"""
class BandAdmin(admin.ModelAdmin):
formfield_overrides = {
CharField: {'widget': forms.TextInput(attrs={'size': '10'})}
}
ma = BandAdmin(models.Band, admin.site)
f1 = ma.formfield_for_dbfield(models.Band._meta.get_field('name'), request=None)
f2 = ma.formfield_for_dbfield(models.Band._meta.get_field('style'), request=None)
self.assertNotEqual(f1.widget, f2.widget)
self.assertEqual(f1.widget.attrs['maxlength'], '100')
self.assertEqual(f2.widget.attrs['maxlength'], '20')
self.assertEqual(f2.widget.attrs['size'], '10')
def test_field_with_choices(self):
self.assertFormfield(models.Member, 'gender', forms.Select)
def test_choices_with_radio_fields(self):
self.assertFormfield(models.Member, 'gender', widgets.AdminRadioSelect,
radio_fields={'gender': admin.VERTICAL})
def test_inheritance(self):
self.assertFormfield(models.Album, 'backside_art', widgets.AdminFileWidget)
def test_m2m_widgets(self):
"""m2m fields help text as it applies to admin app (#9321)."""
class AdvisorAdmin(admin.ModelAdmin):
filter_vertical = ['companies']
self.assertFormfield(models.Advisor, 'companies', widgets.FilteredSelectMultiple,
filter_vertical=['companies'])
ma = AdvisorAdmin(models.Advisor, admin.site)
f = ma.formfield_for_dbfield(models.Advisor._meta.get_field('companies'), request=None)
self.assertEqual(six.text_type(f.help_text), 'Hold down "Control", or "Command" on a Mac, to select more than one.')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF='admin_widgets.urls')
class AdminFormfieldForDBFieldWithRequestTests(DjangoTestCase):
fixtures = ["admin-widgets-users.xml"]
def test_filter_choices_by_request_user(self):
"""
Ensure the user can only see their own cars in the foreign key dropdown.
"""
self.client.login(username="super", password="secret")
response = self.client.get("/admin_widgets/cartire/add/")
self.assertNotContains(response, "BMW M3")
self.assertContains(response, "Volkswagon Passat")
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF='admin_widgets.urls')
class AdminForeignKeyWidgetChangeList(DjangoTestCase):
fixtures = ["admin-widgets-users.xml"]
def setUp(self):
self.client.login(username="super", password="secret")
def test_changelist_ForeignKey(self):
response = self.client.get('/admin_widgets/car/')
self.assertContains(response, '/auth/user/add/')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF='admin_widgets.urls')
class AdminForeignKeyRawIdWidget(DjangoTestCase):
fixtures = ["admin-widgets-users.xml"]
def setUp(self):
self.client.login(username="super", password="secret")
def test_nonexistent_target_id(self):
band = models.Band.objects.create(name='Bogey Blues')
pk = band.pk
band.delete()
post_data = {
"main_band": '%s' % pk,
}
# Try posting with a non-existent pk in a raw id field: this
# should result in an error message, not a server exception.
response = self.client.post('/admin_widgets/event/add/', post_data)
self.assertContains(response,
'Select a valid choice. That choice is not one of the available choices.')
def test_invalid_target_id(self):
for test_str in ('Iñtërnâtiônàlizætiøn', "1234'", -1234):
# This should result in an error message, not a server exception.
response = self.client.post('/admin_widgets/event/add/',
{"main_band": test_str})
self.assertContains(response,
'Select a valid choice. That choice is not one of the available choices.')
def test_url_params_from_lookup_dict_any_iterable(self):
lookup1 = widgets.url_params_from_lookup_dict({'color__in': ('red', 'blue')})
lookup2 = widgets.url_params_from_lookup_dict({'color__in': ['red', 'blue']})
self.assertEqual(lookup1, {'color__in': 'red,blue'})
self.assertEqual(lookup1, lookup2)
def test_url_params_from_lookup_dict_callable(self):
def my_callable():
return 'works'
lookup1 = widgets.url_params_from_lookup_dict({'myfield': my_callable})
lookup2 = widgets.url_params_from_lookup_dict({'myfield': my_callable()})
self.assertEqual(lookup1, lookup2)
class FilteredSelectMultipleWidgetTest(DjangoTestCase):
def test_render(self):
w = widgets.FilteredSelectMultiple('test', False)
self.assertHTMLEqual(
w.render('test', 'test'),
'<select multiple="multiple" name="test" class="selectfilter">\n</select><script type="text/javascript">addEvent(window, "load", function(e) {SelectFilter.init("id_test", "test", 0, "%(ADMIN_STATIC_PREFIX)s"); });</script>\n' % admin_static_prefix()
)
def test_stacked_render(self):
w = widgets.FilteredSelectMultiple('test', True)
self.assertHTMLEqual(
w.render('test', 'test'),
'<select multiple="multiple" name="test" class="selectfilterstacked">\n</select><script type="text/javascript">addEvent(window, "load", function(e) {SelectFilter.init("id_test", "test", 1, "%(ADMIN_STATIC_PREFIX)s"); });</script>\n' % admin_static_prefix()
)
class AdminDateWidgetTest(DjangoTestCase):
def test_attrs(self):
"""
Ensure that user-supplied attrs are used.
Refs #12073.
"""
w = widgets.AdminDateWidget()
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="2007-12-01" type="text" class="vDateField" name="test" size="10" />',
)
# pass attrs to widget
w = widgets.AdminDateWidget(attrs={'size': 20, 'class': 'myDateField'})
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="2007-12-01" type="text" class="myDateField" name="test" size="20" />',
)
class AdminTimeWidgetTest(DjangoTestCase):
def test_attrs(self):
"""
Ensure that user-supplied attrs are used.
Refs #12073.
"""
w = widgets.AdminTimeWidget()
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="09:30:00" type="text" class="vTimeField" name="test" size="8" />',
)
# pass attrs to widget
w = widgets.AdminTimeWidget(attrs={'size': 20, 'class': 'myTimeField'})
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="09:30:00" type="text" class="myTimeField" name="test" size="20" />',
)
class AdminSplitDateTimeWidgetTest(DjangoTestCase):
def test_render(self):
w = widgets.AdminSplitDateTime()
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<p class="datetime">Date: <input value="2007-12-01" type="text" class="vDateField" name="test_0" size="10" /><br />Time: <input value="09:30:00" type="text" class="vTimeField" name="test_1" size="8" /></p>',
)
def test_localization(self):
w = widgets.AdminSplitDateTime()
with self.settings(USE_L10N=True), translation.override('de-at'):
w.is_localized = True
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<p class="datetime">Datum: <input value="01.12.2007" type="text" class="vDateField" name="test_0" size="10" /><br />Zeit: <input value="09:30:00" type="text" class="vTimeField" name="test_1" size="8" /></p>',
)
class AdminURLWidgetTest(DjangoTestCase):
def test_render(self):
w = widgets.AdminURLFieldWidget()
self.assertHTMLEqual(
w.render('test', ''),
'<input class="vURLField" name="test" type="url" />'
)
self.assertHTMLEqual(
w.render('test', 'http://example.com'),
'<p class="url">Currently:<a href="http://example.com">http://example.com</a><br />Change:<input class="vURLField" name="test" type="url" value="http://example.com" /></p>'
)
def test_render_idn(self):
w = widgets.AdminURLFieldWidget()
self.assertHTMLEqual(
w.render('test', 'http://example-äüö.com'),
'<p class="url">Currently: <a href="http://xn--example--7za4pnc.com">http://example-äüö.com</a><br />Change:<input class="vURLField" name="test" type="url" value="http://example-äüö.com" /></p>'
)
def test_render_quoting(self):
# WARNING: Don't use assertHTMLEqual in that testcase!
# assertHTMLEqual will get rid of some escapes which are tested here!
w = widgets.AdminURLFieldWidget()
self.assertEqual(
w.render('test', 'http://example.com/<sometag>some text</sometag>'),
'<p class="url">Currently: <a href="http://example.com/%3Csometag%3Esome%20text%3C/sometag%3E">http://example.com/<sometag>some text</sometag></a><br />Change: <input class="vURLField" name="test" type="url" value="http://example.com/<sometag>some text</sometag>" /></p>'
)
self.assertEqual(
w.render('test', 'http://example-äüö.com/<sometag>some text</sometag>'),
'<p class="url">Currently: <a href="http://xn--example--7za4pnc.com/%3Csometag%3Esome%20text%3C/sometag%3E">http://example-äüö.com/<sometag>some text</sometag></a><br />Change: <input class="vURLField" name="test" type="url" value="http://example-äüö.com/<sometag>some text</sometag>" /></p>'
)
self.assertEqual(
w.render('test', 'http://www.example.com/%C3%A4"><script>alert("XSS!")</script>"'),
'<p class="url">Currently: <a href="http://www.example.com/%C3%A4%22%3E%3Cscript%3Ealert(%22XSS!%22)%3C/script%3E%22">http://www.example.com/%C3%A4"><script>alert("XSS!")</script>"</a><br />Change: <input class="vURLField" name="test" type="url" value="http://www.example.com/%C3%A4"><script>alert("XSS!")</script>"" /></p>'
)
class AdminFileWidgetTest(DjangoTestCase):
def test_render(self):
band = models.Band.objects.create(name='Linkin Park')
album = band.album_set.create(
name='Hybrid Theory', cover_art=r'albums\hybrid_theory.jpg'
)
w = widgets.AdminFileWidget()
self.assertHTMLEqual(
w.render('test', album.cover_art),
'<p class="file-upload">Currently: <a href="%(STORAGE_URL)salbums/hybrid_theory.jpg">albums\hybrid_theory.jpg</a> <span class="clearable-file-input"><input type="checkbox" name="test-clear" id="test-clear_id" /> <label for="test-clear_id">Clear</label></span><br />Change: <input type="file" name="test" /></p>' % {
'STORAGE_URL': default_storage.url('')
},
)
self.assertHTMLEqual(
w.render('test', SimpleUploadedFile('test', b'content')),
'<input type="file" name="test" />',
)
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class ForeignKeyRawIdWidgetTest(DjangoTestCase):
def test_render(self):
band = models.Band.objects.create(name='Linkin Park')
band.album_set.create(
name='Hybrid Theory', cover_art=r'albums\hybrid_theory.jpg'
)
rel = models.Album._meta.get_field('band').rel
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('test', band.pk, attrs={}), (
'<input type="text" name="test" value="%(bandpk)s" class="vForeignKeyRawIdAdminField" />'
'<a href="/admin_widgets/band/?_to_field=id" class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
' <strong>Linkin Park</strong>'
) % {'bandpk': band.pk}
)
def test_relations_to_non_primary_key(self):
# Check that ForeignKeyRawIdWidget works with fields which aren't
# related to the model's primary key.
apple = models.Inventory.objects.create(barcode=86, name='Apple')
models.Inventory.objects.create(barcode=22, name='Pear')
core = models.Inventory.objects.create(
barcode=87, name='Core', parent=apple
)
rel = models.Inventory._meta.get_field('parent').rel
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('test', core.parent_id, attrs={}), (
'<input type="text" name="test" value="86" class="vForeignKeyRawIdAdminField" />'
'<a href="/admin_widgets/inventory/?_to_field=barcode" class="related-lookup" id="lookup_id_test" title="Lookup">'
'</a> <strong>Apple</strong>'
)
)
def test_fk_related_model_not_in_admin(self):
# FK to a model not registered with admin site. Raw ID widget should
# have no magnifying glass link. See #16542
big_honeycomb = models.Honeycomb.objects.create(location='Old tree')
big_honeycomb.bee_set.create()
rel = models.Bee._meta.get_field('honeycomb').rel
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('honeycomb_widget', big_honeycomb.pk, attrs={}),
'<input type="text" name="honeycomb_widget" value="%(hcombpk)s" /> <strong>Honeycomb object</strong>' % {'hcombpk': big_honeycomb.pk}
)
def test_fk_to_self_model_not_in_admin(self):
# FK to self, not registered with admin site. Raw ID widget should have
# no magnifying glass link. See #16542
subject1 = models.Individual.objects.create(name='Subject #1')
models.Individual.objects.create(name='Child', parent=subject1)
rel = models.Individual._meta.get_field('parent').rel
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('individual_widget', subject1.pk, attrs={}),
'<input type="text" name="individual_widget" value="%(subj1pk)s" /> <strong>Individual object</strong>' % {'subj1pk': subject1.pk}
)
def test_proper_manager_for_label_lookup(self):
# see #9258
rel = models.Inventory._meta.get_field('parent').rel
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
hidden = models.Inventory.objects.create(
barcode=93, name='Hidden', hidden=True
)
child_of_hidden = models.Inventory.objects.create(
barcode=94, name='Child of hidden', parent=hidden
)
self.assertHTMLEqual(
w.render('test', child_of_hidden.parent_id, attrs={}), (
'<input type="text" name="test" value="93" class="vForeignKeyRawIdAdminField" />'
'<a href="/admin_widgets/inventory/?_to_field=barcode" class="related-lookup" id="lookup_id_test" title="Lookup">'
'</a> <strong>Hidden</strong>'
)
)
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class ManyToManyRawIdWidgetTest(DjangoTestCase):
def test_render(self):
band = models.Band.objects.create(name='Linkin Park')
m1 = models.Member.objects.create(name='Chester')
m2 = models.Member.objects.create(name='Mike')
band.members.add(m1, m2)
rel = models.Band._meta.get_field('members').rel
w = widgets.ManyToManyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('test', [m1.pk, m2.pk], attrs={}), (
'<input type="text" name="test" value="%(m1pk)s,%(m2pk)s" class="vManyToManyRawIdAdminField" />'
'<a href="/admin_widgets/member/" class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
) % dict(m1pk=m1.pk, m2pk=m2.pk)
)
self.assertHTMLEqual(
w.render('test', [m1.pk]), (
'<input type="text" name="test" value="%(m1pk)s" class="vManyToManyRawIdAdminField">'
'<a href="/admin_widgets/member/" class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
) % dict(m1pk=m1.pk)
)
def test_m2m_related_model_not_in_admin(self):
# M2M relationship with model not registered with admin site. Raw ID
# widget should have no magnifying glass link. See #16542
consultor1 = models.Advisor.objects.create(name='Rockstar Techie')
c1 = models.Company.objects.create(name='Doodle')
c2 = models.Company.objects.create(name='Pear')
consultor1.companies.add(c1, c2)
rel = models.Advisor._meta.get_field('companies').rel
w = widgets.ManyToManyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('company_widget1', [c1.pk, c2.pk], attrs={}),
'<input type="text" name="company_widget1" value="%(c1pk)s,%(c2pk)s" />' % {'c1pk': c1.pk, 'c2pk': c2.pk}
)
self.assertHTMLEqual(
w.render('company_widget2', [c1.pk]),
'<input type="text" name="company_widget2" value="%(c1pk)s" />' % {'c1pk': c1.pk}
)
class RelatedFieldWidgetWrapperTests(DjangoTestCase):
def test_no_can_add_related(self):
rel = models.Individual._meta.get_field('parent').rel
w = widgets.AdminRadioSelect()
# Used to fail with a name error.
w = widgets.RelatedFieldWidgetWrapper(w, rel, widget_admin_site)
self.assertFalse(w.can_add_related)
def test_select_multiple_widget_cant_change_delete_related(self):
rel = models.Individual._meta.get_field('parent').rel
widget = forms.SelectMultiple()
wrapper = widgets.RelatedFieldWidgetWrapper(
widget, rel, widget_admin_site,
can_add_related=True,
can_change_related=True,
can_delete_related=True,
)
self.assertTrue(wrapper.can_add_related)
self.assertFalse(wrapper.can_change_related)
self.assertFalse(wrapper.can_delete_related)
def test_on_delete_cascade_rel_cant_delete_related(self):
rel = models.Individual._meta.get_field('soulmate').rel
widget = forms.Select()
wrapper = widgets.RelatedFieldWidgetWrapper(
widget, rel, widget_admin_site,
can_add_related=True,
can_change_related=True,
can_delete_related=True,
)
self.assertTrue(wrapper.can_add_related)
self.assertTrue(wrapper.can_change_related)
self.assertFalse(wrapper.can_delete_related)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF='admin_widgets.urls')
class DateTimePickerSeleniumFirefoxTests(AdminSeleniumWebDriverTestCase):
available_apps = ['admin_widgets'] + AdminSeleniumWebDriverTestCase.available_apps
fixtures = ['admin-widgets-users.xml']
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def test_show_hide_date_time_picker_widgets(self):
"""
Ensure that pressing the ESC key closes the date and time picker
widgets.
Refs #17064.
"""
from selenium.webdriver.common.keys import Keys
self.admin_login(username='super', password='secret', login_url='/')
# Open a page that has a date and time picker widgets
self.selenium.get('%s%s' % (self.live_server_url,
'/admin_widgets/member/add/'))
# First, with the date picker widget ---------------------------------
# Check that the date picker is hidden
self.assertEqual(
self.get_css_value('#calendarbox0', 'display'), 'none')
# Click the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# Check that the date picker is visible
self.assertEqual(
self.get_css_value('#calendarbox0', 'display'), 'block')
# Press the ESC key
self.selenium.find_element_by_tag_name('body').send_keys([Keys.ESCAPE])
# Check that the date picker is hidden again
self.assertEqual(
self.get_css_value('#calendarbox0', 'display'), 'none')
# Then, with the time picker widget ----------------------------------
# Check that the time picker is hidden
self.assertEqual(
self.get_css_value('#clockbox0', 'display'), 'none')
# Click the time icon
self.selenium.find_element_by_id('clocklink0').click()
# Check that the time picker is visible
self.assertEqual(
self.get_css_value('#clockbox0', 'display'), 'block')
# Press the ESC key
self.selenium.find_element_by_tag_name('body').send_keys([Keys.ESCAPE])
# Check that the time picker is hidden again
self.assertEqual(
self.get_css_value('#clockbox0', 'display'), 'none')
def test_calendar_nonday_class(self):
"""
Ensure cells that are not days of the month have the `nonday` CSS class.
Refs #4574.
"""
self.admin_login(username='super', password='secret', login_url='/')
# Open a page that has a date and time picker widgets
self.selenium.get('%s%s' % (self.live_server_url,
'/admin_widgets/member/add/'))
# fill in the birth date.
self.selenium.find_element_by_id('id_birthdate_0').send_keys('2013-06-01')
# Click the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# get all the tds within the calendar
calendar0 = self.selenium.find_element_by_id('calendarin0')
tds = calendar0.find_elements_by_tag_name('td')
# make sure the first and last 6 cells have class nonday
for td in tds[:6] + tds[-6:]:
self.assertEqual(td.get_attribute('class'), 'nonday')
def test_calendar_selected_class(self):
"""
Ensure cell for the day in the input has the `selected` CSS class.
Refs #4574.
"""
self.admin_login(username='super', password='secret', login_url='/')
# Open a page that has date and time picker widgets
self.selenium.get('%s%s' % (self.live_server_url,
'/admin_widgets/member/add/'))
# fill in the birth date.
self.selenium.find_element_by_id('id_birthdate_0').send_keys('2013-06-01')
# Click the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# get all the tds within the calendar
calendar0 = self.selenium.find_element_by_id('calendarin0')
tds = calendar0.find_elements_by_tag_name('td')
# verify the selected cell
selected = tds[6]
self.assertEqual(selected.get_attribute('class'), 'selected')
self.assertEqual(selected.text, '1')
def test_calendar_no_selected_class(self):
"""
Ensure no cells are given the selected class when the field is empty.
Refs #4574.
"""
self.admin_login(username='super', password='secret', login_url='/')
# Open a page that has date and time picker widgets
self.selenium.get('%s%s' % (self.live_server_url,
'/admin_widgets/member/add/'))
# Click the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# get all the tds within the calendar
calendar0 = self.selenium.find_element_by_id('calendarin0')
tds = calendar0.find_elements_by_tag_name('td')
# verify there are no cells with the selected class
selected = [td for td in tds if td.get_attribute('class') == 'selected']
self.assertEqual(len(selected), 0)
def test_calendar_show_date_from_input(self):
"""
Ensure that the calendar shows the date from the input field for every
locale supported by Django.
"""
self.admin_login(username='super', password='secret', login_url='/')
# Enter test data
member = models.Member.objects.create(name='Bob', birthdate=datetime(1984, 5, 15), gender='M')
# Get month name translations for every locale
month_string = 'January February March April May June July August September October November December'
path = os.path.join(os.path.dirname(import_module('django.contrib.admin').__file__), 'locale')
for language_code, language_name in settings.LANGUAGES:
try:
catalog = gettext.translation('djangojs', path, [language_code])
except IOError:
continue
if month_string in catalog._catalog:
month_names = catalog._catalog[month_string]
else:
month_names = month_string
# Get the expected caption
may_translation = month_names.split(' ')[4]
expected_caption = '{0:s} {1:d}'.format(may_translation, 1984)
# Test with every locale
with override_settings(LANGUAGE_CODE=language_code, USE_L10N=True):
# Open a page that has a date picker widget
self.selenium.get('{}{}'.format(self.live_server_url,
'/admin_widgets/member/{}/'.format(member.pk)))
# Click on the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# Get the calendar caption
calendar0 = self.selenium.find_element_by_id('calendarin0')
caption = calendar0.find_element_by_tag_name('caption')
# Make sure that the right month and year are displayed
self.assertEqual(caption.text, expected_caption)
class DateTimePickerSeleniumChromeTests(DateTimePickerSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class DateTimePickerSeleniumIETests(DateTimePickerSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
@skipIf(pytz is None, "this test requires pytz")
@override_settings(TIME_ZONE='Asia/Singapore')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF='admin_widgets.urls')
class DateTimePickerShortcutsSeleniumFirefoxTests(AdminSeleniumWebDriverTestCase):
available_apps = ['admin_widgets'] + AdminSeleniumWebDriverTestCase.available_apps
fixtures = ['admin-widgets-users.xml']
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def test_date_time_picker_shortcuts(self):
"""
Ensure that date/time/datetime picker shortcuts work in the current time zone.
Refs #20663.
This test case is fairly tricky: it relies on selenium still running the browser
in the default time zone "America/Chicago" despite `override_settings` changing
the time zone to "Asia/Singapore".
"""
self.admin_login(username='super', password='secret', login_url='/')
error_margin = timedelta(seconds=10)
# If we are neighbouring a DST, we add an hour of error margin.
tz = pytz.timezone('America/Chicago')
utc_now = datetime.now(pytz.utc)
tz_yesterday = (utc_now - timedelta(days=1)).astimezone(tz).tzname()
tz_tomorrow = (utc_now + timedelta(days=1)).astimezone(tz).tzname()
if tz_yesterday != tz_tomorrow:
error_margin += timedelta(hours=1)
now = datetime.now()
self.selenium.get('%s%s' % (self.live_server_url,
'/admin_widgets/member/add/'))
self.selenium.find_element_by_id('id_name').send_keys('test')
# Click on the "today" and "now" shortcuts.
shortcuts = self.selenium.find_elements_by_css_selector(
'.field-birthdate .datetimeshortcuts')
for shortcut in shortcuts:
shortcut.find_element_by_tag_name('a').click()
# Check that there is a time zone mismatch warning.
# Warning: This would effectively fail if the TIME_ZONE defined in the
# settings has the same UTC offset as "Asia/Singapore" because the
# mismatch warning would be rightfully missing from the page.
self.selenium.find_elements_by_css_selector(
'.field-birthdate .timezonewarning')
# Submit the form.
self.selenium.find_element_by_tag_name('form').submit()
self.wait_page_loaded()
# Make sure that "now" in javascript is within 10 seconds
# from "now" on the server side.
member = models.Member.objects.get(name='test')
self.assertGreater(member.birthdate, now - error_margin)
self.assertLess(member.birthdate, now + error_margin)
class DateTimePickerShortcutsSeleniumChromeTests(DateTimePickerShortcutsSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class DateTimePickerShortcutsSeleniumIETests(DateTimePickerShortcutsSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF='admin_widgets.urls')
class HorizontalVerticalFilterSeleniumFirefoxTests(AdminSeleniumWebDriverTestCase):
available_apps = ['admin_widgets'] + AdminSeleniumWebDriverTestCase.available_apps
fixtures = ['admin-widgets-users.xml']
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def setUp(self):
self.lisa = models.Student.objects.create(name='Lisa')
self.john = models.Student.objects.create(name='John')
self.bob = models.Student.objects.create(name='Bob')
self.peter = models.Student.objects.create(name='Peter')
self.jenny = models.Student.objects.create(name='Jenny')
self.jason = models.Student.objects.create(name='Jason')
self.cliff = models.Student.objects.create(name='Cliff')
self.arthur = models.Student.objects.create(name='Arthur')
self.school = models.School.objects.create(name='School of Awesome')
super(HorizontalVerticalFilterSeleniumFirefoxTests, self).setUp()
def assertActiveButtons(self, mode, field_name, choose, remove,
choose_all=None, remove_all=None):
choose_link = '#id_%s_add_link' % field_name
choose_all_link = '#id_%s_add_all_link' % field_name
remove_link = '#id_%s_remove_link' % field_name
remove_all_link = '#id_%s_remove_all_link' % field_name
self.assertEqual(self.has_css_class(choose_link, 'active'), choose)
self.assertEqual(self.has_css_class(remove_link, 'active'), remove)
if mode == 'horizontal':
self.assertEqual(self.has_css_class(choose_all_link, 'active'), choose_all)
self.assertEqual(self.has_css_class(remove_all_link, 'active'), remove_all)
def execute_basic_operations(self, mode, field_name):
from_box = '#id_%s_from' % field_name
to_box = '#id_%s_to' % field_name
choose_link = 'id_%s_add_link' % field_name
choose_all_link = 'id_%s_add_all_link' % field_name
remove_link = 'id_%s_remove_link' % field_name
remove_all_link = 'id_%s_remove_all_link' % field_name
# Initial positions ---------------------------------------------------
self.assertSelectOptions(from_box,
[str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id)])
self.assertSelectOptions(to_box,
[str(self.lisa.id), str(self.peter.id)])
self.assertActiveButtons(mode, field_name, False, False, True, True)
# Click 'Choose all' --------------------------------------------------
if mode == 'horizontal':
self.selenium.find_element_by_id(choose_all_link).click()
elif mode == 'vertical':
# There's no 'Choose all' button in vertical mode, so individually
# select all options and click 'Choose'.
for option in self.selenium.find_elements_by_css_selector(from_box + ' > option'):
option.click()
self.selenium.find_element_by_id(choose_link).click()
self.assertSelectOptions(from_box, [])
self.assertSelectOptions(to_box,
[str(self.lisa.id), str(self.peter.id),
str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id)])
self.assertActiveButtons(mode, field_name, False, False, False, True)
# Click 'Remove all' --------------------------------------------------
if mode == 'horizontal':
self.selenium.find_element_by_id(remove_all_link).click()
elif mode == 'vertical':
# There's no 'Remove all' button in vertical mode, so individually
# select all options and click 'Remove'.
for option in self.selenium.find_elements_by_css_selector(to_box + ' > option'):
option.click()
self.selenium.find_element_by_id(remove_link).click()
self.assertSelectOptions(from_box,
[str(self.lisa.id), str(self.peter.id),
str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id)])
self.assertSelectOptions(to_box, [])
self.assertActiveButtons(mode, field_name, False, False, True, False)
# Choose some options ------------------------------------------------
from_lisa_select_option = self.get_select_option(from_box, str(self.lisa.id))
# Check the title attribute is there for tool tips: ticket #20821
self.assertEqual(from_lisa_select_option.get_attribute('title'), from_lisa_select_option.get_attribute('text'))
from_lisa_select_option.click()
self.get_select_option(from_box, str(self.jason.id)).click()
self.get_select_option(from_box, str(self.bob.id)).click()
self.get_select_option(from_box, str(self.john.id)).click()
self.assertActiveButtons(mode, field_name, True, False, True, False)
self.selenium.find_element_by_id(choose_link).click()
self.assertActiveButtons(mode, field_name, False, False, True, True)
self.assertSelectOptions(from_box,
[str(self.peter.id), str(self.arthur.id),
str(self.cliff.id), str(self.jenny.id)])
self.assertSelectOptions(to_box,
[str(self.lisa.id), str(self.bob.id),
str(self.jason.id), str(self.john.id)])
# Check the tooltip is still there after moving: ticket #20821
to_lisa_select_option = self.get_select_option(to_box, str(self.lisa.id))
self.assertEqual(to_lisa_select_option.get_attribute('title'), to_lisa_select_option.get_attribute('text'))
# Remove some options -------------------------------------------------
self.get_select_option(to_box, str(self.lisa.id)).click()
self.get_select_option(to_box, str(self.bob.id)).click()
self.assertActiveButtons(mode, field_name, False, True, True, True)
self.selenium.find_element_by_id(remove_link).click()
self.assertActiveButtons(mode, field_name, False, False, True, True)
self.assertSelectOptions(from_box,
[str(self.peter.id), str(self.arthur.id),
str(self.cliff.id), str(self.jenny.id),
str(self.lisa.id), str(self.bob.id)])
self.assertSelectOptions(to_box,
[str(self.jason.id), str(self.john.id)])
# Choose some more options --------------------------------------------
self.get_select_option(from_box, str(self.arthur.id)).click()
self.get_select_option(from_box, str(self.cliff.id)).click()
self.selenium.find_element_by_id(choose_link).click()
self.assertSelectOptions(from_box,
[str(self.peter.id), str(self.jenny.id),
str(self.lisa.id), str(self.bob.id)])
self.assertSelectOptions(to_box,
[str(self.jason.id), str(self.john.id),
str(self.arthur.id), str(self.cliff.id)])
def test_basic(self):
self.school.students = [self.lisa, self.peter]
self.school.alumni = [self.lisa, self.peter]
self.school.save()
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get(
'%s%s' % (self.live_server_url, '/admin_widgets/school/%s/' % self.school.id))
self.wait_page_loaded()
self.execute_basic_operations('vertical', 'students')
self.execute_basic_operations('horizontal', 'alumni')
# Save and check that everything is properly stored in the database ---
self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
self.wait_page_loaded()
self.school = models.School.objects.get(id=self.school.id) # Reload from database
self.assertEqual(list(self.school.students.all()),
[self.arthur, self.cliff, self.jason, self.john])
self.assertEqual(list(self.school.alumni.all()),
[self.arthur, self.cliff, self.jason, self.john])
def test_filter(self):
"""
Ensure that typing in the search box filters out options displayed in
the 'from' box.
"""
from selenium.webdriver.common.keys import Keys
self.school.students = [self.lisa, self.peter]
self.school.alumni = [self.lisa, self.peter]
self.school.save()
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get(
'%s%s' % (self.live_server_url, '/admin_widgets/school/%s/' % self.school.id))
for field_name in ['students', 'alumni']:
from_box = '#id_%s_from' % field_name
to_box = '#id_%s_to' % field_name
choose_link = '#id_%s_add_link' % field_name
remove_link = '#id_%s_remove_link' % field_name
input = self.selenium.find_element_by_css_selector('#id_%s_input' % field_name)
# Initial values
self.assertSelectOptions(from_box,
[str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id)])
# Typing in some characters filters out non-matching options
input.send_keys('a')
self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.jason.id)])
input.send_keys('R')
self.assertSelectOptions(from_box, [str(self.arthur.id)])
# Clearing the text box makes the other options reappear
input.send_keys([Keys.BACK_SPACE])
self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.jason.id)])
input.send_keys([Keys.BACK_SPACE])
self.assertSelectOptions(from_box,
[str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id)])
# -----------------------------------------------------------------
# Check that choosing a filtered option sends it properly to the
# 'to' box.
input.send_keys('a')
self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.jason.id)])
self.get_select_option(from_box, str(self.jason.id)).click()
self.selenium.find_element_by_css_selector(choose_link).click()
self.assertSelectOptions(from_box, [str(self.arthur.id)])
self.assertSelectOptions(to_box,
[str(self.lisa.id), str(self.peter.id),
str(self.jason.id)])
self.get_select_option(to_box, str(self.lisa.id)).click()
self.selenium.find_element_by_css_selector(remove_link).click()
self.assertSelectOptions(from_box,
[str(self.arthur.id), str(self.lisa.id)])
self.assertSelectOptions(to_box,
[str(self.peter.id), str(self.jason.id)])
input.send_keys([Keys.BACK_SPACE]) # Clear text box
self.assertSelectOptions(from_box,
[str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jenny.id),
str(self.john.id), str(self.lisa.id)])
self.assertSelectOptions(to_box,
[str(self.peter.id), str(self.jason.id)])
# -----------------------------------------------------------------
# Check that pressing enter on a filtered option sends it properly
# to the 'to' box.
self.get_select_option(to_box, str(self.jason.id)).click()
self.selenium.find_element_by_css_selector(remove_link).click()
input.send_keys('ja')
self.assertSelectOptions(from_box, [str(self.jason.id)])
input.send_keys([Keys.ENTER])
self.assertSelectOptions(to_box, [str(self.peter.id), str(self.jason.id)])
input.send_keys([Keys.BACK_SPACE, Keys.BACK_SPACE])
# Save and check that everything is properly stored in the database ---
self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
self.wait_page_loaded()
self.school = models.School.objects.get(id=self.school.id) # Reload from database
self.assertEqual(list(self.school.students.all()),
[self.jason, self.peter])
self.assertEqual(list(self.school.alumni.all()),
[self.jason, self.peter])
class HorizontalVerticalFilterSeleniumChromeTests(HorizontalVerticalFilterSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class HorizontalVerticalFilterSeleniumIETests(HorizontalVerticalFilterSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF='admin_widgets.urls')
class AdminRawIdWidgetSeleniumFirefoxTests(AdminSeleniumWebDriverTestCase):
available_apps = ['admin_widgets'] + AdminSeleniumWebDriverTestCase.available_apps
fixtures = ['admin-widgets-users.xml']
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def setUp(self):
models.Band.objects.create(id=42, name='Bogey Blues')
models.Band.objects.create(id=98, name='Green Potatoes')
super(AdminRawIdWidgetSeleniumFirefoxTests, self).setUp()
def test_ForeignKey(self):
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get(
'%s%s' % (self.live_server_url, '/admin_widgets/event/add/'))
main_window = self.selenium.current_window_handle
# No value has been selected yet
self.assertEqual(
self.selenium.find_element_by_id('id_main_band').get_attribute('value'),
'')
# Open the popup window and click on a band
self.selenium.find_element_by_id('lookup_id_main_band').click()
self.selenium.switch_to.window('id_main_band')
self.wait_page_loaded()
link = self.selenium.find_element_by_link_text('Bogey Blues')
self.assertIn('/band/42/', link.get_attribute('href'))
link.click()
# The field now contains the selected band's id
self.selenium.switch_to.window(main_window)
self.wait_for_value('#id_main_band', '42')
# Reopen the popup window and click on another band
self.selenium.find_element_by_id('lookup_id_main_band').click()
self.selenium.switch_to.window('id_main_band')
self.wait_page_loaded()
link = self.selenium.find_element_by_link_text('Green Potatoes')
self.assertIn('/band/98/', link.get_attribute('href'))
link.click()
# The field now contains the other selected band's id
self.selenium.switch_to.window(main_window)
self.wait_for_value('#id_main_band', '98')
def test_many_to_many(self):
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get(
'%s%s' % (self.live_server_url, '/admin_widgets/event/add/'))
main_window = self.selenium.current_window_handle
# No value has been selected yet
self.assertEqual(
self.selenium.find_element_by_id('id_supporting_bands').get_attribute('value'),
'')
# Open the popup window and click on a band
self.selenium.find_element_by_id('lookup_id_supporting_bands').click()
self.selenium.switch_to.window('id_supporting_bands')
self.wait_page_loaded()
link = self.selenium.find_element_by_link_text('Bogey Blues')
self.assertIn('/band/42/', link.get_attribute('href'))
link.click()
# The field now contains the selected band's id
self.selenium.switch_to.window(main_window)
self.wait_for_value('#id_supporting_bands', '42')
# Reopen the popup window and click on another band
self.selenium.find_element_by_id('lookup_id_supporting_bands').click()
self.selenium.switch_to.window('id_supporting_bands')
self.wait_page_loaded()
link = self.selenium.find_element_by_link_text('Green Potatoes')
self.assertIn('/band/98/', link.get_attribute('href'))
link.click()
# The field now contains the two selected bands' ids
self.selenium.switch_to.window(main_window)
self.wait_for_value('#id_supporting_bands', '42,98')
class AdminRawIdWidgetSeleniumChromeTests(AdminRawIdWidgetSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class AdminRawIdWidgetSeleniumIETests(AdminRawIdWidgetSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
ROOT_URLCONF='admin_widgets.urls')
class RelatedFieldWidgetSeleniumFirefoxTests(AdminSeleniumWebDriverTestCase):
available_apps = ['admin_widgets'] + AdminSeleniumWebDriverTestCase.available_apps
fixtures = ['admin-widgets-users.xml']
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
def test_ForeignKey_using_to_field(self):
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get('%s%s' % (
self.live_server_url,
'/admin_widgets/profile/add/'))
main_window = self.selenium.current_window_handle
# Click the Add User button to add new
self.selenium.find_element_by_id('add_id_user').click()
self.selenium.switch_to.window('id_user')
self.wait_for('#id_password')
password_field = self.selenium.find_element_by_id('id_password')
password_field.send_keys('password')
username_field = self.selenium.find_element_by_id('id_username')
username_value = 'newuser'
username_field.send_keys(username_value)
save_button_css_selector = '.submit-row > input[type=submit]'
self.selenium.find_element_by_css_selector(save_button_css_selector).click()
self.selenium.switch_to.window(main_window)
# The field now contains the new user
self.wait_for('#id_user option[value="newuser"]')
# Click the Change User button to change it
self.selenium.find_element_by_id('change_id_user').click()
self.selenium.switch_to_window('id_user')
self.wait_page_loaded()
username_field = self.selenium.find_element_by_id('id_username')
username_value = 'changednewuser'
username_field.clear()
username_field.send_keys(username_value)
save_button_css_selector = '.submit-row > input[type=submit]'
self.selenium.find_element_by_css_selector(save_button_css_selector).click()
self.selenium.switch_to_window(main_window)
# Wait up to 2 seconds for the new option to show up after clicking save in the popup.
self.selenium.implicitly_wait(2)
self.selenium.find_element_by_css_selector('#id_user option[value=changednewuser]')
self.selenium.implicitly_wait(0)
# Go ahead and submit the form to make sure it works
self.selenium.find_element_by_css_selector(save_button_css_selector).click()
self.wait_for_text('li.success', 'The profile "changednewuser" was added successfully.')
profiles = models.Profile.objects.all()
self.assertEqual(len(profiles), 1)
self.assertEqual(profiles[0].user.username, username_value)
class RelatedFieldWidgetSeleniumChromeTests(RelatedFieldWidgetSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class RelatedFieldWidgetSeleniumIETests(RelatedFieldWidgetSeleniumFirefoxTests):
webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
|
{
"content_hash": "1ffa6acb61f67853170e25dcdbb1a2ee",
"timestamp": "",
"source": "github",
"line_count": 1193,
"max_line_length": 406,
"avg_line_length": 45.923721709974856,
"alnum_prop": 0.6248562615218939,
"repo_name": "Sonicbids/django",
"id": "c5795c87fb4124a2254cdf74c7d7f93bed074ae8",
"size": "54836",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/admin_widgets/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53544"
},
{
"name": "JavaScript",
"bytes": "106009"
},
{
"name": "Makefile",
"bytes": "5765"
},
{
"name": "Python",
"bytes": "10479615"
},
{
"name": "Shell",
"bytes": "10452"
}
],
"symlink_target": ""
}
|
from __future__ import division, unicode_literals
import re
import numpy as np
from monty.io import zopen
from pymatgen.electronic_structure.core import Spin, Orbital
"""
Module for reading Lobster output files. For more information
on LOBSTER see www.cohp.de.
"""
__author__ = "Marco Esters"
__copyright__ = "Copyright 2017, The Materials Project"
__version__ = "0.2"
__maintainer__ = "Marco Esters"
__email__ = "esters@uoregon.edu"
__date__ = "Dec 13, 2017"
class Cohpcar(object):
"""
Class to read COHPCAR/COOPCAR files generated by LOBSTER.
Args:
are_coops: Determines if the file is a list of COHPs or COOPs.
Default is False for COHPs.
filename: Name of the COHPCAR file. If it is None, the default
file name will be chosen, depending on the value of are_coops.
.. attribute: cohp_data
Dict that contains the COHP data of the form:
{bond: {"COHP": {Spin.up: cohps, Spin.down:cohps},
"ICOHP": {Spin.up: icohps, Spin.down: icohps},
"length": bond length}
Also contains an entry for the average, which does not have
a "length" key.
.. attribute: efermi
The Fermi energy in eV.
.. attribute: energies
Sequence of energies in eV. Note that LOBSTER shifts the energies
so that the Fermi energy is at zero.
.. attribute: is_spin_polarized
Boolean to indicate if the calculation is spin polarized.
"""
def __init__(self, are_coops=False, filename=None):
self.are_coops = are_coops
if filename is None:
filename = "COOPCAR.lobster" if are_coops \
else "COHPCAR.lobster"
with zopen(filename, "rt") as f:
contents = f.read().split("\n")
# The parameters line is the second line in a COHPCAR file. It
# contains all parameters that are needed to map the file.
parameters = contents[1].split()
# Subtract 1 to skip the average
num_bonds = int(parameters[0]) - 1
self.efermi = float(parameters[-1])
if int(parameters[1]) == 2:
spins = [Spin.up, Spin.down]
self.is_spin_polarized = True
else:
spins = [Spin.up]
self.is_spin_polarized = False
# The COHP data start in row num_bonds + 3
data = np.array([np.array(row.split(), dtype=float)
for row in contents[num_bonds+3:]]).transpose()
self.energies = data[0]
cohp_data = {"average": {"COHP": {spin: data[1+2*s*(num_bonds+1)]
for s, spin in enumerate(spins)},
"ICOHP": {spin: data[2+2*s*(num_bonds+1)]
for s, spin in enumerate(spins)}}}
orb_cohp = {}
for bond in range(num_bonds):
bond_data = self._get_bond_data(contents[3+bond])
label = bond_data["label"]
orbs = bond_data["orbitals"]
cohp = {spin: data[2*(bond+s*(num_bonds+1))+3]
for s, spin in enumerate(spins)}
icohp = {spin: data[2*(bond+s*(num_bonds+1))+4]
for s, spin in enumerate(spins)}
if orbs is None:
cohp_data[label] = {"COHP": cohp, "ICOHP": icohp,
"length": bond_data["length"],
"sites": bond_data["sites"]}
elif label in orb_cohp:
orb_cohp[label].update({bond_data["orb_label"]:
{"COHP": cohp, "ICOHP": icohp,
"orbitals": orbs}})
else:
if label not in cohp_data:
cohp_data[label] = {"COHP": None, "ICOHP": None,
"length": bond_data["length"],
"sites": bond_data["sites"]}
orb_cohp[label] = {bond_data["orb_label"]: {"COHP": cohp,
"ICOHP": icohp,
"orbitals": orbs}}
self.orb_res_cohp = orb_cohp if orb_cohp else None
self.cohp_data = cohp_data
@staticmethod
def _get_bond_data(line):
"""
Subroutine to extract bond label, site indices, and length from
a LOBSTER header line. The site indices are zero-based, so they
can be easily used with a Structure object.
Example header line: No.4:Fe1->Fe9(2.4524893531900283)
Example header line for orbital-resolved COHP:
No.1:Fe1[3p_x]->Fe2[3d_x^2-y^2](2.456180552772262)
Args:
line: line in the COHPCAR header describing the bond.
Returns:
Dict with the bond label, the bond length, a tuple of the site
indices, a tuple containing the orbitals (if orbital-resolved),
and a label for the orbitals (if orbital-resolved).
"""
orb_labs = ["s", "p_y", "p_z", "p_x", "d_xy", "d_yz", "d_z^2",
"d_xz", "d_x^2-y^2", "4f_y(3x^2-y^2)", "4f_xyz",
"4f_yz^2", "4f_z^3", "4f_xz^2", "4f_z(x^2-y^2)"]
line = line.split("(")
length = float(line[-1][:-1])
# Replacing "->" with ":" makes splitting easier
sites = line[0].replace("->", ":").split(":")[1:3]
site_indices = tuple(int(re.split(r"\D+", site)[1]) - 1
for site in sites)
species = tuple(re.split(r"\d+", site)[0] for site in sites)
if "[" in sites[0]:
orbs = [re.findall(r"\[(.*)\]", site)[0] for site in sites]
orbitals = [tuple((int(orb[0]), Orbital(orb_labs.index(orb[1:]))))
for orb in orbs]
orb_label = "%d%s-%d%s" % (orbitals[0][0], orbitals[0][1].name,
orbitals[1][0], orbitals[1][1].name)
else:
orbitals = None
orb_label = None
label = "%s%d-%s%d" % (species[0], site_indices[0] + 1,
species[1], site_indices[1] + 1)
bond_data = {"label": label, "length": length, "sites": site_indices,
"orbitals": orbitals, "orb_label": orb_label}
return bond_data
class Icohplist(object):
"""
Class to read ICOHPLIST/ICOOPLIST files generated by LOBSTER.
Args:
are_coops: Determines if the file is a list of ICOHPs or ICOOPs.
Defaults to False for ICOHPs.
filename: Name of the ICOHPLIST file. If it is None, the default
file name will be chosen, depending on the value of are_coops.
.. attribute: are_coops
Boolean to indicate if the populations are COOPs or COHPs.
.. attribute: is_spin_polarized
Boolean to indicate if the calculation is spin polarized.
.. attribute: icohplist
Dict containing the listfile data of the form:
{bond: "length": bond length,
"number_of_bonds": number of bonds
"icohp": {Spin.up: ICOHP(Ef) spin up, Spin.down: ...}}
"""
def __init__(self, are_coops=False, filename=None):
self.are_coops = are_coops
if filename is None:
filename = "ICOOPLIST.lobster" if are_coops \
else "ICOHPLIST.lobster"
# LOBSTER list files have an extra trailing blank line
# and we don't need the header.
with zopen(filename) as f:
data = f.read().split("\n")[1:-1]
if len(data) == 0:
raise IOError("ICOHPLIST file contains no data.")
# If the calculation is spin polarized, the line in the middle
# of the file will be another header line.
if "distance" in data[len(data)//2]:
num_bonds = len(data)//2
if num_bonds == 0:
raise IOError("ICOHPLIST file contains no data.")
self.is_spin_polarized = True
else:
num_bonds = len(data)
self.is_spin_polarized = False
icohplist = {}
for bond in range(num_bonds):
line = data[bond].split()
label = "%s-%s" % (line[1], line[2])
length = float(line[3])
icohp = float(line[4])
num = int(line[5])
icohplist[label] = {"length": length, "number_of_bonds": num,
"icohp": {Spin.up: icohp}}
if self.is_spin_polarized:
icohp = float(data[bond+num_bonds+1].split()[4])
icohplist[label]["icohp"][Spin.down] = icohp
self.icohplist = icohplist
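# --- Usage sketch (illustrative, not part of the upstream module) ---
# A minimal demonstration of the two parser classes above; it assumes that
# "COHPCAR.lobster" and "ICOHPLIST.lobster" files produced by LOBSTER are
# present in the working directory.
if __name__ == "__main__":
    cohpcar = Cohpcar()                  # parses COHPCAR.lobster by default
    print(len(cohpcar.energies))         # number of energy grid points
    avg_up = cohpcar.cohp_data["average"]["COHP"][Spin.up]
    print(avg_up[:5])                    # first few points of the averaged COHP
    icohps = Icohplist()                 # parses ICOHPLIST.lobster by default
    for label, entry in icohps.icohplist.items():
        print("%s  d=%.3f  ICOHP=%.4f" % (label, entry["length"],
                                          entry["icohp"][Spin.up]))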
|
{
"content_hash": "4e896eee104fcba04c5fa70acf5a1dea",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 78,
"avg_line_length": 38.275109170305676,
"alnum_prop": 0.5220764403879065,
"repo_name": "czhengsci/pymatgen",
"id": "1b9a5bd80c17b67e6dd12962b2d1530308b580cd",
"size": "8874",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pymatgen/io/lobster.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5938"
},
{
"name": "CSS",
"bytes": "7550"
},
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "HTML",
"bytes": "827"
},
{
"name": "Makefile",
"bytes": "5573"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "4026362"
},
{
"name": "Python",
"bytes": "6706935"
},
{
"name": "Roff",
"bytes": "1135003"
}
],
"symlink_target": ""
}
|
"""
<DefineSource>
@Date : Fri Nov 14 13:20:38 2014 \n
@Author : Erwan Ledoux \n\n
</DefineSource>
Connecter instances catch grasped variables and also set an attention on them.
"""
#<DefineAugmentation>
import ShareYourSystem as SYS
BaseModuleStr="ShareYourSystem.Standards.Teamers.Parenter"
DecorationModuleStr="ShareYourSystem.Standards.Classors.Classer"
SYS.setSubModule(globals())
#</DefineAugmentation>
#<ImportSpecificModules>
import copy
#</ImportSpecificModules>
#<DefineClass>
@DecorationClass()
class ConnecterClass(BaseClass):
#Definition
RepresentingKeyStrsList=[
'ConnectingGraspClueVariablesList',
'ConnectingCatchCollectionStr',
'ConnectingAttentionCollectionStr',
'ConnectedCatchDerivePointersList',
'ConnectedCatchKeyStrsList'
]
def default_init(self,
_ConnectingGraspClueVariablesList=None,
_ConnectingCatchCollectionStr="To",
_ConnectingAttentionCollectionStr="From",
_ConnectedCatchDerivePointersList=None,
_ConnectedCatchKeyStrsList=None,
**_KwargVariablesDict
):
#Call the parent __init__ method
BaseClass.__init__(self,**_KwargVariablesDict)
def do_connect(self):
#debug
'''
self.debug(('self.',self,['ConnectingGraspClueVariablesList']))
'''
#catch
self.ConnectedCatchDerivePointersList=map(
lambda __ConnectingGraspVariable:
copy.copy(
self.grasp(
__ConnectingGraspVariable
).catch(
self.ConnectingCatchCollectionStr
).attention(
self.ConnectingAttentionCollectionStr,
__ConnectingGraspVariable['AttentionUpdateVariable']
if 'AttentionUpdateVariable' in __ConnectingGraspVariable
else {}
).CatchedDerivePointerVariable
),
self.ConnectingGraspClueVariablesList
)
#map
self.ConnectedCatchKeyStrsList=map(
lambda __ConnectedDerivePointer:
__ConnectedDerivePointer.CatchKeyStr,
self.ConnectedCatchDerivePointersList
)
#debug
'''
self.debug(('self.',self,['ConnectedCatchKeyStrsList']))
'''
#</DefineClass>
|
{
"content_hash": "7838ab199e54b7855ecdf816559aeeec",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 78,
"avg_line_length": 23.941860465116278,
"alnum_prop": 0.725594949004371,
"repo_name": "Ledoux/ShareYourSystem",
"id": "2e80858ef355693892cd0f3099285dff20004594",
"size": "2083",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Pythonlogy/draft/Teamers_Connecter/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "86"
},
{
"name": "C++",
"bytes": "4244220"
},
{
"name": "CSS",
"bytes": "142769"
},
{
"name": "CoffeeScript",
"bytes": "37331"
},
{
"name": "HTML",
"bytes": "36211676"
},
{
"name": "JavaScript",
"bytes": "2147968"
},
{
"name": "Jupyter Notebook",
"bytes": "7930602"
},
{
"name": "Makefile",
"bytes": "6362"
},
{
"name": "PHP",
"bytes": "11096341"
},
{
"name": "Python",
"bytes": "5700092"
},
{
"name": "Ruby",
"bytes": "60"
},
{
"name": "Scala",
"bytes": "2412"
},
{
"name": "Shell",
"bytes": "2525"
},
{
"name": "Swift",
"bytes": "154"
},
{
"name": "TeX",
"bytes": "2556"
},
{
"name": "XSLT",
"bytes": "20993"
}
],
"symlink_target": ""
}
|
import shutil
import json
from zipfile import ZipFile
import pandas
from django.contrib.gis.geos import Polygon
import os
import sh
from osgeo import osr, ogr
from . import Driver
from pandas import DataFrame
from shapely import wkb
from django.template.defaultfilters import slugify
import re
def ogrfield(elt):
return re.sub('-', '_', slugify(elt).encode('ascii'))[0:10]
def identity(x):
return '"' + x + '"' if isinstance(x, basestring) else str(x)
dtypes = {
'int64': ogr.OFTInteger,
'float64': ogr.OFTReal,
'object': ogr.OFTString,
'datetime64[ns]': ogr.OFTDateTime
}
geomTypes = {
'GeometryCollection': ogr.wkbGeometryCollection,
'LinearRing': ogr.wkbLinearRing,
'LineString': ogr.wkbLineString,
'MultiLineString': ogr.wkbMultiLineString,
'MultiPoint': ogr.wkbMultiPoint,
'MultiPolygon': ogr.wkbMultiPolygon,
'Point': ogr.wkbPoint,
'Polygon': ogr.wkbPolygon
}
def transform(geom, crx):
if crx:
geom.Transform(crx)
return geom
class ShapefileDriver(Driver):
@classmethod
def supports_multiple_layers(cls):
return False
@classmethod
def supports_configuration(cls):
return False
def ready_data_resource(self, **kwargs):
"""Other keyword args get passed in as a matter of course, like BBOX, time, and elevation, but this basic driver
ignores them"""
slug, srs = super(ShapefileDriver, self).ready_data_resource(**kwargs)
return slug, srs, {
'type': 'shape',
"file": self.cached_basename + '.shp'
}
def clear_cached_files(self):
sh.rm('-f', sh.glob(os.path.join(self.cache_path, '*.shp')))
sh.rm('-f', sh.glob(os.path.join(self.cache_path, '*.shx')))
sh.rm('-f', sh.glob(os.path.join(self.cache_path, '*.dbf')))
sh.rm('-f', sh.glob(os.path.join(self.cache_path, '*.prj')))
def compute_spatial_metadata(self, **kwargs):
"""Other keyword args get passed in as a matter of course, like BBOX, time, and elevation, but this basic driver
ignores them"""
super(ShapefileDriver, self).compute_spatial_metadata(**kwargs)
self.clear_cached_files()
archive = ZipFile(self.cached_basename + self.src_ext)
projection_found = False
for name in archive.namelist():
xtn = name.split('.')[-1].lower()
if xtn in {'shp', 'shx', 'dbf', 'prj'} and "__MACOSX" not in name:
projection_found = projection_found or xtn == 'prj'
with open(self.cached_basename + '.' + xtn, 'wb') as fout:
with archive.open(name) as fin:
chunk = fin.read(65536)
while chunk:
fout.write(chunk)
chunk = fin.read(65536)
if not projection_found:
with open(self.cached_basename + '.prj', 'w') as f:
srs = osr.SpatialReference()
srs.ImportFromEPSG(4326)
f.write(srs.ExportToWkt())
ds = ogr.Open(self.cached_basename + '.shp')
lyr = ds.GetLayerByIndex(0) if 'sublayer' not in kwargs else ds.GetLayerByName(kwargs['sublayer'])
xmin, xmax, ymin, ymax = lyr.GetExtent()
crs = lyr.GetSpatialRef()
self.resource.spatial_metadata.native_srs = crs.ExportToProj4()
e4326 = osr.SpatialReference()
e4326.ImportFromEPSG(4326)
crx = osr.CoordinateTransformation(crs, e4326)
x04326, y04326, _ = crx.TransformPoint(xmin, ymin)
x14326, y14326, _ = crx.TransformPoint(xmax, ymax)
self.resource.spatial_metadata.bounding_box = Polygon.from_bbox((x04326, y04326, x14326, y14326))
self.resource.spatial_metadata.native_bounding_box = Polygon.from_bbox((xmin, ymin, xmax, ymax))
self.resource.spatial_metadata.three_d = False
self.resource.spatial_metadata.save()
self.resource.save()
def get_data_fields(self, **kwargs):
_, _, result = self.ready_data_resource(**kwargs)
ds = ogr.Open(result['file'])
lyr = ds.GetLayerByIndex(0) if 'layer' not in kwargs else ds.GetLayerByName(kwargs['sublayer'])
return [(field.name, field.GetTypeName(), field.width) for field in lyr.schema]
def get_data_for_point(self, wherex, wherey, srs, **kwargs):
result, x1, y1, epsilon = super(ShapefileDriver, self).get_data_for_point(wherex, wherey, srs, **kwargs)
ds = ogr.Open(result['file'])
lyr = ds.GetLayerByIndex(0) if 'sublayer' not in kwargs else ds.GetLayerByName(kwargs['sublayer'])
if epsilon==0:
lyr.SetSpatialFilter(ogr.CreateGeometryFromWkt("POINT({x1} {y1})".format(**locals())))
else:
from django.contrib.gis import geos
wkt = geos.Point(x1,y1).buffer(epsilon).wkt
print wkt
lyr.SetSpatialFilter(ogr.CreateGeometryFromWkt(wkt))
return [f.items() for f in lyr]
def attrquery(self, key, value):
key, op = key.split('__')
op = {
'gt' : ">",
'gte' : ">=",
'lt' : "<",
'lte' : '<=',
'startswith' : 'LIKE',
'endswith' : 'LIKE',
'istartswith' : 'ILIKE',
'iendswith' : 'ILIKE',
'icontains' : "ILIKE",
'contains' : "LIKE",
'in' : 'IN',
'ne' : "<>"
}[op]
value = {
'gt': identity,
'gte': identity,
'lt': identity,
'lte': identity,
'startswith': lambda x : '%' + x,
'endswith': lambda x : x + '%',
'istartswith': lambda x : '%' + x,
'iendswith': lambda x : x + '%',
'icontains': lambda x : '%' + x + '%',
'contains': lambda x: '%' + x + '%',
'in': lambda x: x if isinstance(x, basestring) else '(' + ','.join(identity(a) for a in x) + ')',
'ne': identity
}[op](value)
return ' '.join([key, op, value])
def as_dataframe(self, **kwargs):
"""
Creates a dataframe object for a shapefile's main layer using layer_as_dataframe. This object is cached on disk for
later use, but the cached copy will only be picked up if the shapefile's mtime is older than the dataframe's mtime.
:param shp: The shapefile
:return:
"""
dfx_path = self.get_filename('dfx')
shp_path = self.get_filename('shp')
if len(kwargs) != 0:
ds = ogr.Open(shp_path)
lyr = ds.GetLayerByIndex(0)
crx=xrc=None
if 'bbox' in kwargs:
minx,miny,maxx,maxy = kwargs['bbox']
if 'srs' in kwargs:
if isinstance(kwargs['srs'], basestring):
s_srs = osr.SpatialReference()
if kwargs['srs'].lower().startswith('epsg:'):
s_srs.ImportFromEPSG(int(kwargs['srs'].split(':')[1]))
else:
s_srs.ImportFromProj4(kwargs['srs'])
else:
s_srs = kwargs['srs']
t_srs = self.resource.srs
if s_srs.ExportToProj4() != t_srs.ExportToProj4():
crx = osr.CoordinateTransformation(s_srs, t_srs)
minx, miny, _ = crx.TransformPoint(minx, miny)
maxx, maxy, _ = crx.TransformPoint(maxx, maxy)
xrc = osr.CoordinateTransformation(t_srs, s_srs)
lyr.SetSpatialFilterRect(minx, miny, maxx, maxy)
elif 'boundary' in kwargs:
boundary = ogr.Geometry(geomTypes[kwargs['boundary_type']], kwargs["boundary"])
lyr.SetSpatialFilter(boundary)
if 'query' in kwargs:
if isinstance(kwargs['query'], basestring):
query = json.loads(kwargs['query'])
else:
query = kwargs['query']
for key, value in query.items():
attrq = self.attrquery(key, value) if '__' in key else ' '.join([key, '=', identity(value)])
lyr.SetAttributeFilter(attrq)
start = kwargs['start'] if 'start' in kwargs else 0
count = kwargs['count'] if 'count' in kwargs else len(lyr) - start
records = []
for i in range(start):
lyr.next()
for i in range(count):
f = lyr.next()
if f.geometry():
records.append(dict(fid=i, geometry=wkb.loads(transform(f.geometry(), xrc).ExportToWkb()), **f.items()))
df = DataFrame.from_records(
data=records,
index='fid'
)
if 'sort_by' in kwargs:
df = df.sort_index(by=kwargs['sort_by'])
return df
elif hasattr(self, '_df'):
return self._df
elif os.path.exists(dfx_path) and os.stat(dfx_path).st_mtime >= os.stat(shp_path).st_mtime:
if self.resource.big:
self._df = pandas.read_hdf(dfx_path, 'df')
else:
self._df = pandas.read_pickle(dfx_path)
return self._df
else:
ds = ogr.Open(shp_path)
lyr = ds.GetLayerByIndex(0)
df= DataFrame.from_records(
data=[dict(fid=f.GetFID(), geometry=wkb.loads(f.geometry().ExportToWkb()), **f.items()) for f in lyr if f.geometry()],
index='fid'
)
if self.resource.big:
df.to_hdf(dfx_path, 'df')
else:
df.to_pickle(dfx_path)
self._df = df
return self._df
@classmethod
def from_dataframe(cls, df, shp, srs):
"""Write an dataframe object out as a shapefile"""
drv = ogr.GetDriverByName('ESRI Shapefile')
if os.path.exists(shp):
shutil.rmtree(shp)
os.mkdir(shp)
ds = drv.CreateDataSource(shp)
keys = df.keys()
fieldDefns = [ogr.FieldDefn(ogrfield(name), dtypes[df[name].dtype.name]) for name in keys if name != 'geometry']
geomType = geomTypes[(f for f in df['geometry']).next().type]
l = ds.CreateLayer(
name=os.path.split(shp)[-1],
srs=srs,
geom_type=geomType
)
for f in fieldDefns:
l.CreateField(f)
for i, record in df.iterrows():
feature = ogr.Feature(l.GetLayerDefn())
for field, value in ((k, v) for k, v in record.to_dict().items() if k != 'geometry'):
if isinstance(value, basestring):
value=value.encode('ascii')
feature.SetField(ogrfield(field), value)
feature.SetGeometry(ogr.CreateGeometryFromWkb(record['geometry'].wkb))
l.CreateFeature(feature)
del ds
driver = ShapefileDriver
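# Illustrative note on attrquery() (descriptive commentary restating the code
# above, not taken from the upstream project): Django-style lookups are turned
# into OGR attribute-filter expressions, e.g. for an instantiated driver `drv`:
#   drv.attrquery('population__gte', 10000)  ->  "population >= 10000"
#   drv.attrquery('name__ne', 'Lake')        ->  'name <> "Lake"'
# Keys without '__' fall back to a plain equality test in as_dataframe().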
|
{
"content_hash": "c2ec078371f728d3c13fa99fa3412578",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 134,
"avg_line_length": 35.9185667752443,
"alnum_prop": 0.5420331912578217,
"repo_name": "JeffHeard/terrapyn",
"id": "e8bffe95711703e4eaccb008adb7bfad5cd89c88",
"size": "11063",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geocms/drivers/shapefile.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2265"
},
{
"name": "CoffeeScript",
"bytes": "37900"
},
{
"name": "HTML",
"bytes": "38695"
},
{
"name": "JavaScript",
"bytes": "3675861"
},
{
"name": "Python",
"bytes": "572443"
}
],
"symlink_target": ""
}
|
"""Calculate the l-core of a multiplex network.
"""
import networkx as nx
def l_core(mg, l):
"""Return the l-core of mg.
"""
import copy
core = copy.deepcopy(mg)
for u,v in mg.edges():
if len(mg[u][v][mg.cid]) < l:
core.remove_edge(u,v)
core.remove_nodes_from(nx.isolates(core))
core.remove_empty_layers()
return core
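# Illustrative note (commentary only, not from the upstream file): given a
# multiplex graph `mg` whose edge attribute keyed by `mg.cid` maps layer ids
# to values, l_core(mg, 3) returns a deep copy in which every edge present in
# fewer than 3 layers has been removed, after which isolated nodes and empty
# layers are dropped.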
|
{
"content_hash": "8ef8d2a8f69679e4e5ab745376529736",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 47,
"avg_line_length": 18.8,
"alnum_prop": 0.5851063829787234,
"repo_name": "wuhaochen/multinet",
"id": "d9d547bc610ec8aa689f0b31511f671a8538082a",
"size": "376",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "multinet/l_core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53130"
},
{
"name": "Shell",
"bytes": "451"
}
],
"symlink_target": ""
}
|
import base64
from selenium.webdriver.remote.command import Command
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from selenium.common.exceptions import WebDriverException
from .service import Service
from .options import Options
class WebDriver(RemoteWebDriver):
"""
Controls the ChromeDriver and allows you to drive the browser.
You will need to download the ChromeDriver executable from
http://chromedriver.storage.googleapis.com/index.html
"""
def __init__(self, executable_path="chromedriver", port=0,
chrome_options=None, service_args=None,
desired_capabilities=None, service_log_path=None):
"""
Creates a new instance of the chrome driver.
Starts the service and then creates a new instance of the chrome driver.
:Args:
- executable_path - path to the executable. If the default is used it assumes the executable is in the $PATH
- port - port you would like the service to run, if left as 0, a free port will be found.
- desired_capabilities: Dictionary object with non-browser specific
capabilities only, such as "proxy" or "loggingPref".
- chrome_options: this takes an instance of ChromeOptions
"""
if chrome_options is None:
# desired_capabilities stays as passed in
if desired_capabilities is None:
desired_capabilities = Options().to_capabilities()
else:
if desired_capabilities is None:
desired_capabilities = chrome_options.to_capabilities()
else:
desired_capabilities.update(chrome_options.to_capabilities())
self.service = Service(executable_path, port=port,
service_args=service_args, log_path=service_log_path)
self.service.start()
try:
RemoteWebDriver.__init__(self,
command_executor=self.service.service_url,
desired_capabilities=desired_capabilities,
keep_alive=True)
except:
self.quit()
raise
self._is_remote = False
def quit(self):
"""
Closes the browser and shuts down the ChromeDriver executable
that is started when starting the ChromeDriver
"""
try:
RemoteWebDriver.quit(self)
except:
# We don't care about the message because something probably has gone wrong
pass
finally:
self.service.stop()
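# --- Usage sketch (illustrative, not part of the upstream file) ---
# Assumes the chromedriver executable is available on $PATH; the keyword
# arguments mirror the constructor documented above.
if __name__ == "__main__":
    driver = WebDriver(chrome_options=Options())
    try:
        driver.get("http://example.com")
        print(driver.title)
    finally:
        driver.quit()  # also stops the chromedriver service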
|
{
"content_hash": "9783969dd2dc6bb8386ed7596db5805b",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 117,
"avg_line_length": 38.53030303030303,
"alnum_prop": 0.6366496264254817,
"repo_name": "isaksky/selenium",
"id": "556f78ebdb694981094027e26835a70d9e97dc96",
"size": "3161",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "py/selenium/webdriver/chrome/webdriver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "22395"
},
{
"name": "AppleScript",
"bytes": "2614"
},
{
"name": "C",
"bytes": "396578"
},
{
"name": "C#",
"bytes": "2380824"
},
{
"name": "C++",
"bytes": "1575592"
},
{
"name": "CSS",
"bytes": "212143"
},
{
"name": "Java",
"bytes": "4913359"
},
{
"name": "JavaScript",
"bytes": "17819489"
},
{
"name": "Objective-C",
"bytes": "13249"
},
{
"name": "PHP",
"bytes": "13253"
},
{
"name": "Python",
"bytes": "707224"
},
{
"name": "Ragel in Ruby Host",
"bytes": "3086"
},
{
"name": "Ruby",
"bytes": "803696"
},
{
"name": "Shell",
"bytes": "1138"
},
{
"name": "XSLT",
"bytes": "1047"
}
],
"symlink_target": ""
}
|
from pyscf import lib
from pyscf.tdscf import rhf
class TDA(rhf.TDA):
def gen_vind(self, mf):
vind, hdiag = rhf.TDA.gen_vind(self, mf)
def vindp(x):
with lib.temporary_env(mf, exxdiv=None):
return vind(x)
return vindp, hdiag
def nuc_grad_method(self):
raise NotImplementedError
CIS = TDA
class TDHF(rhf.TDHF):
def gen_vind(self, mf):
vind, hdiag = rhf.TDHF.gen_vind(self, mf)
def vindp(x):
with lib.temporary_env(mf, exxdiv=None):
return vind(x)
return vindp, hdiag
def nuc_grad_method(self):
raise NotImplementedError
RPA = TDRHF = TDHF
from pyscf.pbc import scf
scf.hf.RHF.TDA = lib.class_as_method(TDA)
scf.hf.RHF.TDHF = lib.class_as_method(TDHF)
scf.rohf.ROHF.TDA = None
scf.rohf.ROHF.TDHF = None
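# Added commentary (not in the upstream file): both subclasses only override
# gen_vind so that the response vectors are evaluated with mf.exxdiv
# temporarily set to None via lib.temporary_env; the exact-exchange divergence
# treatment of the periodic mean-field object therefore does not enter the
# TDA/TDHF response kernel. Analytic nuclear gradients are explicitly not
# implemented for these periodic variants.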
|
{
"content_hash": "964d163a0d57c244b9134e312e31d2f4",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 52,
"avg_line_length": 22.31578947368421,
"alnum_prop": 0.6132075471698113,
"repo_name": "gkc1000/pyscf",
"id": "ac67a2b47df9c437a37b4da396d658c9444de096",
"size": "1601",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyscf/pbc/tdscf/rhf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2749942"
},
{
"name": "C++",
"bytes": "20522"
},
{
"name": "CMake",
"bytes": "29300"
},
{
"name": "Common Lisp",
"bytes": "40269"
},
{
"name": "Cuda",
"bytes": "12405"
},
{
"name": "Fortran",
"bytes": "1104054"
},
{
"name": "Jupyter Notebook",
"bytes": "42844"
},
{
"name": "Makefile",
"bytes": "6797"
},
{
"name": "Python",
"bytes": "10739278"
},
{
"name": "Shell",
"bytes": "5480"
},
{
"name": "VBA",
"bytes": "577"
}
],
"symlink_target": ""
}
|
"""
Count how many times each letter occurs in a word
and print the counts sorted by frequency
"""
print "Please type in a word"
word = raw_input()
freq = {}
for letter in word:
count = freq.setdefault(letter, 0)
freq[letter] = count + 1
print freq
for k in sorted(freq, key=freq.get, reverse=True):
print "%s => %d" % (k, freq[k])
|
{
"content_hash": "7bfd178ba0985ad7b2ef56197e5828a5",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 50,
"avg_line_length": 15.75,
"alnum_prop": 0.6285714285714286,
"repo_name": "ynonp/python-examples",
"id": "dc3c3f9dec6aee334487324a684ecfc5d142988e",
"size": "315",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "09_datastructures/01_lettercount.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47176"
}
],
"symlink_target": ""
}
|
class Typed:
def __init__(self, name, expected_type):
self.name = name
self.expected_type = expected_type
def __get__(self, instance, cls):
if instance is None:
return self
else:
return instance.__dict__[self.name]
def __set__(self, instance, value):
if not isinstance(value, self.expected_type):
raise TypeError('Expected ' + str(self.expected_type))
instance.__dict__[self.name] = value
def __delete__(self, instance):
del instance.__dict__[self.name]
# Class decorator that applies it to selected attributes
def typeassert(**kwargs):
def decorate(cls):
for name, expected_type in kwargs.items():
# Attach a Typed descriptor to the class
setattr(cls, name, Typed(name, expected_type))
return cls
return decorate
# Example use
@typeassert(name=str, shares=int, price=float)
class Stock:
def __init__(self, name, shares, price):
self.name = name
self.shares = shares
self.price = price
if __name__ == '__main__':
s = Stock('ACME', 100, 490.1)
print(s.name, s.shares, s.price)
s.shares = 50
try:
s.shares = 'a lot'
except TypeError as e:
print(e)
|
{
"content_hash": "af768a3fa5da343c573607ce508a1ad8",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 66,
"avg_line_length": 28.90909090909091,
"alnum_prop": 0.5864779874213837,
"repo_name": "tuanavu/python-cookbook-3rd",
"id": "94603707a2346e4148767635f6cd9c7cca6ffd1e",
"size": "1314",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/8/creating_a_new_kind_of_class_or_instance_attribute/example2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "20265"
},
{
"name": "CSS",
"bytes": "184"
},
{
"name": "Jupyter Notebook",
"bytes": "219413"
},
{
"name": "Makefile",
"bytes": "231"
},
{
"name": "Python",
"bytes": "250592"
},
{
"name": "Shell",
"bytes": "179"
}
],
"symlink_target": ""
}
|
'''
[109]
on segments of the TIGER corpus
+1, +2 and +1 or +2 & 5 relations mentioned in Hinrich's email
# parameter with number of considered segments
'''
import collections
import os
import GlobalVariables_Katharina_SVD as gl
from scipy.sparse import csr_matrix, issparse
from scipy.sparse.linalg import eigsh
from numpy import dot, zeros, kron, array, eye, ones, savetxt, loadtxt
import ExtRescal.rescal_new as rescal
import logging
import threading
#import cPickle as pickle
import math
import numpy as np
import datetime
import scipy
import time
import sys
import operator
savedir = "/mounts/data/proj/kann/SegEm/results/test_20151005/"
def initialize(): # a list of dictionaries
print('\nINITIALIZE ', datetime.datetime.now().time())
# make 4 dictionaries, 1 for +1, one for +2, one for +1 or +2, 1 for number of words in the corpus
# the last 5 are the 'new' relations Hinrich defined in his email
for i in range(0,9):
D=collections.defaultdict(int)
gl.dictionaryList.append(D)
gl.wordFrequency = collections.defaultdict(int)
def ReadFile_01(file, limit, lowerlimit, numberSegments):
print('\nREADFILE_01 ', datetime.datetime.now().time())
print (file)
#for line in file.readlines():
for num_lines, line in enumerate(file):
if limit > 0 and num_lines > limit:
break
if num_lines < lowerlimit:
continue
#line='<B> '+line[:len(line)-1]+' </B>'
#print line
#tokens=line.strip().lower().split(' ') # consider lowercases
tokens=line.strip().split(' ') # keep the original word forms
indexes = []
for token in tokens:
if token == '':
continue
if token in gl.wordFrequency:
gl.wordFrequency[token] += 1
else:
gl.wordFrequency[token] = 1
# sort the dictionary by its values
frequWords = sorted(gl.wordFrequency.items(), key=operator.itemgetter(1))
finalResult = []
for i in range(numberSegments):
finalResult.append(frequWords[len(frequWords)-i-1][0]) # TODO: put[0] here
print('Done.')
#print(finalResult)
#exit(0)
return finalResult
def tokenIsLastSegment(i, tokens):
if i == len(tokens)-1:
return True
if tokens[i][0] == '2':
return True
if tokens[i][0] == '1':
if tokens[i+1][0] == '1':
return True
if tokens[i][0] == '0':
if not tokens[i+1][0] == '2':
return True
return False
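# Commentary (not in the original source): token tags appear to encode word-
# internal position by their first character; a token counts as the final
# segment of a word when it is the last token of the line, when its tag starts
# with '2', when a '1'-tagged token is followed by another '1', or when a
# '0'-tagged token is not followed by a '2'.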
def ReadFile(file, limit, lowerlimit, frequentWords):
print('\nREADFILE ', datetime.datetime.now().time())
print (file)
#for line in file.readlines():
for num_lines, line in enumerate(file):
if limit > 0 and num_lines > limit:
break
if num_lines < lowerlimit:
continue
#line='<B> '+line[:len(line)-1]+' </B>'
#print line
#tokens=line.strip().lower().split(' ') # consider lowercases
tokens=line.strip().split(' ') # keep the original word forms
indexes = []
indicesOfLastSeg = []
isLastSegment = False
for i in range(len(tokens)):
token = tokens[i]
isLastSegment = False
if tokenIsLastSegment(i, tokens):
#print(token + ' is a last segment.')
isLastSegment = True
gl.corpusLength += 1
if token == '':
continue
if not token in frequentWords:
#print('Rare word: ' + token)
token = '<RARE>'
#if token == tokens[-2]: #remember index of the last element of the sentence
# gl.lastPeriod = len(indexes) # not -1 because it has still not been increased
middle=gl.wordlist.get(token, -1)
if middle == -1: # if is a new word
gl.wordlist[token] = gl.dimension
middle=gl.dimension
gl.dimension+= 1 # gl.dimension count the current word amount
if gl.dimension % 10000 == 0:
print (gl.dimension)
gl.dictionaryList[1][middle] = 0
indexes.append(middle) # 'indexes' stores the index of every word in the line
if isLastSegment:
indicesOfLastSeg.append(middle)
currentPosi=len(indexes)-1
leftRange=gl.window if currentPosi>= gl.window else currentPosi
#new from Katharina
if currentPosi>0:
context=indexes[currentPosi-1]
gl.dictionaryList[0][(context, middle)]+=1 #update slice 0
gl.dictionaryList[3][(context, middle)]+=1 #update slice 3
if currentPosi>1:
context=indexes[currentPosi-2]
gl.dictionaryList[2][(context, middle)]+=1 #update slice 2
gl.dictionaryList[3][(context, middle)]+=1 #next update for slice 3
# for the last segments of words:
if len(indicesOfLastSeg) > 1:
gl.dictionaryList[4][(middle, indicesOfLastSeg[len(indicesOfLastSeg)-2])] += 1 # last segment 2 words ago
if len(indicesOfLastSeg) > 0:
gl.dictionaryList[5][(middle, indicesOfLastSeg[len(indicesOfLastSeg)-1])] += 1 # last segment 1 word ago
endPreviousWord = 0
endPreviousWordNew = 0
if len(indicesOfLastSeg) > 1 and isLastSegment:
for k in range(3):
if indexes[currentPosi-k-1] == indicesOfLastSeg[len(indicesOfLastSeg)-2]:
endPreviousWord = k
#print('endPreviousWord: ' + str(k))
break
gl.dictionaryList[6][(indexes[currentPosi-k-1], middle)] += 1 # same word TODO: no test here if currentPosi-i exists... necessary??
#print('gl.dictionaryList[6] '+ str(indexes[currentPosi-k-1]) + '; ' + str(middle))
if len(indicesOfLastSeg) > 2 and isLastSegment:
for k in range(5):
#print(indexes[currentPosi-k-1-endPreviousWord])
#print(indicesOfLastSeg[len(indicesOfLastSeg)-3])
if indexes[currentPosi-k-endPreviousWord-1] == indicesOfLastSeg[len(indicesOfLastSeg)-3]:
endPreviousWordNew = k + endPreviousWord
#print('endPreviousWordNew: ' + str(endPreviousWordNew))
break
gl.dictionaryList[7][(indexes[currentPosi-k-endPreviousWord-1], middle)] += 1 # 1 word after TODO: no test here if currentPosi-i exists... necessary??
#print('gl.dictionaryList[7] '+ str(indexes[currentPosi-k-endPreviousWord-1]) + '; ' + str(middle))
if len(indicesOfLastSeg) > 3 and isLastSegment:
for k in range(5):
if indexes[currentPosi-k-endPreviousWordNew-1] == indicesOfLastSeg[len(indicesOfLastSeg)-4]:
break
gl.dictionaryList[8][(indexes[currentPosi-k-endPreviousWordNew-1], middle)] += 1 # 2 words after TODO: no test here if currentPosi-i exists... necessary??
#print('gl.dictionaryList[8] '+ str(indexes[currentPosi-k-endPreviousWordNew-1]) + '; ' + str(middle))
'''
if currentPosi>2:
context=indexes[currentPosi-3]
gl.dictionaryList[1][(middle, context)]+=0.6 #still update slice 1
gl.dictionaryList[1][(context, middle)]+=0.6 #a symmetric relation
if currentPosi>3:
context=indexes[currentPosi-4]
gl.dictionaryList[2][(middle, context)]+=0.4 #update slice 2
gl.dictionaryList[2][(context, middle)]+=0.4 #a symmetric relation
if currentPosi>4:
context=indexes[currentPosi-5]
gl.dictionaryList[2][(middle, context)]+=0.2 #still update slice 2
gl.dictionaryList[2][(context, middle)]+=0.2 #a symmetric relation
'''
gl.dictionaryList[1][middle] += 1
#for token, value in gl.dictionaryList[1].items():
#print(gl.wordlist[token] + ', ' + str(value) + '\n')
#exit(0)
print('Number of words: ' + str(gl.dimension))
def Traverse(folders, frequentWords):
    print('\nTRAVERSE ', datetime.datetime.now().time())
    fileNo=0
    for rootDir in folders:
        if os.path.isfile(rootDir): #is file
            file = open(rootDir)
            ReadFile(file, -1, 0, frequentWords)
            #ReadFile(file, 10000, 0, frequentWords) #V1
            #ReadFile(file, 294757048/3*2, 294757048/3) #V2
            #ReadFile(file, -1, 294757048/3*2) #V3
            file.close()
            fileNo+=1
        else: #is directory
            for entry in os.listdir(rootDir):
                path = os.path.join(rootDir, entry)
                # recurse into subdirectories, passing the frequent-word set along
                Traverse([path], frequentWords)
def Traverse_01(folders, noSegments):
    print('\nTRAVERSE_01 ', datetime.datetime.now().time())
    fileNo=0
    finalResult = None
    for rootDir in folders:
        if os.path.isfile(rootDir): #is file
            file = open(rootDir)
            finalResult = ReadFile_01(file, -1, 0, noSegments)
            #ReadFile(file, 100, 0) #V1
            #ReadFile(file, 294757048/3*2, 294757048/3) #V2
            #ReadFile(file, -1, 294757048/3*2) #V3
            file.close()
            fileNo+=1
        else: #is directory
            for entry in os.listdir(rootDir):
                path = os.path.join(rootDir, entry)
                # recurse into subdirectories, passing noSegments along and keeping the last result
                finalResult = Traverse_01([path], noSegments)
    return finalResult
def formPPMIandSVD():
    print('\nFORMPPMI ', datetime.datetime.now().time())

    def buildPPMISlice(sliceIndex):
        # Positive PMI for one co-occurrence slice:
        #   ppmi(w, c) = max(0, log((count(w, c) * N) / (count(w) * count(c))))
        # where the unigram counts live in gl.dictionaryList[1] and N is gl.corpusLength.
        rows = []
        cols = []
        data = []
        for (word, context), value in gl.dictionaryList[sliceIndex].items():
            rows.append(word)
            cols.append(context)
            pmi = math.log((value * gl.corpusLength) / (gl.dictionaryList[1][word] * gl.dictionaryList[1][context]))
            data.append(pmi if pmi > 0 else 0)
        return csr_matrix((data, (rows, cols)), shape=(gl.dimension, gl.dimension))

    print ('Building tensor....')
    # Slice 1 holds the unigram counts, so the tensor is built from the eight
    # co-occurrence slices: 0 and 2-8.
    tensor = [buildPPMISlice(i) for i in (0, 2, 3, 4, 5, 6, 7, 8)]
    print('Print the original tensor')
    printOriginalTensor(tensor)
    #call rescal
    print ('Calling rescal...')
    #A, R, fit, itr, exectimes = rescal(tensor, 50, init='nvecs', lambda_A=10, lambda_R=10, compute_fit=True)
    #A, R, fit, itr, exectimes = rescal.als(tensor, 100)
    #printWordEmbeddingSVD(A, 100)
    A, R, fit, itr, exectimes = rescal.als(tensor, 200)
    printRelationsTensor(R, 200)
    printWordEmbeddingSVD(A, 200)
    #A, R, fit, itr, exectimes = rescal.als(tensor, 300)
    #printRelationsTensor(R, 300)
    #printWordEmbeddingSVD(A, 300)
def save_sparse_csr(filename,array):
print('\nSAVE_SPARSE_CSR ', datetime.datetime.now().time())
np.savez(filename,data = array.data ,indices=array.indices,
indptr =array.indptr, shape=array.shape )
# not used for now
def load_sparse_csr(filename):
print('\nLOAD_SPARSE_CSR ', datetime.datetime.now().time())
loader = np.load(filename)
return csr_matrix(( loader['data'], loader['indices'], loader['indptr']),
shape = loader['shape'])
def printWordEmbeddingSVD(matrix, dim):
    print('\nPRINTWORDEMBEDDINGSVD ', datetime.datetime.now().time())
    index2word={}
    for word, index in gl.wordlist.items():
        index2word[index]=word
    output= open(savedir + '109_012_wordembeddings_' + str(dim) + '.txt', 'w')
    for i in range(len(matrix)):
        output.write(index2word[i]+' ') #the word is printed according to its id
        for length in range(len(matrix[i])):
            output.write(str(matrix[i][length])+' ')
        output.write('\n')
    output.close()
    print ('Word embeddings stored.')
    output= open(savedir + '109_012_real_wordembeddings' + str(dim) + '.txt', 'w') #without the leading marker character
    for i in range(len(matrix)):
        output.write(index2word[i][1:]+' ') #the word is printed according to its id, with the first character stripped
        for length in range(len(matrix[i])):
            output.write(str(matrix[i][length])+' ')
        output.write('\n')
    output.close()
    print ('\'Real\' word embeddings stored.')
# not used for now
def loadEmbeddingFile(embeddingFile):
print('\nLOADEMBEDDINGFILE ', datetime.datetime.now().time())
embeddingDict={}
file = open(embeddingFile)
for line in file:
tokens=line[:len(line)-2].split(' ') # consider lowercases
values=[]
for i in range(1, len(tokens)):
values.append(float(tokens[i]))
embeddingDict[tokens[0]]=values
print ('Embedding loading finished.')
return embeddingDict
def printRelationsTensor(R, dim):
print('\nPRINTRELATIONTENSOR ', datetime.datetime.now().time())
for j in range(len(R)):
output= savedir + '109_012_R_' + str(j) + '.txt'
np.savetxt(output, R[j])
print ('RelationTensor stored over!')
def printOriginalTensor(R):
print('\nPRINTORIGINALTENSOR ', datetime.datetime.now().time())
for j in range(len(R)):
output= savedir + '109_012_T_' + str(j) + '.txt'
#print(j)
#print(R[j])
save_sparse_csr(output, R[j])
print ('Original Tensor stored over!')
if __name__ == '__main__':
if len(sys.argv) > 1:
noSegments = int(sys.argv[1])-1
else:
noSegments = 89383 # take all; TODO: change if corpus is not TIGER
initialize()
#print('start sleeping...')
#time.sleep(21000) #wait 6 hours, so the text should be ready
#print('...waking up!')
folders=[savedir + 'tiger_text_file_preproc_seg_3']
#folders=['/mounts/data/proj/kann/SegEm/tiger_text_file_preproc_last2']
frequentWords = Traverse_01(folders, noSegments)
#print(frequentWords)
#exit(0)
Traverse(folders, frequentWords)
formPPMIandSVD()
|
{
"content_hash": "1a49ff3959bc97b7ecd1cde0c4885dd2",
"timestamp": "",
"source": "github",
"line_count": 513,
"max_line_length": 176,
"avg_line_length": 37.08771929824562,
"alnum_prop": 0.5779985283296541,
"repo_name": "Kelina/TensorEmbeddings",
"id": "49905a8b753dbce86259da0f7146c889cdc2a9a9",
"size": "19051",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "6_seg/109_RESCAL_EMB_012.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "178218"
},
{
"name": "Shell",
"bytes": "3451"
}
],
"symlink_target": ""
}
|
import os
import socket
from flask import Flask, url_for
from .config import BaseConfig
from .views import register_views
from .extensions import rq, redis
app = Flask(__name__)
##############################
# Load Config
app.config['UPLOAD_FOLDER'] = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'uploads')
app.config.from_object(BaseConfig)
app.config.from_envvar('CONFIG', silent=True)
app.debug = app.config.get('DEBUG', False)
##############################
# Attach Views
register_views(app)
##############################
# Init Extensions
rq.init_app(app)
redis.init_app(app)
##############################
# Configure Templating
@app.template_filter()
def url_for_gif(slug):
filename = slug + ".gif"
if app.config.get('UPLOAD_URL_FORMAT_STRING'):
return app.config.get('UPLOAD_URL_FORMAT_STRING') % {"filename": filename}
else:
return url_for('uploaded_file', filename=filename, _external=True)
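# Illustrative example (the format string below is hypothetical, not part of this app's config):
# with UPLOAD_URL_FORMAT_STRING = "https://cdn.example.com/%(filename)s",
# url_for_gif("abc123") would return "https://cdn.example.com/abc123.gif".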
@app.template_filter()
def url_for_still(slug):
filename = slug + ".jpg"
if app.config.get('UPLOAD_URL_FORMAT_STRING'):
return app.config.get('UPLOAD_URL_FORMAT_STRING') % {"filename": filename}
else:
return url_for('uploaded_file', filename=filename, _external=True)
@app.context_processor
def inject_globals():
return dict(
g_ENVIRONMENT=app.config.get('ENV'),
g_HOSTNAME=socket.gethostname(),
g_IS_PRODUCTION=('PRODUCTION' == app.config.get('ENV')),
g_SERVER_NAME=app.config.get('SERVER_NAME'),
g_GOOGLE_ANALYTICS_ID=app.config.get('GOOGLE_ANALYTICS_ID')
)
|
{
"content_hash": "e60cd49dd96bb84e2aed6eb899a3417f",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 97,
"avg_line_length": 25.38095238095238,
"alnum_prop": 0.6278924327704816,
"repo_name": "jmhobbs/dun-dun-duh",
"id": "0fd19436451b5ef9fb68911342dab0e34f934602",
"size": "1624",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "dundunduh/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1938"
},
{
"name": "HTML",
"bytes": "14832"
},
{
"name": "JavaScript",
"bytes": "5543"
},
{
"name": "Python",
"bytes": "34757"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Building()
result.template = "object/building/tatooine/shared_guild_university_tatooine_style_01.iff"
result.attribute_template_id = -1
result.stfName("building_name","guild_university")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "57282b474f9d17955522411f5b89183b",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 91,
"avg_line_length": 25.53846153846154,
"alnum_prop": 0.713855421686747,
"repo_name": "obi-two/Rebelion",
"id": "ed14dadf52b949ce926637896749314fc837b78a",
"size": "477",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/building/tatooine/shared_guild_university_tatooine_style_01.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
from django import forms
from multiemailfield import utils
class MultiEmailWidget(forms.Textarea):
def render(self, name, value, attrs=None):
value = utils.dump(value) or ''
return super(MultiEmailWidget, self).render(name, value, attrs)
|
{
"content_hash": "4225093a88aedcb8baccab6661811a15",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 71,
"avg_line_length": 29,
"alnum_prop": 0.7203065134099617,
"repo_name": "sophilabs/django-multiemail-field",
"id": "2e590875122434f5dc223e500c660b4435334cb5",
"size": "261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "multiemailfield/widgets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6885"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('costs', '0012_auto_20171129_2247'),
]
operations = [
migrations.AlterField(
model_name='recurringcost',
name='type',
field=models.CharField(choices=[('normal', "We will not have spent this yet. We will estimate a fixed amount per billing cycle. (You should select a 'liabilities' account)."), ('arrears_balance', "We will have already spent this in the previous billing cycle, so bill the account's balance. (You should select an 'expenses' account)"), ('arrears_transactions', "We will have already spent this in the previous cycle, so bill the total amount spent in the previous cycle. (You should select an 'expenses' account)")], default='normal', max_length=20),
),
]
|
{
"content_hash": "13164bbea09000f37875a28650abccf6",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 562,
"avg_line_length": 49.611111111111114,
"alnum_prop": 0.6842105263157895,
"repo_name": "waldocollective/swiftwind",
"id": "aac7e8cb7c3f9c5eff1db7cbb723300a5291191c",
"size": "966",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "swiftwind/costs/migrations/0013_auto_20171203_1516.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "420"
},
{
"name": "HTML",
"bytes": "29715"
},
{
"name": "Python",
"bytes": "193881"
}
],
"symlink_target": ""
}
|
import django
import django.core.validators
from django.db import migrations, models
import poradnia.users.models
class Migration(migrations.Migration):
dependencies = [("users", "0009_user_codename")]
if django.VERSION[:2] >= (1, 8):
operations = [
migrations.AlterModelManagers(
name="user",
managers=[("objects", poradnia.users.models.CustomUserManager())],
)
]
else:
operations = []
operations += [
migrations.AlterField(
model_name="user",
name="email",
field=models.EmailField(
max_length=254, verbose_name="email address", blank=True
),
),
migrations.AlterField(
model_name="user",
name="groups",
field=models.ManyToManyField(
related_query_name="user",
related_name="user_set",
to="auth.Group",
blank=True,
help_text="The groups this user belongs to. A user will get all permissions granted to each of their groups.",
verbose_name="groups",
),
),
migrations.AlterField(
model_name="user",
name="last_login",
field=models.DateTimeField(
null=True, verbose_name="last login", blank=True
),
),
migrations.AlterField(
model_name="user",
name="username",
field=models.CharField(
error_messages={"unique": "A user with that username already exists."},
max_length=30,
validators=[
django.core.validators.RegexValidator(
"^[\\w.@+-]+$",
"Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.",
"invalid",
)
],
help_text="Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.",
unique=True,
verbose_name="username",
),
),
]
|
{
"content_hash": "6c331b93b88296b49e9fb760eb6b01f3",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 126,
"avg_line_length": 33.22727272727273,
"alnum_prop": 0.49247606019151846,
"repo_name": "watchdogpolska/poradnia.siecobywatelska.pl",
"id": "7673a936dbdbf2553ce3c1ff25c8206db3c5849d",
"size": "2193",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "poradnia/users/migrations/0010_auto_20151217_2355.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "246873"
},
{
"name": "HTML",
"bytes": "74372"
},
{
"name": "JavaScript",
"bytes": "473767"
},
{
"name": "Python",
"bytes": "269647"
}
],
"symlink_target": ""
}
|
'''
Swift utility class
===================
Author: Anthony Stanton <anthony.stanton@gmail.com>
'''
from __future__ import absolute_import
# Import python libs
import logging
from sys import stdout
from os import makedirs
from os.path import dirname, isdir
from errno import EEXIST
# Import Salt libs
import salt.utils
# Get logging started
log = logging.getLogger(__name__)
# Import Swift client libs
HAS_SWIFT = False
try:
from swiftclient import client
HAS_SWIFT = True
except ImportError:
pass
def check_swift():
return HAS_SWIFT
def mkdirs(path):
try:
makedirs(path)
except OSError as err:
if err.errno != EEXIST:
raise
# we've been playing fast and loose with kwargs, but the swiftclient isn't
# going to accept any old thing
def _sanitize(kwargs):
variables = (
'user', 'key', 'authurl',
'retries', 'preauthurl', 'preauthtoken', 'snet',
'starting_backoff', 'max_backoff', 'tenant_name',
'os_options', 'auth_version', 'cacert',
'insecure', 'ssl_compression'
)
ret = {}
for var in kwargs:
if var in variables:
ret[var] = kwargs[var]
return ret
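# Illustrative example (hypothetical values): keys outside the whitelist above are dropped, e.g.
#   _sanitize({'user': 'demo', 'key': 's3cr3t', 'authurl': 'https://keystone.example/v2.0', 'region': 'RegionOne'})
#   returns {'user': 'demo', 'key': 's3cr3t', 'authurl': 'https://keystone.example/v2.0'}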
class SaltSwift(object):
'''
Class for all swiftclient functions
'''
def __init__(
self,
user,
tenant_name,
auth_url,
password=None,
auth_version=2,
**kwargs
):
'''
Set up openstack credentials
'''
if not HAS_SWIFT:
log.error('Error:: unable to find swiftclient. Try installing it from the appropriate repository.')
return None
self.kwargs = kwargs.copy()
self.kwargs['user'] = user
self.kwargs['password'] = password
self.kwargs['tenant_name'] = tenant_name
self.kwargs['authurl'] = auth_url
self.kwargs['auth_version'] = auth_version
if 'key' not in self.kwargs:
self.kwargs['key'] = password
self.kwargs = _sanitize(self.kwargs)
self.conn = client.Connection(**self.kwargs)
def get_account(self):
'''
List Swift containers
'''
try:
listing = self.conn.get_account()
return listing
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: {0}: {1}'.format(exc.code, exc.msg))
log.error(' Content: \n{0}'.format(getattr(exc, 'read', lambda: str(exc))()))
return False
def get_container(self, cont):
'''
List files in a Swift container
'''
try:
listing = self.conn.get_container(cont)
return listing
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: {0}: {1}'.format(exc.code, exc.msg))
log.error(' Content: \n{0}'.format(getattr(exc, 'read', lambda: str(exc))()))
return False
def put_container(self, cont):
'''
Create a new Swift container
'''
try:
self.conn.put_container(cont)
return True
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: {0}: {1}'.format(exc.code, exc.msg))
log.error(' Content: \n{0}'.format(getattr(exc, 'read', lambda: str(exc))()))
return False
def delete_container(self, cont):
'''
Delete a Swift container
'''
try:
self.conn.delete_container(cont)
return True
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: {0}: {1}'.format(exc.code, exc.msg))
log.error(' Content: \n{0}'.format(getattr(exc, 'read', lambda: str(exc))()))
return False
def post_container(self, cont, metadata=None):
'''
Update container metadata
'''
pass
def head_container(self, cont):
'''
Get container metadata
'''
pass
def get_object(self, cont, obj, local_file=None, return_bin=False):
'''
Retrieve a file from Swift
'''
try:
if local_file is None and return_bin is False:
return False
headers, body = self.conn.get_object(cont, obj, resp_chunk_size=65536)
if return_bin is True:
fp = stdout
else:
dirpath = dirname(local_file)
if dirpath and not isdir(dirpath):
mkdirs(dirpath)
fp = salt.utils.fopen(local_file, 'wb')
read_length = 0
for chunk in body:
read_length += len(chunk)
fp.write(chunk)
fp.close()
return True
# ClientException
# file/dir exceptions
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: {0}: {1}'.format(exc.code, exc.msg))
log.error(' Content: \n{0}'.format(getattr(exc, 'read', lambda: str(exc))()))
return False
def put_object(self, cont, obj, local_file):
'''
Upload a file to Swift
'''
try:
with salt.utils.fopen(local_file, 'rb') as fp_:
self.conn.put_object(cont, obj, fp_)
return True
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: {0}: {1}'.format(exc.code, exc.msg))
log.error(' Content: \n{0}'.format(getattr(exc, 'read', lambda: str(exc))()))
return False
def delete_object(self, cont, obj):
'''
Delete a file from Swift
'''
try:
self.conn.delete_object(cont, obj)
return True
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: {0}: {1}'.format(exc.code, exc.msg))
log.error(' Content: \n{0}'.format(getattr(exc, 'read', lambda: str(exc))()))
return False
def head_object(self, cont, obj):
'''
Get object metadata
'''
pass
def post_object(self, cont, obj, metadata):
'''
Update object metadata
'''
pass
|
{
"content_hash": "5c876b3facec45d9bb23087d29a3ea38",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 111,
"avg_line_length": 29.110169491525422,
"alnum_prop": 0.5228529839883551,
"repo_name": "smallyear/linuxLearn",
"id": "8b28172acd5ace2021889012e7ee28158f4c8551",
"size": "6894",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "salt/salt/utils/openstack/swift.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "269"
},
{
"name": "CSS",
"bytes": "35"
},
{
"name": "HTML",
"bytes": "23373"
},
{
"name": "JavaScript",
"bytes": "510"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "12800734"
},
{
"name": "Shell",
"bytes": "240576"
}
],
"symlink_target": ""
}
|
"""
WSGI config for sutrofm project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sutrofm.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
from django.conf import settings
from ws4redis.uwsgi_runserver import uWSGIWebsocketServer
_django_app = get_wsgi_application()
# _websocket_app = uWSGIWebsocketServer()
def application(environ, start_response):
# if environ.get('PATH_INFO').startswith(settings.WEBSOCKET_URL):
# return _websocket_app(environ, start_response)
return _django_app(environ, start_response)
|
{
"content_hash": "f06fdbc326eac7be0d6d979e04871d51",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 79,
"avg_line_length": 39.61764705882353,
"alnum_prop": 0.7906458797327395,
"repo_name": "superemily/sutrofm",
"id": "b24d1cbc5b6539874257d5281222c3cb17453b8c",
"size": "1347",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "sutrofm/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11614"
},
{
"name": "HTML",
"bytes": "19771"
},
{
"name": "JavaScript",
"bytes": "57695"
},
{
"name": "Python",
"bytes": "51885"
},
{
"name": "Shell",
"bytes": "813"
}
],
"symlink_target": ""
}
|
"""(preliminary) Model of a Factory Callable object"""
from basicproperty import propertied, basic, common
from basictypes import list_types, callable
class Factory( callable.Callable ):
"""An object intended to create instances of a type
The factory allows you to create instances of a type
through the GUI. Most factories will allow for
entirely-default calling (i.e. Factory() creates a
new, valid instance). Others may require interactive
definition of certain parameters.
"""
listof_Factories = list_types.listof(
Factory,
name = "listof_Factories",
dataType = 'list.Factories',
)
|
{
"content_hash": "0f64983de46a1192aba3dc0fbbff2006",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 54,
"avg_line_length": 30.1,
"alnum_prop": 0.760797342192691,
"repo_name": "corvust/Transmission-XBMC",
"id": "77051befe70e0deef9887470f54984d8d9a9e966",
"size": "602",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "resources/lib/basictypes/factory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "286845"
}
],
"symlink_target": ""
}
|