hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f739636f301d9e486dd281255508775b18813c2d | 22,410 | py | Python | mmtbx/validation/restraints.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 155 | 2016-11-23T12:52:16.000Z | 2022-03-31T15:35:44.000Z | mmtbx/validation/restraints.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 590 | 2016-12-10T11:31:18.000Z | 2022-03-30T23:10:09.000Z | mmtbx/validation/restraints.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 115 | 2016-11-15T08:17:28.000Z | 2022-02-09T15:30:14.000Z |
"""
Validation of models of any type against basic covalent geometry restraints.
By default this will flag all restrained atoms deviating by more than 4 sigma
from the target value.
"""
from __future__ import absolute_import, division, print_function
from mmtbx.validation import atoms, validation, get_atoms_info
from libtbx.str_utils import make_sub_header
from libtbx import slots_getstate_setstate
from math import sqrt
import sys
# Extra slot names shared by all restraint outlier subclasses; appended to
# atoms.__slots__ in the class definitions below.
__restraint_attr__ = [
  "sigma",
  "target",
  "model",
  "delta",
  "residual",
] # XXX others?
class restraint(atoms):
  """
  Base class for covalent stereochemistry restraint outliers (except for
  planarity, which is weird and different).  Unlike most of the other
  outlier implementations elsewhere in the validation module, the restraint
  outliers are printed on multiple lines to facilitate display of the atoms
  involved.
  """
  # Number of atoms involved in the restraint; subclasses override.
  # None disables the length check in __init__.
  n_atoms = None
  __slots__ = atoms.__slots__ + __restraint_attr__
  def __init__(self, **kwds):
    atoms.__init__(self, **kwds)
    if (self.n_atoms is not None):
      assert (len(self.atoms_info) == self.n_atoms)
    if (self.score is None):
      # score = deviation from the ideal value in sigma units
      self.score = abs(self.delta / self.sigma)
  @staticmethod
  def header():
    """Column headers matching the layout of format_values()."""
    return "%-20s %7s %7s %7s %6s %6s %10s" % ("atoms", "ideal", "model",
      "delta", "sigma", "residual", "deviation")
  def as_table_row_phenix(self):
    """
    Values for populating ListCtrl in Phenix GUI.
    """
    atoms_str = ", ".join([ a.id_str() for a in self.atoms_info ])
    return [ atoms_str, self.target, self.model, self.score ]
  def id_str(self, ignore_altloc=None):
    """Comma-separated ID strings of all atoms in the restraint."""
    return ",".join([ a.id_str() for a in self.atoms_info ])
  def as_string(self, prefix=""):
    """One output line per atom, with the numeric values appended to the
    last line."""
    id_strs = [ a.id_str() for a in self.atoms_info ]
    lines = []
    for atom_str in id_strs :
      lines.append("%s%-20s" % (prefix, atom_str))
    lines[-1] += " " + self.format_values()
    return "\n".join(lines)
  def format_values(self):
    return "%7.2f %7.2f %7.2f %6.2e %6.2e %4.1f*sigma" % (self.target,
      self.model, self.delta, self.sigma, self.residual, self.score)
  def __cmp__(self, other):
    # Python 2 only (cmp() does not exist on Python 3, where the rich
    # comparisons below are used instead); sorts highest score first.
    return cmp(other.score, self.score)
  def __eq__(self, other):
    return self.score == other.score
  def __ne__(self, other):
    return self.score != other.score
  def __lt__(self, other):
    return self.score < other.score
  def __le__(self, other):
    return self.score <= other.score
  def __gt__ (self, other):
    return self.score > other.score
  def __ge__(self, other):
    return self.score >= other.score
  def kinemage_key(self):
    """Key string identifying this outlier in kinemage output."""
    atom0 = self.atoms_info[0]
    # bonds are assigned to the following residue
    if len(self.atoms_info)==2:
      atom0 = self.atoms_info[1]
    # angles are assigned to the central atom's residue
    elif len(self.atoms_info)==3:
      atom0 = self.atoms_info[1]
    # dihedrals are assigned to the following residue - this applies to
    # omega dihedral but planes are not a problem
    elif len(self.atoms_info)==4:
      atom0 = self.atoms_info[2]
    atom_names = [ a.name.strip().lower() for a in self.atoms_info ]
    kin_key = "%1s%3s%2s%4s%1s %s" % (self.get_altloc(),
      atom0.resname.lower(), atom0.chain_id, atom0.resseq, atom0.icode,
      "-".join(atom_names))
    return kin_key
class bond(restraint):
  """Single bond-length outlier, optionally related by a symmetry op."""
  n_atoms = 2
  __bond_attr__ = [
    "slack",
    "symop",
  ]
  __slots__ = restraint.__slots__ + __bond_attr__
  def as_table_row_phenix(self):
    """
    Values for populating ListCtrl in Phenix GUI (one column per atom).
    """
    return [ self.atoms_info[0].id_str(), self.atoms_info[1].id_str(),
      self.target, self.model, self.score ]
  @staticmethod
  def header():
    return "%-20s %5s %6s %6s %6s %6s %8s %10s" % ("atoms", "ideal",
      "model", "delta", "sigma", "slack", "residual", "deviation")
  def format_values(self):
    # Renamed from the misspelled "formate_values" so this actually
    # overrides restraint.format_values and the output matches header()
    # above (which includes the extra "slack" column).
    return "%5.3f %6.2f %6.3f %6.3f %6.2e %8.2e %4.1f*sigma" % \
      (self.target, self.model, self.delta, self.sigma, self.slack,
       self.residual, abs(self.score))
  # Backward-compatible alias for the old misspelled method name.
  formate_values = format_values
  def as_kinemage(self):
    from mmtbx.kinemage.validation import bond_outlier_as_kinemage
    return bond_outlier_as_kinemage(self)
class angle(restraint):
  """Bond-angle restraint outlier (three atoms)."""
  n_atoms = 3
  def as_kinemage(self):
    """Render this outlier as kinemage markup."""
    from mmtbx.kinemage.validation import angle_outlier_as_kinemage
    return angle_outlier_as_kinemage(self)
class dihedral(restraint):
  """Dihedral-angle restraint outlier (four atoms)."""
  n_atoms = 4
  def as_kinemage(self):
    # Dihedral outliers are not drawn in kinemages.
    return None
class chirality(restraint):
  """
  Chiral-volume restraint outlier.  Distinguishes true handedness swaps
  from pseudochiral naming problems and generic tetrahedral distortions.
  """
  def as_kinemage(self):
    from mmtbx.kinemage.validation import chiral_outlier_as_kinemage
    return chiral_outlier_as_kinemage(self)
  def as_table_row_phenix(self):
    """
    Values for populating ListCtrl in Phenix GUI.
    """
    atoms_str = ", ".join([ a.id_str() for a in self.atoms_info ])
    return [ atoms_str, self.target, self.model, self.score, self.outlier_type() ]
  def is_pseudochiral(self):
    """
    Certain atoms are treated like chiral centers because they bond to atoms
    that have different names without chemical difference (VAL CB bonds to
    CG1 and CG2, for example).  A large chiral volume outlier there reflects
    a failure to follow chemical naming conventions, not necessarily a major
    geometry error, so these pseudochiral centers should be treated
    differently.
    """
    resname = self.atoms_info[0].resname
    atomname = self.atoms_info[0].name.strip()
    # backbone phosphate in nucleic acids: OP1 and OP2 are chemically identical
    if atomname == 'P': return True
    # SF4 and F3S are iron-sulfur clusters with frequent naming problems
    if resname in ['SF4','F3S']: return True
    # VAL CG1 and CG2 are chemically identical
    if resname == 'VAL' and atomname == 'CB': return True
    # LEU CD1 and CD2 are chemically identical
    if resname == 'LEU' and atomname == 'CG': return True
    # Otherwise
    return False
  def is_handedness_swap(self):
    """
    Return True if the deviation is large enough to indicate an inverted
    chiral center.  Proline (PRO/DPR) has slightly different geometry and
    therefore a higher threshold.  The original code fell through and
    returned None (falsy) for proline below threshold; this now returns an
    explicit boolean with identical truthiness.
    """
    resname = self.atoms_info[0].resname
    threshold = 22 if resname in ['PRO','DPR'] else 20
    return self.score > threshold
  def outlier_type(self):
    """Classify this outlier, or None if within 4 sigma."""
    if self.score <= 4: return None
    if not self.is_handedness_swap():
      return "Tetrahedral geometry outlier"
    if self.is_pseudochiral():
      return "Pseudochiral naming error"
    return "Chiral handedness swap"
class planarity(restraint):
  """
  Planarity restraint outlier.  Unlike the other restraint subclasses, this
  one stores RMS/max deltas for the whole plane, so it redefines its slots
  from atoms.__slots__ rather than restraint.__slots__.
  """
  __slots__ = atoms.__slots__ + [
    "rms_deltas",
    "delta_max",
    "residual",
  ]
  def as_table_row_phenix(self):
    """Values for populating ListCtrl in Phenix GUI."""
    joined = ", ".join(a.id_str() for a in self.atoms_info)
    return [ joined, self.delta_max, self.rms_deltas, self.score ]
  @staticmethod
  def header():
    columns = ("atoms", "rms_deltas", "delta_max", "residual", "deviation")
    return "%-20s %10s %10s %10s %10s" % columns
  def format_values(self):
    values = (self.rms_deltas, self.delta_max, self.residual, self.score)
    return "%10.3f %10.3f %10.2f %4.1f*sigma" % values
  def as_kinemage(self):
    # Planarity outliers have no kinemage representation.
    return None
class restraint_validation(validation):
  """
  Base class for collecting information about all restraints of a certain
  type, including overall statistics and individual outliers.
  """
  restraint_type = None
  kinemage_header = None
  gui_list_headers = ["Atoms","Ideal value","Model value","Deviation (sigmas)"]
  gui_formats = ["%s", "%.3f", "%.3f", "%.1f"]
  wx_column_widths = [500, 100, 100, 180]
  __restraints_attr__ = [
    "min",
    "max",
    "mean",
    "z_min",
    "z_max",
    "z_mean",
    "target",
  ]
  __slots__ = validation.__slots__ + __restraints_attr__
  def __init__(self,
      pdb_atoms,
      sites_cart,
      energies_sites,
      restraint_proxies,
      unit_cell,
      ignore_hd=True,
      sigma_cutoff=4.0,
      outliers_only=True,
      use_segids_in_place_of_chainids=False):
    """
    Collect deviation statistics from *energies_sites* and the sorted list
    of outliers beyond *sigma_cutoff* (all restraints if *outliers_only* is
    False).
    """
    validation.__init__(self)
    self.z_min = self.z_max = self.z_mean = None
    # e.g. energies_sites.bond_deviations() -> (min, max, mean)
    deviations_method = getattr(energies_sites, "%s_deviations" %
      self.restraint_type)
    self.min, self.max, self.mean = deviations_method()
    target = getattr(energies_sites, "%s_residual_sum" %
      self.restraint_type)
    self.n_total = getattr(energies_sites, "n_%s_proxies" %
      self.restraint_type)
    if (self.n_total > 0):
      self.target = target / self.n_total
    else :
      self.target = 0
    # Z-score deviations are not available for every restraint type.
    deviations_z_method = getattr(energies_sites, "%s_deviations_z" %
      self.restraint_type, None)
    if (deviations_z_method is not None):
      # Call the method exactly once (the previous code invoked it twice,
      # discarding the first result into an unused local).
      self.z_min, self.z_max, self.z_mean = deviations_z_method()
    self.results = sorted(self.get_outliers(
      proxies=restraint_proxies,
      unit_cell=unit_cell,
      sites_cart=sites_cart,
      pdb_atoms=pdb_atoms,
      sigma_cutoff=sigma_cutoff,
      outliers_only=outliers_only,
      use_segids_in_place_of_chainids=use_segids_in_place_of_chainids))
    self.n_outliers = len(self.results)
  def get_outliers(self, proxies, unit_cell, sites_cart, pdb_atoms,
      sigma_cutoff):
    """Subclasses must return a list of outlier result objects."""
    raise NotImplementedError()
  def show_old_output(self, *args, **kwds):
    raise NotImplementedError()
  def show(self, out=sys.stdout, prefix="  ", verbose=True):
    """Print the table of individual results followed by the summary."""
    if (len(self.results) > 0):
      print(prefix + self.get_result_class().header(), file=out)
      for result in self.results :
        print(result.as_string(prefix=prefix), file=out)
    self.show_summary(out=out, prefix=prefix)
  def show_summary(self, out=sys.stdout, prefix=""):
    """Print min/max/mean deviations (with Z-scores when available)."""
    if (self.n_total == 0):
      print(prefix + "No restraints of this type.", file=out)
      return
    elif (self.n_outliers == 0):
      print(prefix + \
        "All restrained atoms within 4.0 sigma of ideal values.", file=out)
      print("", file=out)
    if (self.z_mean is not None):
      print(prefix + "Min. delta:  %7.3f (Z=%7.3f)" % (self.min,
        self.z_min), file=out)
      print(prefix + "Max. delta:  %7.3f (Z=%7.3f)" % (self.max,
        self.z_max), file=out)
      print(prefix + "Mean delta:  %7.3f (Z=%7.3f)" % (self.mean,
        self.z_mean), file=out)
    else :
      print(prefix + "Min. delta:  %7.3f" % self.min, file=out)
      print(prefix + "Max. delta:  %7.3f" % self.max, file=out)
      print(prefix + "Mean delta:  %7.3f" % self.mean, file=out)
  def as_kinemage(self, chain_id=None):
    """
    Return concatenated kinemage markup for all outliers in *chain_id*,
    or None when this restraint type has no kinemage representation.
    """
    header = self.kinemage_header
    if (header is not None):
      kin_blocks = []
      for result in self.results :
        if (result.is_outlier()) and (result.is_in_chain(chain_id)):
          outlier_kin_txt = result.as_kinemage()
          if (outlier_kin_txt is not None):
            kin_blocks.append(outlier_kin_txt)
      return header + "\n".join(kin_blocks)
    return None
class bonds(restraint_validation):
  """Validation of covalent bond-length restraints."""
  restraint_type = "bond"
  restraint_label = "Bond length"
  kinemage_header = "@subgroup {length devs} dominant\n"
  gui_list_headers = ["Atom 1","Atom 2","Ideal value","Model value",
    "Deviation (sigmas)"]
  gui_formats = ["%s", "%s", "%.3f", "%.3f", "%.1f"]
  wx_column_widths = [150, 150, 100, 100, 180]
  def get_result_class(self) : return bond
  def get_outliers(self, proxies, unit_cell, sites_cart, pdb_atoms,
      sigma_cutoff, outliers_only=True,
      use_segids_in_place_of_chainids=False):
    """
    Build the list of bond outliers beyond sigma_cutoff (or all covalent
    bonds if outliers_only is False).  Unlike the angle/dihedral/chirality
    validators, bonds use the proxy array's own get_sorted() rather than
    the shared _get_sorted() helper.
    """
    from scitbx.array_family import flex
    from cctbx.geometry_restraints.linking_class import linking_class
    origin_ids = linking_class()
    # select every site so get_sorted reports all bonds
    site_labels = flex.bool(sites_cart.size(), True).iselection()
    sorted_table, not_shown = proxies.get_sorted(
      by_value="residual",
      sites_cart=sites_cart,
      site_labels=site_labels,
      origin_id=origin_ids.get_origin_id('covalent geometry'))
    # this can happen for C-alpha-only models, etc.
    if (sorted_table is None):
      return []
    outliers = []
    for restraint_info in sorted_table :
      (i_seq, j_seq, i_seqs, ideal, model, slack, delta, sigma, weight, residual, sym_op_j,
      rt_mx) = restraint_info
      bond_atoms = get_atoms_info(pdb_atoms, iselection=i_seqs,
        use_segids_in_place_of_chainids=use_segids_in_place_of_chainids)
      if sym_op_j:
        # Bond crosses a symmetry boundary: map the second atom through
        # rt_mx (rotation m3 + translation t) so both endpoints are drawn
        # in the same asymmetric unit.
        import scitbx
        m3 = rt_mx.r().as_double()
        m3 = scitbx.matrix.sqr(m3)
        t = rt_mx.t().as_double()
        t = scitbx.matrix.col((t[0],t[1],t[2]))
        xyz = unit_cell.fractionalize(flex.vec3_double([bond_atoms[1].xyz]))
        new_xyz = unit_cell.orthogonalize(m3.elems*xyz+t)
        bond_atoms[1].xyz = new_xyz[0]
      outlier = bond(
        atoms_info=bond_atoms,
        target=ideal,
        model=model,
        sigma=sigma,
        slack=slack,
        delta=delta,
        residual=residual,
        symop=sym_op_j,
        outlier=True,
        xyz=get_mean_xyz(bond_atoms))
      if (outlier.score > sigma_cutoff):
        outliers.append(outlier)
      elif (not outliers_only):
        # keep the non-outlier, but flag it as such
        outlier.outlier=False
        outliers.append(outlier)
    return outliers
class angles(restraint_validation):
  """Validation of covalent bond-angle restraints."""
  restraint_type = "angle"
  restraint_label = "Bond angle"
  kinemage_header = "@subgroup {geom devs} dominant\n"
  def get_result_class(self) : return angle
  def get_outliers(self, proxies, unit_cell, sites_cart, pdb_atoms,
      sigma_cutoff, outliers_only=True,
      use_segids_in_place_of_chainids=False):
    """
    Return angle outliers beyond sigma_cutoff (or all covalent-geometry
    angle restraints if outliers_only is False).
    """
    import cctbx.geometry_restraints
    # renamed local from "sorted" to avoid shadowing the builtin
    sorted_proxies = _get_sorted(proxies,
      unit_cell=unit_cell,
      sites_cart=sites_cart,
      pdb_atoms=pdb_atoms,
      use_segids_in_place_of_chainids=use_segids_in_place_of_chainids)
    outliers = []
    for proxy, proxy_atoms in sorted_proxies :
      restraint = cctbx.geometry_restraints.angle(
        unit_cell=unit_cell,
        proxy=proxy,
        sites_cart=sites_cart)
      outlier = angle(
        atoms_info=proxy_atoms,
        target=restraint.angle_ideal,
        delta=restraint.delta,
        model=restraint.angle_model,
        sigma=cctbx.geometry_restraints.weight_as_sigma(restraint.weight),
        residual=restraint.residual(),
        outlier=True,
        # marker placed on the central atom of the angle
        xyz=proxy_atoms[1].xyz)
      if (outlier.score > sigma_cutoff):
        outliers.append(outlier)
      elif (not outliers_only):
        outlier.outlier=False
        outliers.append(outlier)
    return outliers
class dihedrals(restraint_validation):
  """Validation of dihedral-angle restraints."""
  restraint_type = "dihedral"
  restraint_label = "Dihedral angle"
  def get_result_class(self) : return dihedral
  def get_outliers(self, proxies, unit_cell, sites_cart, pdb_atoms,
      sigma_cutoff, outliers_only=True,
      use_segids_in_place_of_chainids=False):
    """
    Return dihedral outliers beyond sigma_cutoff (or all covalent-geometry
    dihedral restraints if outliers_only is False).
    """
    import cctbx.geometry_restraints
    # renamed local from "sorted" to avoid shadowing the builtin; also
    # forward use_segids_in_place_of_chainids (previously accepted but
    # silently ignored, unlike in the angles validator)
    sorted_proxies = _get_sorted(proxies,
      unit_cell=unit_cell,
      sites_cart=sites_cart,
      pdb_atoms=pdb_atoms,
      use_segids_in_place_of_chainids=use_segids_in_place_of_chainids)
    outliers = []
    for proxy, proxy_atoms in sorted_proxies :
      restraint = cctbx.geometry_restraints.dihedral(
        unit_cell=unit_cell,
        proxy=proxy,
        sites_cart=sites_cart)
      outlier = dihedral(
        atoms_info=proxy_atoms,
        target=restraint.angle_ideal,
        delta=restraint.delta,
        model=restraint.angle_model,
        sigma=cctbx.geometry_restraints.weight_as_sigma(restraint.weight),
        residual=restraint.residual(),
        # marker placed midway between the two central atoms
        xyz=get_mean_xyz([proxy_atoms[1], proxy_atoms[2]]),
        outlier=True)
      if (outlier.score > sigma_cutoff):
        outliers.append(outlier)
      elif (not outliers_only):
        outlier.outlier=False
        outliers.append(outlier)
    return outliers
class chiralities(restraint_validation):
  """Validation of chiral-volume restraints."""
  restraint_type = "chirality"
  restraint_label = "Chiral volume"
  kinemage_header = "@subgroup {chiral devs} dominant\n"
  gui_list_headers = ["Atoms","Ideal value","Model value",
    "Deviation (sigmas)","Probable cause"]
  gui_formats = ["%s", "%.3f", "%.3f", "%.1f", "%s"]
  wx_column_widths = [250, 100, 100, 180, 250]
  def get_result_class(self) : return chirality
  def get_outliers(self, proxies, unit_cell, sites_cart, pdb_atoms,
      sigma_cutoff, outliers_only=True,
      use_segids_in_place_of_chainids=False):
    """
    Return chirality outliers beyond sigma_cutoff (or all covalent-geometry
    chirality restraints if outliers_only is False).
    """
    import cctbx.geometry_restraints
    # renamed local from "sorted" to avoid shadowing the builtin; also
    # forward use_segids_in_place_of_chainids (previously accepted but
    # silently ignored).  Chirality proxies do not use the unit cell.
    sorted_proxies = _get_sorted(proxies,
      unit_cell=None,
      sites_cart=sites_cart,
      pdb_atoms=pdb_atoms,
      use_segids_in_place_of_chainids=use_segids_in_place_of_chainids)
    outliers = []
    for proxy, proxy_atoms in sorted_proxies :
      restraint = cctbx.geometry_restraints.chirality(
        proxy=proxy,
        sites_cart=sites_cart)
      outlier = chirality(
        atoms_info=proxy_atoms,
        target=restraint.volume_ideal,
        delta=restraint.delta,
        model=restraint.volume_model,
        sigma=cctbx.geometry_restraints.weight_as_sigma(restraint.weight),
        residual=restraint.residual(),
        outlier=True,
        xyz=get_mean_xyz(proxy_atoms))
      if (outlier.score > sigma_cutoff):
        outliers.append(outlier)
      elif (not outliers_only):
        outlier.outlier=False
        outliers.append(outlier)
    return outliers
class planarities(restraint_validation):
  """Validation of planar-group restraints."""
  restraint_type = "planarity"
  restraint_label = "Planar group"
  gui_list_headers = ["Atoms", "Max. delta", "RMS(delta)", "Deviation (sigmas)"]
  gui_formats = ["%s", "%.3f", "%.3f", "%.1f"]
  wx_column_widths = [250, 100, 100, 130]
  def get_result_class(self) : return planarity
  def get_outliers(self, proxies, unit_cell, sites_cart, pdb_atoms,
      sigma_cutoff, outliers_only=True,
      use_segids_in_place_of_chainids=False):
    """
    Return planarity outliers beyond sigma_cutoff (or all planarity
    restraints if outliers_only is False).  Uses the proxy array's own
    get_sorted() like the bonds validator.
    """
    import cctbx.geometry_restraints
    from scitbx.array_family import flex
    site_labels = flex.bool(sites_cart.size(), True).iselection()
    sorted_table, n_not_shown = proxies.get_sorted(
      by_value="residual",
      sites_cart=sites_cart,
      site_labels=site_labels,
      unit_cell=unit_cell)
    if (sorted_table is None) : return []
    outliers = []
    for restraint_info in sorted_table :
      # each plane atom entry is (i_seq, delta, sigma)
      (plane_atoms, rms_delta, residual) = restraint_info
      i_seqs = [ a[0] for a in plane_atoms ]
      # worst per-atom deviation in sigma units
      deviation = max([ a[1] / a[2] for a in plane_atoms ])
      # forward use_segids_in_place_of_chainids (previously accepted but
      # silently ignored, unlike in the bonds validator)
      plane_atoms_ = get_atoms_info(pdb_atoms, iselection=i_seqs,
        use_segids_in_place_of_chainids=use_segids_in_place_of_chainids)
      outlier = planarity(
        atoms_info=plane_atoms_,
        rms_deltas=rms_delta,
        residual=residual,
        delta_max=max([ a[1] for a in plane_atoms ]),
        score=deviation,
        outlier=True,
        xyz=get_mean_xyz(plane_atoms_))
      if (outlier.score > sigma_cutoff):
        outliers.append(outlier)
      elif (not outliers_only):
        outlier.outlier=False
        outliers.append(outlier)
    return outliers
def get_mean_xyz(atoms):
  """
  Return the centroid (as a scitbx.matrix.col) of the xyz attributes of the
  given non-empty atom list.  (The parameter name shadows the module-level
  ``atoms`` import; kept for API compatibility.)
  """
  from scitbx.matrix import col
  # local renamed from "sum" to avoid shadowing the builtin
  total = col(atoms[0].xyz)
  for atom in atoms[1:] :
    total += col(atom.xyz)
  return total / len(atoms)
def _get_sorted(O,
    unit_cell,
    sites_cart,
    pdb_atoms,
    by_value="residual",
    use_segids_in_place_of_chainids=False):
  """
  Sort the proxy array *O* by residual or absolute delta (descending) and
  pair each covalent-geometry proxy with its atom info.  Returns a list of
  (proxy, atoms_info) tuples.  *unit_cell* is unused here but kept for API
  compatibility with existing callers.
  """
  assert by_value in ["residual", "delta"]
  if (O.size() == 0): return []
  from scitbx.array_family import flex
  from cctbx.geometry_restraints.linking_class import linking_class
  origin_ids = linking_class()
  deltas = flex.abs(O.deltas(sites_cart=sites_cart))
  residuals = O.residuals(sites_cart=sites_cart)
  if (by_value == "residual"):
    data_to_sort = residuals
  elif (by_value == "delta"):
    data_to_sort = deltas
  i_proxies_sorted = flex.sort_permutation(data=data_to_sort, reverse=True)
  sorted_table = []
  for i_proxy in i_proxies_sorted:
    proxy = O[i_proxy]
    # only report restraints belonging to standard covalent geometry
    if proxy.origin_id != origin_ids.get_origin_id('covalent geometry'):
      continue
    # (removed dead code: a per-proxy sigma/score was computed here but
    # never used; the cctbx.geometry_restraints import existed only for it)
    proxy_atoms = get_atoms_info(pdb_atoms, iselection=proxy.i_seqs,
      use_segids_in_place_of_chainids=use_segids_in_place_of_chainids)
    sorted_table.append((proxy, proxy_atoms))
  return sorted_table
class combined(slots_getstate_setstate):
  """
  Container for individual validations of each of the five covalent restraint
  classes.
  """
  # attribute name per validator class; also used as __slots__ entries
  __geo_types__ = ["bonds", "angles", "dihedrals", "chiralities", "planarities"]
  __slots__ = __geo_types__ + ["_use_cdl"]
  def __init__(self,
      pdb_hierarchy,
      xray_structure,
      geometry_restraints_manager,
      ignore_hd=True,
      sigma_cutoff=4.0,
      outliers_only=True,
      use_segids_in_place_of_chainids=False,
      cdl=None):
    """
    Run all five restraint validations and store each result object as an
    attribute named after its validator class (self.bonds, self.angles, ...).
    If ignore_hd is True, hydrogen/deuterium atoms are excluded first.
    """
    self._use_cdl = cdl
    from mmtbx import restraints
    restraints_manager = restraints.manager(
      geometry=geometry_restraints_manager)
    sites_cart = xray_structure.sites_cart()
    hd_selection = xray_structure.hd_selection()
    pdb_atoms = pdb_hierarchy.atoms()
    if (ignore_hd and hd_selection.count(True) > 0):
      # drop H/D atoms from the manager, sites, and atom list consistently
      restraints_manager = restraints_manager.select(selection = ~hd_selection)
      sites_cart = sites_cart.select(~hd_selection)
      pdb_atoms = pdb_atoms.select(~hd_selection)
    energies_sites = restraints_manager.energies_sites(
      sites_cart=sites_cart,
      compute_gradients=False).geometry
    for geo_type in self.__geo_types__ :
      # look up the validator class (bonds, angles, ...) defined above
      restraint_validation_class = globals()[geo_type]
      if (geo_type == "bonds" ):
        # bond proxies come from pair_proxies rather than a plain attribute
        restraint_proxies = restraints_manager.geometry.pair_proxies(
          sites_cart=sites_cart).bond_proxies
      else :
        restraint_proxies = getattr(restraints_manager.geometry,
          "%s_proxies" % restraint_validation_class.restraint_type)
      rv = restraint_validation_class(
        pdb_atoms=pdb_atoms,
        sites_cart=sites_cart,
        energies_sites=energies_sites,
        restraint_proxies=restraint_proxies,
        unit_cell=xray_structure.unit_cell(),
        ignore_hd=ignore_hd,
        sigma_cutoff=sigma_cutoff,
        outliers_only=outliers_only,
        use_segids_in_place_of_chainids=use_segids_in_place_of_chainids)
      setattr(self, geo_type, rv)
  def show(self, out=sys.stdout, prefix="", verbose=True):
    """Print each validation's table and summary under a sub-header."""
    for geo_type in self.__geo_types__ :
      rv = getattr(self, geo_type)
      make_sub_header(rv.restraint_label + "s", out=out)
      if (geo_type == "angles") and getattr(self, "_use_cdl", False):
        print("  Using conformation-dependent library for mainchain "+\
          "bond angle targets", file=out)
        print("", file=out)
      rv.show(out=out, prefix=prefix)
  def get_bonds_angles_rmsds(self):
    # mean deviations for bonds and angles, in that order
    return (self.bonds.mean, self.angles.mean)
  def as_kinemage(self, chain_id=None):
    """Concatenated kinemage markup for angle and bond outliers."""
    kin_txt = self.angles.as_kinemage(chain_id=chain_id)
    kin_txt += "\n"
    kin_txt += self.bonds.as_kinemage(chain_id=chain_id)
    return kin_txt
| 35.015625 | 132 | 0.678626 |
from __future__ import absolute_import, division, print_function
from mmtbx.validation import atoms, validation, get_atoms_info
from libtbx.str_utils import make_sub_header
from libtbx import slots_getstate_setstate
from math import sqrt
import sys
__restraint_attr__ = [
"sigma",
"target",
"model",
"delta",
"residual",
]
class restraint(atoms):
n_atoms = None
__slots__ = atoms.__slots__ + __restraint_attr__
def __init__(self, **kwds):
atoms.__init__(self, **kwds)
if (self.n_atoms is not None):
assert (len(self.atoms_info) == self.n_atoms)
if (self.score is None):
self.score = abs(self.delta / self.sigma)
@staticmethod
def header():
return "%-20s %7s %7s %7s %6s %6s %10s" % ("atoms", "ideal", "model",
"delta", "sigma", "residual", "deviation")
def as_table_row_phenix(self):
atoms_str = ", ".join([ a.id_str() for a in self.atoms_info ])
return [ atoms_str, self.target, self.model, self.score ]
def id_str(self, ignore_altloc=None):
return ",".join([ a.id_str() for a in self.atoms_info ])
def as_string(self, prefix=""):
id_strs = [ a.id_str() for a in self.atoms_info ]
id_len = max([ len(s) for s in id_strs ])
lines = []
for atom_str in id_strs :
lines.append("%s%-20s" % (prefix, atom_str))
lines[-1] += " " + self.format_values()
return "\n".join(lines)
def format_values(self):
return "%7.2f %7.2f %7.2f %6.2e %6.2e %4.1f*sigma" % (self.target,
self.model, self.delta, self.sigma, self.residual, self.score)
def __cmp__(self, other):
return cmp(other.score, self.score)
def __eq__(self, other):
return self.score == other.score
def __ne__(self, other):
return self.score != other.score
def __lt__(self, other):
return self.score < other.score
def __le__(self, other):
return self.score <= other.score
def __gt__ (self, other):
return self.score > other.score
def __ge__(self, other):
return self.score >= other.score
def kinemage_key(self):
atom0 = self.atoms_info[0]
if len(self.atoms_info)==2:
atom0 = self.atoms_info[1]
elif len(self.atoms_info)==3:
atom0 = self.atoms_info[1]
# dihedrals are assigned to the following residue - this applies to
# omega dihedral but planes are not a problem
elif len(self.atoms_info)==4:
atom0 = self.atoms_info[2]
atom_names = [ a.name.strip().lower() for a in self.atoms_info ]
kin_key = "%1s%3s%2s%4s%1s %s" % (self.get_altloc(),
atom0.resname.lower(), atom0.chain_id, atom0.resseq, atom0.icode,
"-".join(atom_names))
return kin_key
class bond(restraint):
n_atoms = 2
__bond_attr__ = [
"slack",
"symop",
]
__slots__ = restraint.__slots__ + __bond_attr__
def as_table_row_phenix(self):
return [ self.atoms_info[0].id_str(), self.atoms_info[1].id_str(),
self.target, self.model, self.score ]
@staticmethod
def header():
return "%-20s %5s %6s %6s %6s %6s %8s %10s" % ("atoms", "ideal",
"model", "delta", "sigma", "slack", "residual", "deviation")
def formate_values(self):
return "%5.3f %6.2f %6.3f %6.3f %6.2e %8.2e %4.1f*sigma" % \
(self.target, self.model, self.delta, self.sigma, self.slack,
self.residual, abs(self.score))
def as_kinemage(self):
from mmtbx.kinemage.validation import bond_outlier_as_kinemage
return bond_outlier_as_kinemage(self)
class angle(restraint):
n_atoms = 3
def as_kinemage(self):
from mmtbx.kinemage.validation import angle_outlier_as_kinemage
return angle_outlier_as_kinemage(self)
class dihedral(restraint):
n_atoms = 4
def as_kinemage(self):
return None
class chirality(restraint):
def as_kinemage(self):
from mmtbx.kinemage.validation import chiral_outlier_as_kinemage
return chiral_outlier_as_kinemage(self)
def as_table_row_phenix(self):
atoms_str = ", ".join([ a.id_str() for a in self.atoms_info ])
return [ atoms_str, self.target, self.model, self.score, self.outlier_type() ]
def is_pseudochiral(self):
#Certain atoms are treated like chiral centers because they bond to atoms that have different names without chemical difference.
#VAL CB bonds to CG1 and CG2, for example.
#A large chiral volume outlier relfects a failure to follow chemical naming conventions, not necessarily a major geometry error
#So these pseudochiral centers should be treated differently.
#
#backbone phosphate in nucleic acids
#OP1 and OP2 atoms are chemically identical
resname = self.atoms_info[0].resname
atomname = self.atoms_info[0].name.strip()
if atomname == 'P': return True
#SF4 and F3S are iron-sulfur clusters with frequent naming problems
if resname in ['SF4','F3S']: return True
#Val CG1 and CG2 are chemically identical
if resname == 'VAL' and atomname == 'CB': return True
#LEU CD1 and CD2 are chemically identical
if resname == 'LEU' and atomname == 'CG': return True
#Otherwise
return False
def is_handedness_swap(self):
resname = self.atoms_info[0].resname
if resname in ['PRO','DPR']: #proline has slightly different geometry
if self.score > 22:
return True
elif self.score > 20:
return True
else:
return False
def outlier_type(self):
if self.score <= 4: return None
if not self.is_handedness_swap():
return "Tetrahedral geometry outlier"
else:
if self.is_pseudochiral():
return "Pseudochiral naming error"
else:
return "Chiral handedness swap"
class planarity(restraint):
__slots__ = atoms.__slots__ + [
"rms_deltas",
"delta_max",
"residual",
]
def as_table_row_phenix(self):
atoms_str = ", ".join([ a.id_str() for a in self.atoms_info ])
return [ atoms_str, self.delta_max, self.rms_deltas, self.score ]
@staticmethod
def header():
return "%-20s %10s %10s %10s %10s" % ("atoms", "rms_deltas",
"delta_max", "residual", "deviation")
def format_values(self):
return "%10.3f %10.3f %10.2f %4.1f*sigma" % (self.rms_deltas,
self.delta_max, self.residual, self.score)
def as_kinemage(self):
return None
class restraint_validation(validation):
restraint_type = None
kinemage_header = None
gui_list_headers = ["Atoms","Ideal value","Model value","Deviation (sigmas)"]
gui_formats = ["%s", "%.3f", "%.3f", "%.1f"]
wx_column_widths = [500, 100, 100, 180]
__restraints_attr__ = [
"min",
"max",
"mean",
"z_min",
"z_max",
"z_mean",
"target",
]
__slots__ = validation.__slots__ + __restraints_attr__
def __init__(self,
pdb_atoms,
sites_cart,
energies_sites,
restraint_proxies,
unit_cell,
ignore_hd=True,
sigma_cutoff=4.0,
outliers_only=True,
use_segids_in_place_of_chainids=False):
validation.__init__(self)
self.z_min = self.z_max = self.z_mean = None
deviations_method = getattr(energies_sites, "%s_deviations" %
self.restraint_type)
self.min, self.max, self.mean = deviations_method()
target = getattr(energies_sites, "%s_residual_sum" %
self.restraint_type)
self.n_total = getattr(energies_sites, "n_%s_proxies" %
self.restraint_type)
if (self.n_total > 0):
self.target = target / self.n_total
else :
self.target = 0
deviations_z_method = getattr(energies_sites, "%s_deviations_z" %
self.restraint_type, None)
if (deviations_z_method is not None):
deviations_z = deviations_z_method()
self.z_min, self.z_max, self.z_mean = deviations_z_method()
self.results = sorted(self.get_outliers(
proxies=restraint_proxies,
unit_cell=unit_cell,
sites_cart=sites_cart,
pdb_atoms=pdb_atoms,
sigma_cutoff=sigma_cutoff,
outliers_only=outliers_only,
use_segids_in_place_of_chainids=use_segids_in_place_of_chainids))
self.n_outliers = len(self.results)
def get_outliers(self, proxies, unit_cell, sites_cart, pdb_atoms,
sigma_cutoff):
raise NotImplementedError()
def show_old_output(self, *args, **kwds):
raise NotImplementedError()
def show(self, out=sys.stdout, prefix=" ", verbose=True):
if (len(self.results) > 0):
print(prefix + self.get_result_class().header(), file=out)
for result in self.results :
print(result.as_string(prefix=prefix), file=out)
self.show_summary(out=out, prefix=prefix)
def show_summary(self, out=sys.stdout, prefix=""):
if (self.n_total == 0):
print(prefix + "No restraints of this type.", file=out)
return
elif (self.n_outliers == 0):
print(prefix + \
"All restrained atoms within 4.0 sigma of ideal values.", file=out)
print("", file=out)
if (self.z_mean is not None):
print(prefix + "Min. delta: %7.3f (Z=%7.3f)" % (self.min,
self.z_min), file=out)
print(prefix + "Max. delta: %7.3f (Z=%7.3f)" % (self.max,
self.z_max), file=out)
print(prefix + "Mean delta: %7.3f (Z=%7.3f)" % (self.mean,
self.z_mean), file=out)
else :
print(prefix + "Min. delta: %7.3f" % self.min, file=out)
print(prefix + "Max. delta: %7.3f" % self.max, file=out)
print(prefix + "Mean delta: %7.3f" % self.mean, file=out)
def as_kinemage(self, chain_id=None):
header = self.kinemage_header
if (header is not None):
kin_blocks = []
for result in self.results :
if (result.is_outlier()) and (result.is_in_chain(chain_id)):
outlier_kin_txt = result.as_kinemage()
if (outlier_kin_txt is not None):
kin_blocks.append(outlier_kin_txt)
return header + "\n".join(kin_blocks)
return None
class bonds(restraint_validation):
  """Validation of covalent bond lengths against restraint targets."""
  restraint_type = "bond"
  restraint_label = "Bond length"
  kinemage_header = "@subgroup {length devs} dominant\n"
  gui_list_headers = ["Atom 1", "Atom 2", "Ideal value", "Model value",
                      "Deviation (sigmas)"]
  gui_formats = ["%s", "%s", "%.3f", "%.3f", "%.1f"]
  wx_column_widths = [150, 150, 100, 100, 180]
  def get_result_class(self):
    return bond
  def get_outliers(self, proxies, unit_cell, sites_cart, pdb_atoms,
      sigma_cutoff, outliers_only=True,
      use_segids_in_place_of_chainids=False):
    """Collect bond results from the sorted covalent-geometry proxies;
    only deviations above sigma_cutoff are flagged as outliers, and
    non-outliers are included when outliers_only is False."""
    from scitbx.array_family import flex
    from cctbx.geometry_restraints.linking_class import linking_class
    origin_ids = linking_class()
    site_labels = flex.bool(sites_cart.size(), True).iselection()
    sorted_table, not_shown = proxies.get_sorted(
      by_value="residual",
      sites_cart=sites_cart,
      site_labels=site_labels,
      origin_id=origin_ids.get_origin_id('covalent geometry'))
    if sorted_table is None:
      # No bond proxies at all, e.g. for C-alpha-only models.
      return []
    results = []
    for row in sorted_table:
      (i_seq, j_seq, i_seqs, ideal, model, slack, delta, sigma, weight,
       residual, sym_op_j, rt_mx) = row
      bond_atoms = get_atoms_info(
        pdb_atoms, iselection=i_seqs,
        use_segids_in_place_of_chainids=use_segids_in_place_of_chainids)
      if sym_op_j:
        # Move the second atom through the symmetry operator so the
        # reported coordinates match the restrained pair.
        import scitbx
        rot = scitbx.matrix.sqr(rt_mx.r().as_double())
        t_elems = rt_mx.t().as_double()
        shift = scitbx.matrix.col((t_elems[0], t_elems[1], t_elems[2]))
        frac = unit_cell.fractionalize(flex.vec3_double([bond_atoms[1].xyz]))
        bond_atoms[1].xyz = unit_cell.orthogonalize(
          rot.elems * frac + shift)[0]
      result = bond(
        atoms_info=bond_atoms,
        target=ideal,
        model=model,
        sigma=sigma,
        slack=slack,
        delta=delta,
        residual=residual,
        symop=sym_op_j,
        outlier=True,
        xyz=get_mean_xyz(bond_atoms))
      flagged = result.score > sigma_cutoff
      if flagged or not outliers_only:
        result.outlier = flagged
        results.append(result)
    return results
class angles(restraint_validation):
  """Validation of covalent bond angles against restraint targets."""
  restraint_type = "angle"
  restraint_label = "Bond angle"
  kinemage_header = "@subgroup {geom devs} dominant\n"
  def get_result_class(self):
    return angle
  def get_outliers(self, proxies, unit_cell, sites_cart, pdb_atoms,
      sigma_cutoff, outliers_only=True,
      use_segids_in_place_of_chainids=False):
    """Collect angle results; only deviations above sigma_cutoff are
    flagged as outliers, and non-outliers are included when
    outliers_only is False."""
    import cctbx.geometry_restraints
    pairs = _get_sorted(proxies,
      unit_cell=unit_cell,
      sites_cart=sites_cart,
      pdb_atoms=pdb_atoms,
      use_segids_in_place_of_chainids=use_segids_in_place_of_chainids)
    results = []
    for proxy, proxy_atoms in pairs:
      restraint = cctbx.geometry_restraints.angle(
        unit_cell=unit_cell,
        proxy=proxy,
        sites_cart=sites_cart)
      result = angle(
        atoms_info=proxy_atoms,
        target=restraint.angle_ideal,
        delta=restraint.delta,
        model=restraint.angle_model,
        sigma=cctbx.geometry_restraints.weight_as_sigma(restraint.weight),
        residual=restraint.residual(),
        outlier=True,
        # Use the middle (apex) atom as the marker position.
        xyz=proxy_atoms[1].xyz)
      flagged = result.score > sigma_cutoff
      if flagged or not outliers_only:
        result.outlier = flagged
        results.append(result)
    return results
class dihedrals(restraint_validation):
  """Validation of dihedral (torsion) angles against restraint targets."""
  restraint_type = "dihedral"
  restraint_label = "Dihedral angle"
  def get_result_class(self):
    return dihedral
  def get_outliers(self, proxies, unit_cell, sites_cart, pdb_atoms,
      sigma_cutoff, outliers_only=True,
      use_segids_in_place_of_chainids=False):
    """Collect dihedral results; only deviations above sigma_cutoff are
    flagged as outliers, and non-outliers are included when
    outliers_only is False."""
    import cctbx.geometry_restraints
    pairs = _get_sorted(proxies,
      unit_cell=unit_cell,
      sites_cart=sites_cart,
      pdb_atoms=pdb_atoms)
    results = []
    for proxy, proxy_atoms in pairs:
      restraint = cctbx.geometry_restraints.dihedral(
        unit_cell=unit_cell,
        proxy=proxy,
        sites_cart=sites_cart)
      result = dihedral(
        atoms_info=proxy_atoms,
        target=restraint.angle_ideal,
        delta=restraint.delta,
        model=restraint.angle_model,
        sigma=cctbx.geometry_restraints.weight_as_sigma(restraint.weight),
        residual=restraint.residual(),
        # Mark the midpoint of the central bond of the torsion.
        xyz=get_mean_xyz([proxy_atoms[1], proxy_atoms[2]]),
        outlier=True)
      flagged = result.score > sigma_cutoff
      if flagged or not outliers_only:
        result.outlier = flagged
        results.append(result)
    return results
class chiralities(restraint_validation):
  """Validation of chiral volumes against restraint targets."""
  restraint_type = "chirality"
  restraint_label = "Chiral volume"
  kinemage_header = "@subgroup {chiral devs} dominant\n"
  gui_list_headers = ["Atoms", "Ideal value", "Model value",
                      "Deviation (sigmas)", "Probable cause"]
  gui_formats = ["%s", "%.3f", "%.3f", "%.1f", "%s"]
  wx_column_widths = [250, 100, 100, 180, 250]
  def get_result_class(self):
    return chirality
  def get_outliers(self, proxies, unit_cell, sites_cart, pdb_atoms,
      sigma_cutoff, outliers_only=True,
      use_segids_in_place_of_chainids=False):
    """Collect chirality results; only deviations above sigma_cutoff are
    flagged as outliers, and non-outliers are included when
    outliers_only is False."""
    import cctbx.geometry_restraints
    # Chirality restraints are not symmetry-aware, hence unit_cell=None.
    pairs = _get_sorted(proxies,
      unit_cell=None,
      sites_cart=sites_cart,
      pdb_atoms=pdb_atoms)
    results = []
    for proxy, proxy_atoms in pairs:
      restraint = cctbx.geometry_restraints.chirality(
        proxy=proxy,
        sites_cart=sites_cart)
      result = chirality(
        atoms_info=proxy_atoms,
        target=restraint.volume_ideal,
        delta=restraint.delta,
        model=restraint.volume_model,
        sigma=cctbx.geometry_restraints.weight_as_sigma(restraint.weight),
        residual=restraint.residual(),
        outlier=True,
        xyz=get_mean_xyz(proxy_atoms))
      flagged = result.score > sigma_cutoff
      if flagged or not outliers_only:
        result.outlier = flagged
        results.append(result)
    return results
class planarities(restraint_validation):
  """Validation of planar groups against planarity restraints."""
  restraint_type = "planarity"
  restraint_label = "Planar group"
  gui_list_headers = ["Atoms", "Max. delta", "RMS(delta)", "Deviation (sigmas)"]
  gui_formats = ["%s", "%.3f", "%.3f", "%.1f"]
  wx_column_widths = [250, 100, 100, 130]
  def get_result_class(self):
    return planarity
  def get_outliers(self, proxies, unit_cell, sites_cart, pdb_atoms,
      sigma_cutoff, outliers_only=True,
      use_segids_in_place_of_chainids=False):
    """Collect planarity results; only groups whose worst per-atom
    delta/sigma ratio exceeds sigma_cutoff are flagged as outliers, and
    non-outliers are included when outliers_only is False."""
    import cctbx.geometry_restraints
    from scitbx.array_family import flex
    site_labels = flex.bool(sites_cart.size(), True).iselection()
    sorted_table, n_not_shown = proxies.get_sorted(
      by_value="residual",
      sites_cart=sites_cart,
      site_labels=site_labels,
      unit_cell=unit_cell)
    if sorted_table is None:
      return []
    results = []
    for plane_atoms, rms_delta, residual in sorted_table:
      # Each entry is (i_seq, delta, sigma) for one atom of the plane.
      i_seqs = [entry[0] for entry in plane_atoms]
      worst_dev = max(entry[1] / entry[2] for entry in plane_atoms)
      plane_atoms_ = get_atoms_info(pdb_atoms, iselection=i_seqs)
      result = planarity(
        atoms_info=plane_atoms_,
        rms_deltas=rms_delta,
        residual=residual,
        delta_max=max(entry[1] for entry in plane_atoms),
        score=worst_dev,
        outlier=True,
        xyz=get_mean_xyz(plane_atoms_))
      flagged = result.score > sigma_cutoff
      if flagged or not outliers_only:
        result.outlier = flagged
        results.append(result)
    return results
def get_mean_xyz(atoms):
  """Return the centroid of the atoms' Cartesian coordinates as a
  scitbx.matrix.col vector.

  :param atoms: non-empty sequence of atom objects with an ``xyz``
    3-tuple attribute
  """
  from scitbx.matrix import col
  # Accumulate under a dedicated name: the original used ``sum``, which
  # shadowed the builtin of the same name.
  total = col(atoms[0].xyz)
  for atom in atoms[1:] :
    total += col(atom.xyz)
  return total / len(atoms)
def _get_sorted(O,
        unit_cell,
        sites_cart,
        pdb_atoms,
        by_value="residual",
        use_segids_in_place_of_chainids=False):
  """Sort a restraint proxy array by decreasing residual (or absolute
  delta) and return a list of (proxy, atoms_info) pairs, restricted to
  covalent-geometry restraints.

  :param O: cctbx geometry-restraint proxy array
  :param unit_cell: unused; kept for interface compatibility with callers
  :param by_value: sort key, either "residual" or "delta"
  """
  assert by_value in ["residual", "delta"]
  if (O.size() == 0): return []
  from scitbx.array_family import flex
  from cctbx.geometry_restraints.linking_class import linking_class
  origin_ids = linking_class()
  # Only compute the array actually used as the sort key (the original
  # always computed both deltas and residuals, plus an unused per-proxy
  # sigma/score pair).
  if (by_value == "residual"):
    data_to_sort = O.residuals(sites_cart=sites_cart)
  else :
    data_to_sort = flex.abs(O.deltas(sites_cart=sites_cart))
  i_proxies_sorted = flex.sort_permutation(data=data_to_sort, reverse=True)
  covalent_id = origin_ids.get_origin_id('covalent geometry')
  sorted_table = []
  for i_proxy in i_proxies_sorted:
    proxy = O[i_proxy]
    # Skip restraints from other origins (e.g. links added by other
    # mechanisms); only covalent geometry is validated here.
    if proxy.origin_id != covalent_id:
      continue
    proxy_atoms = get_atoms_info(pdb_atoms, iselection=proxy.i_seqs,
      use_segids_in_place_of_chainids=use_segids_in_place_of_chainids)
    sorted_table.append((proxy, proxy_atoms))
  return sorted_table
class combined(slots_getstate_setstate):
  """Aggregate validation of all five covalent restraint types for one
  model.  Each restraint type becomes an attribute of the same name
  (``bonds``, ``angles``, ...) holding its validation object.
  """
  __geo_types__ = ["bonds", "angles", "dihedrals", "chiralities", "planarities"]
  __slots__ = __geo_types__ + ["_use_cdl"]
  def __init__(self,
      pdb_hierarchy,
      xray_structure,
      geometry_restraints_manager,
      ignore_hd=True,
      sigma_cutoff=4.0,
      outliers_only=True,
      use_segids_in_place_of_chainids=False,
      cdl=None):
    self._use_cdl = cdl
    from mmtbx import restraints
    restraints_manager = restraints.manager(
      geometry=geometry_restraints_manager)
    sites_cart = xray_structure.sites_cart()
    hd_selection = xray_structure.hd_selection()
    pdb_atoms = pdb_hierarchy.atoms()
    # Optionally drop H/D atoms; the restraints manager, coordinates and
    # atom array must all be filtered with the same selection so that
    # proxy i_seqs stay in sync with the atom array.
    if (ignore_hd and hd_selection.count(True) > 0):
      restraints_manager = restraints_manager.select(selection = ~hd_selection)
      sites_cart = sites_cart.select(~hd_selection)
      pdb_atoms = pdb_atoms.select(~hd_selection)
    energies_sites = restraints_manager.energies_sites(
      sites_cart=sites_cart,
      compute_gradients=False).geometry
    for geo_type in self.__geo_types__ :
      # Resolve the validation class (bonds, angles, ...) by module-level
      # name lookup.
      restraint_validation_class = globals()[geo_type]
      if (geo_type == "bonds" ):
        # Bond proxies are only exposed through pair_proxies().
        restraint_proxies = restraints_manager.geometry.pair_proxies(
          sites_cart=sites_cart).bond_proxies
      else :
        restraint_proxies = getattr(restraints_manager.geometry,
          "%s_proxies" % restraint_validation_class.restraint_type)
      rv = restraint_validation_class(
        pdb_atoms=pdb_atoms,
        sites_cart=sites_cart,
        energies_sites=energies_sites,
        restraint_proxies=restraint_proxies,
        unit_cell=xray_structure.unit_cell(),
        ignore_hd=ignore_hd,
        sigma_cutoff=sigma_cutoff,
        outliers_only=outliers_only,
        use_segids_in_place_of_chainids=use_segids_in_place_of_chainids)
      setattr(self, geo_type, rv)
  def show(self, out=sys.stdout, prefix="", verbose=True):
    """Print a titled summary section for each restraint type."""
    for geo_type in self.__geo_types__ :
      rv = getattr(self, geo_type)
      make_sub_header(rv.restraint_label + "s", out=out)
      if (geo_type == "angles") and getattr(self, "_use_cdl", False):
        print("  Using conformation-dependent library for mainchain "+\
          "bond angle targets", file=out)
        print("", file=out)
      rv.show(out=out, prefix=prefix)
  def get_bonds_angles_rmsds(self):
    # Despite the name, these are the ``mean`` statistics of the bond and
    # angle validations (mean deltas) — see each validation's show().
    return (self.bonds.mean, self.angles.mean)
  def as_kinemage(self, chain_id=None):
    """Concatenate angle and bond outlier kinemage blocks."""
    kin_txt = self.angles.as_kinemage(chain_id=chain_id)
    kin_txt += "\n"
    kin_txt += self.bonds.as_kinemage(chain_id=chain_id)
    return kin_txt
| true | true |
f7396532b2c3ad5d664726f232cb165395f341c4 | 6,865 | py | Python | fpga/lib/eth/lib/axis/rtl/axis_crosspoint_wrap.py | totuwei/corundum | e983ad519fb4523d0ffca32f5e436195bcfc945c | [
"BSD-2-Clause-FreeBSD"
] | 1,121 | 2015-05-26T14:41:44.000Z | 2022-03-31T07:17:48.000Z | lib/axis/rtl/axis_crosspoint_wrap.py | yuzu762/verilog-ethernet | 108c02d721aada8a8f51e22328f6ca6c64b70d33 | [
"MIT"
] | 98 | 2016-02-12T21:15:45.000Z | 2022-03-31T03:13:00.000Z | lib/axis/rtl/axis_crosspoint_wrap.py | yuzu762/verilog-ethernet | 108c02d721aada8a8f51e22328f6ca6c64b70d33 | [
"MIT"
] | 368 | 2015-05-05T20:49:01.000Z | 2022-03-31T09:43:53.000Z | #!/usr/bin/env python
"""
Generates an AXI Stream crosspoint wrapper with the specified number of ports
"""
import argparse
from jinja2 import Template
def main():
    """Parse command-line options and run the wrapper generator."""
    parser = argparse.ArgumentParser(description=__doc__.strip())
    parser.add_argument('-p', '--ports', type=int, default=[4], nargs='+',
                        help="number of ports")
    parser.add_argument('-n', '--name', type=str, help="module name")
    parser.add_argument('-o', '--output', type=str, help="output file name")
    opts = parser.parse_args()
    try:
        generate(**vars(opts))
    except IOError as err:
        print(err)
        exit(1)
def generate(ports=4, name=None, output=None):
    """Generate a Verilog AXI4-Stream crosspoint wrapper module.

    ports  -- int, or a list of one (square) or two (m inputs, n outputs)
              port counts
    name   -- module name; defaults to axis_crosspoint_wrap_{m}x{n}
    output -- output file name; defaults to <name>.v
    """
    # Normalize the port specification into (m inputs, n outputs).
    if type(ports) is int:
        m = n = ports
    elif len(ports) == 1:
        m = n = ports[0]
    else:
        m, n = ports
    if name is None:
        name = "axis_crosspoint_wrap_{0}x{1}".format(m, n)
    if output is None:
        output = name + ".v"
    print("Generating {0}x{1} port AXI stream crosspoint wrapper {2}...".format(m, n, name))
    # Select-signal widths: cm bits address one of m inputs per output
    # (cn is computed symmetrically and passed to the template, though the
    # current template only uses cm).
    cm = (m-1).bit_length()
    cn = (n-1).bit_length()
    # The template body below is emitted verbatim into the .v file; Jinja2
    # placeholders expand the per-port signal lists.
    t = Template(u"""/*
Copyright (c) 2018-2021 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
// Language: Verilog 2001
`resetall
`timescale 1ns / 1ps
`default_nettype none
/*
 * AXI4-Stream {{m}}x{{n}} crosspoint (wrapper)
 */
module {{name}} #
(
    // Width of AXI stream interfaces in bits
    parameter DATA_WIDTH = 8,
    // Propagate tkeep signal
    parameter KEEP_ENABLE = (DATA_WIDTH>8),
    // tkeep signal width (words per cycle)
    parameter KEEP_WIDTH = (DATA_WIDTH/8),
    // Propagate tlast signal
    parameter LAST_ENABLE = 1,
    // Propagate tid signal
    parameter ID_ENABLE = 0,
    // tid signal width
    parameter ID_WIDTH = 8,
    // Propagate tdest signal
    parameter DEST_ENABLE = 0,
    // tdest signal width
    parameter DEST_WIDTH = 8,
    // Propagate tuser signal
    parameter USER_ENABLE = 1,
    // tuser signal width
    parameter USER_WIDTH = 1
)
(
    input  wire                   clk,
    input  wire                   rst,
    /*
     * AXI Stream inputs
     */
    {%- for p in range(m) %}
    input  wire [DATA_WIDTH-1:0]  s{{'%02d'%p}}_axis_tdata,
    input  wire [KEEP_WIDTH-1:0]  s{{'%02d'%p}}_axis_tkeep,
    input  wire                   s{{'%02d'%p}}_axis_tvalid,
    input  wire                   s{{'%02d'%p}}_axis_tlast,
    input  wire [ID_WIDTH-1:0]    s{{'%02d'%p}}_axis_tid,
    input  wire [DEST_WIDTH-1:0]  s{{'%02d'%p}}_axis_tdest,
    input  wire [USER_WIDTH-1:0]  s{{'%02d'%p}}_axis_tuser,
    {% endfor %}
    /*
     * AXI Stream outputs
     */
    {%- for p in range(n) %}
    output wire [DATA_WIDTH-1:0]  m{{'%02d'%p}}_axis_tdata,
    output wire [KEEP_WIDTH-1:0]  m{{'%02d'%p}}_axis_tkeep,
    output wire                   m{{'%02d'%p}}_axis_tvalid,
    output wire                   m{{'%02d'%p}}_axis_tlast,
    output wire [ID_WIDTH-1:0]    m{{'%02d'%p}}_axis_tid,
    output wire [DEST_WIDTH-1:0]  m{{'%02d'%p}}_axis_tdest,
    output wire [USER_WIDTH-1:0]  m{{'%02d'%p}}_axis_tuser,
    {% endfor %}
    /*
     * Control
     */
    {%- for p in range(n) %}
    input  wire [{{cm-1}}:0]             m{{'%02d'%p}}_select{% if not loop.last %},{% endif %}
    {%- endfor %}
);
axis_crosspoint #(
    .S_COUNT({{m}}),
    .M_COUNT({{n}}),
    .DATA_WIDTH(DATA_WIDTH),
    .KEEP_ENABLE(KEEP_ENABLE),
    .KEEP_WIDTH(KEEP_WIDTH),
    .LAST_ENABLE(LAST_ENABLE),
    .ID_ENABLE(ID_ENABLE),
    .ID_WIDTH(ID_WIDTH),
    .DEST_ENABLE(DEST_ENABLE),
    .DEST_WIDTH(DEST_WIDTH),
    .USER_ENABLE(USER_ENABLE),
    .USER_WIDTH(USER_WIDTH)
)
axis_crosspoint_inst (
    .clk(clk),
    .rst(rst),
    // AXI inputs
    .s_axis_tdata({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axis_tdata{% if not loop.last %}, {% endif %}{% endfor %} }),
    .s_axis_tkeep({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axis_tkeep{% if not loop.last %}, {% endif %}{% endfor %} }),
    .s_axis_tvalid({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axis_tvalid{% if not loop.last %}, {% endif %}{% endfor %} }),
    .s_axis_tlast({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axis_tlast{% if not loop.last %}, {% endif %}{% endfor %} }),
    .s_axis_tid({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axis_tid{% if not loop.last %}, {% endif %}{% endfor %} }),
    .s_axis_tdest({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axis_tdest{% if not loop.last %}, {% endif %}{% endfor %} }),
    .s_axis_tuser({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axis_tuser{% if not loop.last %}, {% endif %}{% endfor %} }),
    // AXI output
    .m_axis_tdata({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tdata{% if not loop.last %}, {% endif %}{% endfor %} }),
    .m_axis_tkeep({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tkeep{% if not loop.last %}, {% endif %}{% endfor %} }),
    .m_axis_tvalid({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tvalid{% if not loop.last %}, {% endif %}{% endfor %} }),
    .m_axis_tlast({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tlast{% if not loop.last %}, {% endif %}{% endfor %} }),
    .m_axis_tid({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tid{% if not loop.last %}, {% endif %}{% endfor %} }),
    .m_axis_tdest({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tdest{% if not loop.last %}, {% endif %}{% endfor %} }),
    .m_axis_tuser({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tuser{% if not loop.last %}, {% endif %}{% endfor %} }),
    // Control
    .select({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_select{% if not loop.last %}, {% endif %}{% endfor %} })
);
endmodule
`resetall
""")
    print(f"Writing file '{output}'...")
    with open(output, 'w') as f:
        f.write(t.render(
            m=m,
            n=n,
            cm=cm,
            cn=cn,
            name=name
        ))
        f.flush()
    print("Done")
# Run the generator only when invoked directly (not on import).
if __name__ == "__main__":
    main()
| 35.02551 | 128 | 0.59563 |
import argparse
from jinja2 import Template
def main():
parser = argparse.ArgumentParser(description=__doc__.strip())
parser.add_argument('-p', '--ports', type=int, default=[4], nargs='+', help="number of ports")
parser.add_argument('-n', '--name', type=str, help="module name")
parser.add_argument('-o', '--output', type=str, help="output file name")
args = parser.parse_args()
try:
generate(**args.__dict__)
except IOError as ex:
print(ex)
exit(1)
def generate(ports=4, name=None, output=None):
if type(ports) is int:
m = n = ports
elif len(ports) == 1:
m = n = ports[0]
else:
m, n = ports
if name is None:
name = "axis_crosspoint_wrap_{0}x{1}".format(m, n)
if output is None:
output = name + ".v"
print("Generating {0}x{1} port AXI stream crosspoint wrapper {2}...".format(m, n, name))
cm = (m-1).bit_length()
cn = (n-1).bit_length()
t = Template(u"""/*
Copyright (c) 2018-2021 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
// Language: Verilog 2001
`resetall
`timescale 1ns / 1ps
`default_nettype none
/*
* AXI4-Stream {{m}}x{{n}} crosspoint (wrapper)
*/
module {{name}} #
(
// Width of AXI stream interfaces in bits
parameter DATA_WIDTH = 8,
// Propagate tkeep signal
parameter KEEP_ENABLE = (DATA_WIDTH>8),
// tkeep signal width (words per cycle)
parameter KEEP_WIDTH = (DATA_WIDTH/8),
// Propagate tlast signal
parameter LAST_ENABLE = 1,
// Propagate tid signal
parameter ID_ENABLE = 0,
// tid signal width
parameter ID_WIDTH = 8,
// Propagate tdest signal
parameter DEST_ENABLE = 0,
// tdest signal width
parameter DEST_WIDTH = 8,
// Propagate tuser signal
parameter USER_ENABLE = 1,
// tuser signal width
parameter USER_WIDTH = 1
)
(
input wire clk,
input wire rst,
/*
* AXI Stream inputs
*/
{%- for p in range(m) %}
input wire [DATA_WIDTH-1:0] s{{'%02d'%p}}_axis_tdata,
input wire [KEEP_WIDTH-1:0] s{{'%02d'%p}}_axis_tkeep,
input wire s{{'%02d'%p}}_axis_tvalid,
input wire s{{'%02d'%p}}_axis_tlast,
input wire [ID_WIDTH-1:0] s{{'%02d'%p}}_axis_tid,
input wire [DEST_WIDTH-1:0] s{{'%02d'%p}}_axis_tdest,
input wire [USER_WIDTH-1:0] s{{'%02d'%p}}_axis_tuser,
{% endfor %}
/*
* AXI Stream outputs
*/
{%- for p in range(n) %}
output wire [DATA_WIDTH-1:0] m{{'%02d'%p}}_axis_tdata,
output wire [KEEP_WIDTH-1:0] m{{'%02d'%p}}_axis_tkeep,
output wire m{{'%02d'%p}}_axis_tvalid,
output wire m{{'%02d'%p}}_axis_tlast,
output wire [ID_WIDTH-1:0] m{{'%02d'%p}}_axis_tid,
output wire [DEST_WIDTH-1:0] m{{'%02d'%p}}_axis_tdest,
output wire [USER_WIDTH-1:0] m{{'%02d'%p}}_axis_tuser,
{% endfor %}
/*
* Control
*/
{%- for p in range(n) %}
input wire [{{cm-1}}:0] m{{'%02d'%p}}_select{% if not loop.last %},{% endif %}
{%- endfor %}
);
axis_crosspoint #(
.S_COUNT({{m}}),
.M_COUNT({{n}}),
.DATA_WIDTH(DATA_WIDTH),
.KEEP_ENABLE(KEEP_ENABLE),
.KEEP_WIDTH(KEEP_WIDTH),
.LAST_ENABLE(LAST_ENABLE),
.ID_ENABLE(ID_ENABLE),
.ID_WIDTH(ID_WIDTH),
.DEST_ENABLE(DEST_ENABLE),
.DEST_WIDTH(DEST_WIDTH),
.USER_ENABLE(USER_ENABLE),
.USER_WIDTH(USER_WIDTH)
)
axis_crosspoint_inst (
.clk(clk),
.rst(rst),
// AXI inputs
.s_axis_tdata({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axis_tdata{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axis_tkeep({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axis_tkeep{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axis_tvalid({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axis_tvalid{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axis_tlast({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axis_tlast{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axis_tid({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axis_tid{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axis_tdest({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axis_tdest{% if not loop.last %}, {% endif %}{% endfor %} }),
.s_axis_tuser({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axis_tuser{% if not loop.last %}, {% endif %}{% endfor %} }),
// AXI output
.m_axis_tdata({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tdata{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axis_tkeep({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tkeep{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axis_tvalid({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tvalid{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axis_tlast({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tlast{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axis_tid({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tid{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axis_tdest({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tdest{% if not loop.last %}, {% endif %}{% endfor %} }),
.m_axis_tuser({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tuser{% if not loop.last %}, {% endif %}{% endfor %} }),
// Control
.select({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_select{% if not loop.last %}, {% endif %}{% endfor %} })
);
endmodule
`resetall
""")
print(f"Writing file '{output}'...")
with open(output, 'w') as f:
f.write(t.render(
m=m,
n=n,
cm=cm,
cn=cn,
name=name
))
f.flush()
print("Done")
if __name__ == "__main__":
main()
| true | true |
f739657fbfc04ae7325b956253ff5e707ad9fa35 | 10,934 | py | Python | shade/tests/functional/test_floating_ip.py | mail2nsrajesh/shade | 65ce1a22896e52ff59a23a393e3bc4227f55f006 | [
"Apache-2.0"
] | null | null | null | shade/tests/functional/test_floating_ip.py | mail2nsrajesh/shade | 65ce1a22896e52ff59a23a393e3bc4227f55f006 | [
"Apache-2.0"
] | null | null | null | shade/tests/functional/test_floating_ip.py | mail2nsrajesh/shade | 65ce1a22896e52ff59a23a393e3bc4227f55f006 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
test_floating_ip
----------------------------------
Functional tests for floating IP resource.
"""
import pprint
from testtools import content
from shade import _utils
from shade import meta
from shade.exc import OpenStackCloudException
from shade.tests.functional import base
from shade.tests.functional.util import pick_flavor
class TestFloatingIP(base.BaseFunctionalTestCase):
    """Functional tests of floating IP creation, auto-attachment,
    detachment, listing and searching against a live cloud."""
    # Upper bound (seconds) for the IP-attachment polling loops below.
    timeout = 60
    def setUp(self):
        """Pick a flavor and image, and register resource cleanups."""
        super(TestFloatingIP, self).setUp()
        self.flavor = pick_flavor(
            self.user_cloud.list_flavors(get_extra=False))
        if self.flavor is None:
            # assertFalse on a non-empty string always fails, using the
            # string as the failure message.
            self.assertFalse('no sensible flavor available')
        self.image = self.pick_image()
        # Generate a random name for these tests
        self.new_item_name = self.getUniqueString()
        self.addCleanup(self._cleanup_network)
        self.addCleanup(self._cleanup_servers)
    def _cleanup_network(self):
        """Delete routers, subnets and networks whose names carry this
        test run's prefix; collect errors and raise them at the end."""
        exception_list = list()
        # Delete stale networks as well as networks created for this test
        if self.user_cloud.has_service('network'):
            # Delete routers
            for r in self.user_cloud.list_routers():
                try:
                    if r['name'].startswith(self.new_item_name):
                        # Detach the external gateway and all matching
                        # subnet interfaces before the router itself.
                        self.user_cloud.update_router(
                            r['id'], ext_gateway_net_id=None)
                        for s in self.user_cloud.list_subnets():
                            if s['name'].startswith(self.new_item_name):
                                try:
                                    self.user_cloud.remove_router_interface(
                                        r, subnet_id=s['id'])
                                except Exception:
                                    # Interface may already be gone;
                                    # best-effort removal.
                                    pass
                        self.user_cloud.delete_router(name_or_id=r['id'])
                except Exception as e:
                    exception_list.append(str(e))
                    continue
            # Delete subnets
            for s in self.user_cloud.list_subnets():
                if s['name'].startswith(self.new_item_name):
                    try:
                        self.user_cloud.delete_subnet(name_or_id=s['id'])
                    except Exception as e:
                        exception_list.append(str(e))
                        continue
            # Delete networks
            for n in self.user_cloud.list_networks():
                if n['name'].startswith(self.new_item_name):
                    try:
                        self.user_cloud.delete_network(name_or_id=n['id'])
                    except Exception as e:
                        exception_list.append(str(e))
                        continue
        if exception_list:
            # Raise an error: we must make users aware that something went
            # wrong
            raise OpenStackCloudException('\n'.join(exception_list))
    def _cleanup_servers(self):
        """Delete servers named with this test run's prefix; collect
        errors and raise them at the end."""
        exception_list = list()
        # Delete stale servers as well as server created for this test
        for i in self.user_cloud.list_servers(bare=True):
            if i.name.startswith(self.new_item_name):
                try:
                    self.user_cloud.delete_server(i, wait=True)
                except Exception as e:
                    exception_list.append(str(e))
                    continue
        if exception_list:
            # Raise an error: we must make users aware that something went
            # wrong
            raise OpenStackCloudException('\n'.join(exception_list))
    def _cleanup_ips(self, server):
        """Delete floating IPs attached to *server*'s private address;
        collect errors and raise them at the end."""
        exception_list = list()
        fixed_ip = meta.get_server_private_ip(server)
        for ip in self.user_cloud.list_floating_ips():
            # Match on either key: nova-net uses 'fixed_ip', neutron
            # uses 'fixed_ip_address'.
            if (ip.get('fixed_ip', None) == fixed_ip
                    or ip.get('fixed_ip_address', None) == fixed_ip):
                try:
                    self.user_cloud.delete_floating_ip(ip['id'])
                except Exception as e:
                    exception_list.append(str(e))
                    continue
        if exception_list:
            # Raise an error: we must make users aware that something went
            # wrong
            raise OpenStackCloudException('\n'.join(exception_list))
    def _setup_networks(self):
        """Create a test network/subnet/router (neutron) or look up an
        existing network (nova-net), and record the NIC spec for new
        servers in self.nic."""
        if self.user_cloud.has_service('network'):
            # Create a network
            self.test_net = self.user_cloud.create_network(
                name=self.new_item_name + '_net')
            # Create a subnet on it
            self.test_subnet = self.user_cloud.create_subnet(
                subnet_name=self.new_item_name + '_subnet',
                network_name_or_id=self.test_net['id'],
                cidr='10.24.4.0/24',
                enable_dhcp=True
            )
            # Create a router
            self.test_router = self.user_cloud.create_router(
                name=self.new_item_name + '_router')
            # Attach the router to an external network
            ext_nets = self.user_cloud.search_networks(
                filters={'router:external': True})
            self.user_cloud.update_router(
                name_or_id=self.test_router['id'],
                ext_gateway_net_id=ext_nets[0]['id'])
            # Attach the router to the internal subnet
            self.user_cloud.add_router_interface(
                self.test_router, subnet_id=self.test_subnet['id'])
            # Select the network for creating new servers
            self.nic = {'net-id': self.test_net['id']}
            self.addDetail(
                'networks-neutron',
                content.text_content(pprint.pformat(
                    self.user_cloud.list_networks())))
        else:
            # Find network names for nova-net
            data = self.user_cloud._compute_client.get('/os-tenant-networks')
            nets = meta.get_and_munchify('networks', data)
            self.addDetail(
                'networks-nova',
                content.text_content(pprint.pformat(
                    nets)))
            self.nic = {'net-id': nets[0].id}
    def test_private_ip(self):
        """A newly booted server gets a non-empty private IPv4 address."""
        self._setup_networks()
        new_server = self.user_cloud.get_openstack_vars(
            self.user_cloud.create_server(
                wait=True, name=self.new_item_name + '_server',
                image=self.image,
                flavor=self.flavor, nics=[self.nic]))
        self.addDetail(
            'server', content.text_content(pprint.pformat(new_server)))
        self.assertNotEqual(new_server['private_v4'], '')
    def test_add_auto_ip(self):
        """An external IPv4 is eventually attached to a new server."""
        self._setup_networks()
        new_server = self.user_cloud.create_server(
            wait=True, name=self.new_item_name + '_server',
            image=self.image,
            flavor=self.flavor, nics=[self.nic])
        # ToDo: remove the following iteration when create_server waits for
        # the IP to be attached
        ip = None
        for _ in _utils._iterate_timeout(
                self.timeout, "Timeout waiting for IP address to be attached"):
            ip = meta.get_server_external_ipv4(self.user_cloud, new_server)
            if ip is not None:
                break
            new_server = self.user_cloud.get_server(new_server.id)
        self.addCleanup(self._cleanup_ips, new_server)
    def test_detach_ip_from_server(self):
        """A floating IP attached to a server can be detached."""
        self._setup_networks()
        new_server = self.user_cloud.create_server(
            wait=True, name=self.new_item_name + '_server',
            image=self.image,
            flavor=self.flavor, nics=[self.nic])
        # ToDo: remove the following iteration when create_server waits for
        # the IP to be attached
        ip = None
        for _ in _utils._iterate_timeout(
                self.timeout, "Timeout waiting for IP address to be attached"):
            ip = meta.get_server_external_ipv4(self.user_cloud, new_server)
            if ip is not None:
                break
            new_server = self.user_cloud.get_server(new_server.id)
        self.addCleanup(self._cleanup_ips, new_server)
        f_ip = self.user_cloud.get_floating_ip(
            id=None, filters={'floating_ip_address': ip})
        self.user_cloud.detach_ip_from_server(
            server_id=new_server.id, floating_ip_id=f_ip['id'])
    def test_list_floating_ips(self):
        """Listing scope differs between neutron (all projects) and
        nova-net (requesting project only); neutron also supports
        server-side filters while nova-net rejects them."""
        fip_admin = self.operator_cloud.create_floating_ip()
        self.addCleanup(self.operator_cloud.delete_floating_ip, fip_admin.id)
        fip_user = self.user_cloud.create_floating_ip()
        self.addCleanup(self.user_cloud.delete_floating_ip, fip_user.id)
        # Get all the floating ips.
        fip_id_list = [
            fip.id for fip in self.operator_cloud.list_floating_ips()
        ]
        if self.user_cloud.has_service('network'):
            # Neutron returns all FIP for all projects by default
            self.assertIn(fip_admin.id, fip_id_list)
            self.assertIn(fip_user.id, fip_id_list)
            # Ask Neutron for only a subset of all the FIPs.
            filtered_fip_id_list = [
                fip.id for fip in self.operator_cloud.list_floating_ips(
                    {'tenant_id': self.user_cloud.current_project_id}
                )
            ]
            self.assertNotIn(fip_admin.id, filtered_fip_id_list)
            self.assertIn(fip_user.id, filtered_fip_id_list)
        else:
            self.assertIn(fip_admin.id, fip_id_list)
            # By default, Nova returns only the FIPs that belong to the
            # project which made the listing request.
            self.assertNotIn(fip_user.id, fip_id_list)
            self.assertRaisesRegex(
                ValueError, "Nova-network don't support server-side.*",
                self.operator_cloud.list_floating_ips, filters={'foo': 'bar'}
            )
    def test_search_floating_ips(self):
        """A freshly created (unattached) FIP matches attached=False and
        not attached=True."""
        fip_user = self.user_cloud.create_floating_ip()
        self.addCleanup(self.user_cloud.delete_floating_ip, fip_user.id)
        self.assertIn(
            fip_user['id'],
            [fip.id for fip in self.user_cloud.search_floating_ips(
                filters={"attached": False})]
        )
        self.assertNotIn(
            fip_user['id'],
            [fip.id for fip in self.user_cloud.search_floating_ips(
                filters={"attached": True})]
        )
| 39.330935 | 79 | 0.58908 |
import pprint
from testtools import content
from shade import _utils
from shade import meta
from shade.exc import OpenStackCloudException
from shade.tests.functional import base
from shade.tests.functional.util import pick_flavor
class TestFloatingIP(base.BaseFunctionalTestCase):
    """Functional tests for floating IP operations against a live cloud.

    Exercises creation, auto-attachment, detachment, listing and searching
    of floating IPs via both the user and operator clouds.  Resources are
    name-prefixed with a unique string so cleanup can find them.
    """
    # Upper bound (seconds) for waiting on IP attachment in the tests below.
    timeout = 60
    def setUp(self):
        """Pick a flavor/image and register cleanup of nets and servers."""
        super(TestFloatingIP, self).setUp()
        self.flavor = pick_flavor(
            self.user_cloud.list_flavors(get_extra=False))
        if self.flavor is None:
            # assertFalse on a truthy string always fails -> aborts the test.
            self.assertFalse('no sensible flavor available')
        self.image = self.pick_image()
        # Unique prefix stamped on every created resource for later cleanup.
        self.new_item_name = self.getUniqueString()
        self.addCleanup(self._cleanup_network)
        self.addCleanup(self._cleanup_servers)
    def _cleanup_network(self):
        """Delete routers, subnets and networks created by this test run.

        Errors are collected and raised in one batch at the end so that a
        failure on one resource does not prevent cleanup of the rest.
        """
        exception_list = list()
        if self.user_cloud.has_service('network'):
            # Routers must go first: clear the gateway and detach subnet
            # interfaces before the router itself can be deleted.
            for r in self.user_cloud.list_routers():
                try:
                    if r['name'].startswith(self.new_item_name):
                        self.user_cloud.update_router(
                            r['id'], ext_gateway_net_id=None)
                        for s in self.user_cloud.list_subnets():
                            if s['name'].startswith(self.new_item_name):
                                try:
                                    self.user_cloud.remove_router_interface(
                                        r, subnet_id=s['id'])
                                except Exception:
                                    # Best effort: the interface may already
                                    # be gone.
                                    pass
                        self.user_cloud.delete_router(name_or_id=r['id'])
                except Exception as e:
                    exception_list.append(str(e))
                    continue
            # Then subnets...
            for s in self.user_cloud.list_subnets():
                if s['name'].startswith(self.new_item_name):
                    try:
                        self.user_cloud.delete_subnet(name_or_id=s['id'])
                    except Exception as e:
                        exception_list.append(str(e))
                        continue
            # ...and finally networks.
            for n in self.user_cloud.list_networks():
                if n['name'].startswith(self.new_item_name):
                    try:
                        self.user_cloud.delete_network(name_or_id=n['id'])
                    except Exception as e:
                        exception_list.append(str(e))
                        continue
        if exception_list:
            # Raise an exception that encapsulates all cleanup failures.
            raise OpenStackCloudException('\n'.join(exception_list))
    def _cleanup_servers(self):
        """Delete every server whose name carries this run's unique prefix."""
        exception_list = list()
        for i in self.user_cloud.list_servers(bare=True):
            if i.name.startswith(self.new_item_name):
                try:
                    self.user_cloud.delete_server(i, wait=True)
                except Exception as e:
                    exception_list.append(str(e))
                    continue
        if exception_list:
            raise OpenStackCloudException('\n'.join(exception_list))
    def _cleanup_ips(self, server):
        """Delete floating IPs attached to *server*'s private fixed IP."""
        exception_list = list()
        fixed_ip = meta.get_server_private_ip(server)
        for ip in self.user_cloud.list_floating_ips():
            # Neutron reports 'fixed_ip_address'; nova-network 'fixed_ip'.
            if (ip.get('fixed_ip', None) == fixed_ip
                    or ip.get('fixed_ip_address', None) == fixed_ip):
                try:
                    self.user_cloud.delete_floating_ip(ip['id'])
                except Exception as e:
                    exception_list.append(str(e))
                    continue
        if exception_list:
            raise OpenStackCloudException('\n'.join(exception_list))
    def _setup_networks(self):
        """Create a test net/subnet/router (neutron) or reuse a tenant net
        (nova-network), and record the NIC spec in self.nic."""
        if self.user_cloud.has_service('network'):
            # Neutron path: build a dedicated network, subnet and router
            # gatewayed to the first external network found.
            self.test_net = self.user_cloud.create_network(
                name=self.new_item_name + '_net')
            self.test_subnet = self.user_cloud.create_subnet(
                subnet_name=self.new_item_name + '_subnet',
                network_name_or_id=self.test_net['id'],
                cidr='10.24.4.0/24',
                enable_dhcp=True
            )
            self.test_router = self.user_cloud.create_router(
                name=self.new_item_name + '_router')
            ext_nets = self.user_cloud.search_networks(
                filters={'router:external': True})
            self.user_cloud.update_router(
                name_or_id=self.test_router['id'],
                ext_gateway_net_id=ext_nets[0]['id'])
            self.user_cloud.add_router_interface(
                self.test_router, subnet_id=self.test_subnet['id'])
            self.nic = {'net-id': self.test_net['id']}
            self.addDetail(
                'networks-neutron',
                content.text_content(pprint.pformat(
                    self.user_cloud.list_networks())))
        else:
            # Nova-network path: use the first existing tenant network.
            data = self.user_cloud._compute_client.get('/os-tenant-networks')
            nets = meta.get_and_munchify('networks', data)
            self.addDetail(
                'networks-nova',
                content.text_content(pprint.pformat(
                    nets)))
            self.nic = {'net-id': nets[0].id}
    def test_private_ip(self):
        """A freshly booted server gets a non-empty private IPv4 address."""
        self._setup_networks()
        new_server = self.user_cloud.get_openstack_vars(
            self.user_cloud.create_server(
                wait=True, name=self.new_item_name + '_server',
                image=self.image,
                flavor=self.flavor, nics=[self.nic]))
        self.addDetail(
            'server', content.text_content(pprint.pformat(new_server)))
        self.assertNotEqual(new_server['private_v4'], '')
    def test_add_auto_ip(self):
        """A floating IP is auto-attached to a new server within timeout."""
        self._setup_networks()
        new_server = self.user_cloud.create_server(
            wait=True, name=self.new_item_name + '_server',
            image=self.image,
            flavor=self.flavor, nics=[self.nic])
        ip = None
        # Poll until the external IPv4 shows up, refreshing the server view
        # on each miss; _iterate_timeout raises when self.timeout elapses.
        for _ in _utils._iterate_timeout(
                self.timeout, "Timeout waiting for IP address to be attached"):
            ip = meta.get_server_external_ipv4(self.user_cloud, new_server)
            if ip is not None:
                break
            new_server = self.user_cloud.get_server(new_server.id)
        self.addCleanup(self._cleanup_ips, new_server)
    def test_detach_ip_from_server(self):
        """A floating IP attached to a server can be detached again."""
        self._setup_networks()
        new_server = self.user_cloud.create_server(
            wait=True, name=self.new_item_name + '_server',
            image=self.image,
            flavor=self.flavor, nics=[self.nic])
        ip = None
        for _ in _utils._iterate_timeout(
                self.timeout, "Timeout waiting for IP address to be attached"):
            ip = meta.get_server_external_ipv4(self.user_cloud, new_server)
            if ip is not None:
                break
            new_server = self.user_cloud.get_server(new_server.id)
        self.addCleanup(self._cleanup_ips, new_server)
        f_ip = self.user_cloud.get_floating_ip(
            id=None, filters={'floating_ip_address': ip})
        self.user_cloud.detach_ip_from_server(
            server_id=new_server.id, floating_ip_id=f_ip['id'])
    def test_list_floating_ips(self):
        """Listing semantics differ between neutron and nova-network.

        Neutron: the operator sees all projects' IPs and can filter by
        tenant.  Nova-network: listing is per-project and server-side
        filters are rejected.
        """
        fip_admin = self.operator_cloud.create_floating_ip()
        self.addCleanup(self.operator_cloud.delete_floating_ip, fip_admin.id)
        fip_user = self.user_cloud.create_floating_ip()
        self.addCleanup(self.user_cloud.delete_floating_ip, fip_user.id)
        fip_id_list = [
            fip.id for fip in self.operator_cloud.list_floating_ips()
        ]
        if self.user_cloud.has_service('network'):
            self.assertIn(fip_admin.id, fip_id_list)
            self.assertIn(fip_user.id, fip_id_list)
            filtered_fip_id_list = [
                fip.id for fip in self.operator_cloud.list_floating_ips(
                    {'tenant_id': self.user_cloud.current_project_id}
                )
            ]
            self.assertNotIn(fip_admin.id, filtered_fip_id_list)
            self.assertIn(fip_user.id, filtered_fip_id_list)
        else:
            self.assertIn(fip_admin.id, fip_id_list)
            self.assertNotIn(fip_user.id, fip_id_list)
            self.assertRaisesRegex(
                ValueError, "Nova-network don't support server-side.*",
                self.operator_cloud.list_floating_ips, filters={'foo': 'bar'}
            )
    def test_search_floating_ips(self):
        """An unattached floating IP matches attached=False, not True."""
        fip_user = self.user_cloud.create_floating_ip()
        self.addCleanup(self.user_cloud.delete_floating_ip, fip_user.id)
        self.assertIn(
            fip_user['id'],
            [fip.id for fip in self.user_cloud.search_floating_ips(
                filters={"attached": False})]
        )
        self.assertNotIn(
            fip_user['id'],
            [fip.id for fip in self.user_cloud.search_floating_ips(
                filters={"attached": True})]
        )
| true | true |
f73965cc71992ab9e887b50a297a05f6fd51c46a | 3,269 | py | Python | api/metadata/migrations/0019_add_non_ministerial_departments.py | cad106uk/market-access-api | a357c33bbec93408b193e598a5628634126e9e99 | [
"MIT"
] | null | null | null | api/metadata/migrations/0019_add_non_ministerial_departments.py | cad106uk/market-access-api | a357c33bbec93408b193e598a5628634126e9e99 | [
"MIT"
] | null | null | null | api/metadata/migrations/0019_add_non_ministerial_departments.py | cad106uk/market-access-api | a357c33bbec93408b193e598a5628634126e9e99 | [
"MIT"
] | null | null | null | from django.db import migrations
from api.metadata.constants import OrganisationType
# Seed data for this data migration: the UK non-ministerial government
# departments to insert.  Each dict maps directly onto Organisation model
# field keyword arguments.
ORGANISATIONS = [
    {
        "name": "The Charity Commission",
        "organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
    },
    {
        "name": "Competition and Markets Authority",
        "organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
    },
    {
        "name": "Crown Prosecution Service",
        "organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
    },
    {
        "name": "Food Standards Agency",
        "organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
    },
    {
        "name": "Forestry Commission",
        "organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
    },
    {
        "name": "Government Actuary's Department",
        "organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
    },
    {
        "name": "Government Legal Department",
        "organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
    },
    {
        "name": "HM Land Registry",
        "organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
    },
    {
        "name": "HM Revenue & Customs",
        "organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
    },
    {
        "name": "NS&I",
        "organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
    },
    {
        "name": "The National Archives",
        "organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
    },
    {
        "name": "National Crime Agency",
        "organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
    },
    {
        "name": "Office of Rail and Road",
        "organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
    },
    {
        "name": "Ofgem",
        "organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
    },
    {
        "name": "Ofqual",
        "organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
    },
    {
        "name": "Ofsted",
        "organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
    },
    {
        "name": "Serious Fraud Office",
        "organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
    },
    {
        "name": "Supreme Court of the United Kingdom",
        "organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
    },
    {
        "name": "UK Statistics Authority",
        "organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
    },
    {
        "name": "The Water Services Regulation Authority",
        "organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
    }
]
def create_organisations(apps, schema_editor):
    """Forward step: insert every ORGANISATIONS entry.

    Uses the historical model via apps.get_model so the migration stays
    valid even after the live Organisation model changes.
    """
    organisation_model = apps.get_model("metadata", "Organisation")
    for attrs in ORGANISATIONS:
        organisation_model.objects.create(**attrs)
def delete_organisations(apps, schema_editor):
    """Reverse step: remove the organisations inserted by this migration.

    Bug fix: the previous code called ``Organisation.objects.delete(**item)``,
    but a Django manager has no ``delete()`` accepting field kwargs — reversing
    the migration would crash.  Deletion must go through a filtered queryset.
    """
    Organisation = apps.get_model("metadata", "Organisation")
    for item in ORGANISATIONS:
        Organisation.objects.filter(**item).delete()
class Migration(migrations.Migration):
    """Data migration seeding the non-ministerial government departments."""
    dependencies = [
        ("metadata", "0018_auto_20201118_1133"),
    ]
    operations = [
        migrations.RunPython(create_organisations, reverse_code=delete_organisations),
    ]
| 28.929204 | 86 | 0.663812 | from django.db import migrations
from api.metadata.constants import OrganisationType
ORGANISATIONS = [
{
"name": "The Charity Commission",
"organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
},
{
"name": "Competition and Markets Authority",
"organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
},
{
"name": "Crown Prosecution Service",
"organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
},
{
"name": "Food Standards Agency",
"organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
},
{
"name": "Forestry Commission",
"organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
},
{
"name": "Government Actuary's Department",
"organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
},
{
"name": "Government Legal Department",
"organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
},
{
"name": "HM Land Registry",
"organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
},
{
"name": "HM Revenue & Customs",
"organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
},
{
"name": "NS&I",
"organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
},
{
"name": "The National Archives",
"organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
},
{
"name": "National Crime Agency",
"organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
},
{
"name": "Office of Rail and Road",
"organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
},
{
"name": "Ofgem",
"organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
},
{
"name": "Ofqual",
"organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
},
{
"name": "Ofsted",
"organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
},
{
"name": "Serious Fraud Office",
"organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
},
{
"name": "Supreme Court of the United Kingdom",
"organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
},
{
"name": "UK Statistics Authority",
"organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
},
{
"name": "The Water Services Regulation Authority",
"organisation_type": OrganisationType.NON_MINISTERIAL_DEPARTMENTS
}
]
def create_organisations(apps, schema_editor):
Organisation = apps.get_model("metadata", "Organisation")
for item in ORGANISATIONS:
Organisation.objects.create(**item)
def delete_organisations(apps, schema_editor):
    """Reverse step: remove the organisations inserted by this migration.

    Bug fix: the previous code called ``Organisation.objects.delete(**item)``,
    but a Django manager has no ``delete()`` accepting field kwargs — reversing
    the migration would crash.  Deletion must go through a filtered queryset.
    """
    Organisation = apps.get_model("metadata", "Organisation")
    for item in ORGANISATIONS:
        Organisation.objects.filter(**item).delete()
class Migration(migrations.Migration):
dependencies = [
("metadata", "0018_auto_20201118_1133"),
]
operations = [
migrations.RunPython(create_organisations, reverse_code=delete_organisations),
]
| true | true |
f73967755c490c9e1120b2c21e745c507cbc5e62 | 13,441 | py | Python | tools/skpbench/skpbench.py | mohad12211/skia | 042a53aa094715e031ebad4da072524ace316744 | [
"BSD-3-Clause"
] | 3 | 2019-03-07T17:01:23.000Z | 2021-07-03T22:01:36.000Z | tools/skpbench/skpbench.py | mohad12211/skia | 042a53aa094715e031ebad4da072524ace316744 | [
"BSD-3-Clause"
] | 2 | 2020-08-11T17:14:27.000Z | 2020-09-12T17:08:46.000Z | tools/skpbench/skpbench.py | promoter/skia | bc5ed776134c60ae13d22cabc8e0f6aca0fdd422 | [
"BSD-3-Clause"
] | 14 | 2015-07-17T17:23:53.000Z | 2020-07-06T21:06:57.000Z | #!/usr/bin/env python
# Copyright 2016 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from _adb import Adb
from _benchresult import BenchResult
from _hardware import HardwareException, Hardware
from argparse import ArgumentParser
from multiprocessing import Queue
from threading import Thread, Timer
import collections
import glob
import math
import re
import subprocess
import sys
import time
__argparse = ArgumentParser(description="""
Executes the skpbench binary with various configs and skps.
Also monitors the output in order to filter out and re-run results that have an
unacceptable stddev.
""")
__argparse.add_argument('skpbench',
help="path to the skpbench binary")
__argparse.add_argument('--adb',
action='store_true', help="execute skpbench over adb")
__argparse.add_argument('--adb_binary', default='adb',
help="The name of the adb binary to use.")
__argparse.add_argument('-s', '--device-serial',
help="if using adb, ID of the specific device to target "
"(only required if more than 1 device is attached)")
__argparse.add_argument('-m', '--max-stddev',
type=float, default=4,
help="initial max allowable relative standard deviation")
__argparse.add_argument('-x', '--suffix',
help="suffix to append on config (e.g. '_before', '_after')")
__argparse.add_argument('-w','--write-path',
help="directory to save .png proofs to disk.")
__argparse.add_argument('-v','--verbosity',
type=int, default=1, help="level of verbosity (0=none to 5=debug)")
__argparse.add_argument('-d', '--duration',
type=int, help="number of milliseconds to run each benchmark")
__argparse.add_argument('-l', '--sample-ms',
type=int, help="duration of a sample (minimum)")
__argparse.add_argument('--force',
action='store_true',
help="perform benchmarking on unrecognized Android devices")
__argparse.add_argument('--gpu',
action='store_true',
help="perform timing on the gpu clock instead of cpu (gpu work only)")
__argparse.add_argument('--fps',
action='store_true', help="use fps instead of ms")
__argparse.add_argument('--pr',
help="comma- or space-separated list of GPU path renderers, including: "
"[[~]all [~]default [~]dashline [~]nvpr [~]msaa [~]aaconvex "
"[~]aalinearizing [~]small [~]tess]")
__argparse.add_argument('--cc',
action='store_true', help="allow coverage counting shortcuts to render paths")
__argparse.add_argument('--nocache',
action='store_true', help="disable caching of path mask textures")
__argparse.add_argument('-c', '--config',
default='gl', help="comma- or space-separated list of GPU configs")
__argparse.add_argument('-a', '--resultsfile',
help="optional file to append results into")
__argparse.add_argument('--ddl',
action='store_true', help="record the skp into DDLs before rendering")
__argparse.add_argument('--ddlNumAdditionalThreads',
type=int, default=0,
help="number of DDL recording threads in addition to main one")
__argparse.add_argument('--ddlTilingWidthHeight',
type=int, default=0, help="number of tiles along one edge when in DDL mode")
__argparse.add_argument('--ddlRecordTime',
action='store_true', help="report just the cpu time spent recording DDLs")
__argparse.add_argument('--gpuThreads',
type=int, default=-1,
help="Create this many extra threads to assist with GPU work, including"
" software path rendering. Defaults to two.")
__argparse.add_argument('srcs',
nargs='+',
help=".skp files or directories to expand for .skp files, and/or .svg files")
FLAGS = __argparse.parse_args()
if FLAGS.adb:
import _adb_path as _path
_path.init(FLAGS.device_serial, FLAGS.adb_binary)
else:
import _os_path as _path
def dump_commandline_if_verbose(commandline):
  """At debug verbosity (>= 5), echo the shell-quoted command to stderr."""
  if FLAGS.verbosity < 5:
    return
  escaped = ('\'%s\'' % re.sub(r'([\\\'])', r'\\\1', arg) for arg in commandline)
  print(' '.join(escaped), file=sys.stderr)
class StddevException(Exception):
  """Raised when a sample's relative stddev exceeds the allowed maximum."""
  pass
class Message:
  """A typed message sent from the monitor thread to the benchmark loop.

  Bug fix: READLINE and POLL_HARDWARE previously had trailing commas
  ("0," / "1,"), accidentally making them one-element tuples while EXIT
  stayed a plain int.  The code only ever compared them against themselves,
  so it happened to work, but plain ints are clearly what was intended.
  """
  READLINE = 0
  POLL_HARDWARE = 1
  EXIT = 2
  def __init__(self, message, value=None):
    self.message = message  # one of the type codes above
    self.value = value      # payload: line text for READLINE, else None
class SubprocessMonitor(Thread):
  """Daemonless background thread that pumps a subprocess's stdout.

  Each line of output is posted to the queue as a READLINE message; a
  single EXIT message is posted once the stream is exhausted.
  """
  def __init__(self, queue, proc):
    self._queue = queue  # Queue shared with the main benchmark loop
    self._proc = proc    # subprocess.Popen with stdout=PIPE
    Thread.__init__(self)
  def run(self):
    """Runs on the background thread."""
    for line in iter(self._proc.stdout.readline, b''):
      self._queue.put(Message(Message.READLINE, line.decode('utf-8').rstrip()))
    self._queue.put(Message(Message.EXIT))
class SKPBench:
  """Drives one invocation of the skpbench binary for a (src, config) pair.

  The class body builds ARGV once, at import time, from the parsed FLAGS;
  instances then append per-run arguments in execute().
  """
  ARGV = [FLAGS.skpbench, '--verbosity', str(FLAGS.verbosity)]
  if FLAGS.duration:
    ARGV.extend(['--duration', str(FLAGS.duration)])
  if FLAGS.sample_ms:
    ARGV.extend(['--sampleMs', str(FLAGS.sample_ms)])
  if FLAGS.gpu:
    ARGV.extend(['--gpuClock', 'true'])
  if FLAGS.fps:
    ARGV.extend(['--fps', 'true'])
  if FLAGS.pr:
    ARGV.extend(['--pr'] + re.split(r'[ ,]', FLAGS.pr))
  if FLAGS.cc:
    ARGV.extend(['--cc', 'true'])
  if FLAGS.nocache:
    ARGV.extend(['--cachePathMasks', 'false'])
  if FLAGS.gpuThreads != -1:
    ARGV.extend(['--gpuThreads', str(FLAGS.gpuThreads)])
  # DDL parameters
  if FLAGS.ddl:
    ARGV.extend(['--ddl', 'true'])
  if FLAGS.ddlNumAdditionalThreads:
    ARGV.extend(['--ddlNumAdditionalThreads',
                 str(FLAGS.ddlNumAdditionalThreads)])
  if FLAGS.ddlTilingWidthHeight:
    ARGV.extend(['--ddlTilingWidthHeight', str(FLAGS.ddlTilingWidthHeight)])
  if FLAGS.ddlRecordTime:
    ARGV.extend(['--ddlRecordTime', 'true'])
  # When benchmarking over adb, the whole command runs inside 'adb shell'.
  if FLAGS.adb:
    if FLAGS.device_serial is None:
      ARGV[:0] = [FLAGS.adb_binary, 'shell']
    else:
      ARGV[:0] = [FLAGS.adb_binary, '-s', FLAGS.device_serial, 'shell']
  @classmethod
  def get_header(cls, outfile=sys.stdout):
    """Return skpbench's column header line (runs with --duration 0).

    NOTE(review): the 'outfile' parameter is unused; callers pass nothing.
    """
    commandline = cls.ARGV + ['--duration', '0']
    dump_commandline_if_verbose(commandline)
    out = subprocess.check_output(commandline, stderr=subprocess.STDOUT)
    return out.rstrip()
  @classmethod
  def run_warmup(cls, warmup_time, config):
    """Run a throwaway 'warmup' bench for warmup_time seconds."""
    if not warmup_time:
      return
    print('running %i second warmup...' % warmup_time, file=sys.stderr)
    commandline = cls.ARGV + ['--duration', str(warmup_time * 1000),
                              '--config', config,
                              '--src', 'warmup']
    dump_commandline_if_verbose(commandline)
    output = subprocess.check_output(commandline, stderr=subprocess.STDOUT)
    # validate the warmup run output.
    for line in output.decode('utf-8').split('\n'):
      match = BenchResult.match(line.rstrip())
      if match and match.bench == 'warmup':
        return
    raise Exception('Invalid warmup output:\n%s' % output)
  def __init__(self, src, config, max_stddev, best_result=None):
    # best_result may carry over a result from a previous, rejected run so
    # a lower-stddev sample is never thrown away on retry.
    self.src = src
    self.config = config
    self.max_stddev = max_stddev
    self.best_result = best_result
    self._queue = Queue()
    self._proc = None
    self._monitor = None
    self._hw_poll_timer = None
  def __enter__(self):
    return self
  def __exit__(self, exception_type, exception_value, traceback):
    # Ensure the child process and the poll timer never outlive the bench.
    if self._proc:
      self.terminate()
    if self._hw_poll_timer:
      self._hw_poll_timer.cancel()
  def execute(self, hardware):
    """Launch skpbench for this src/config and pump its output to completion.

    Raises StddevException (via _process_result) when a sample is too noisy,
    or whatever hardware.sanity_check() raises when the device misbehaves.
    """
    hardware.sanity_check()
    self._schedule_hardware_poll()
    commandline = self.ARGV + ['--config', self.config,
                               '--src', self.src,
                               '--suppressHeader', 'true']
    if FLAGS.write_path:
      pngfile = _path.join(FLAGS.write_path, self.config,
                           _path.basename(self.src) + '.png')
      commandline.extend(['--png', pngfile])
    dump_commandline_if_verbose(commandline)
    self._proc = subprocess.Popen(commandline, stdout=subprocess.PIPE,
                                  stderr=subprocess.STDOUT)
    self._monitor = SubprocessMonitor(self._queue, self._proc)
    self._monitor.start()
    # Event loop: lines from the monitor thread, periodic hardware polls,
    # and a final EXIT once the subprocess's stdout closes.
    while True:
      message = self._queue.get()
      if message.message == Message.READLINE:
        result = BenchResult.match(message.value)
        if result:
          hardware.sanity_check()
          self._process_result(result)
        elif hardware.filter_line(message.value):
          print(message.value, file=sys.stderr)
        continue
      if message.message == Message.POLL_HARDWARE:
        hardware.sanity_check()
        self._schedule_hardware_poll()
        continue
      if message.message == Message.EXIT:
        self._monitor.join()
        self._proc.wait()
        if self._proc.returncode != 0:
          raise Exception("skpbench exited with nonzero exit code %i" %
                          self._proc.returncode)
        self._proc = None
        break
  def _schedule_hardware_poll(self):
    """(Re)arm a one-second timer that enqueues a POLL_HARDWARE message."""
    if self._hw_poll_timer:
      self._hw_poll_timer.cancel()
    self._hw_poll_timer = \
      Timer(1, lambda: self._queue.put(Message(Message.POLL_HARDWARE)))
    self._hw_poll_timer.start()
  def _process_result(self, result):
    """Keep the lowest-stddev result; raise if still above max_stddev."""
    if not self.best_result or result.stddev <= self.best_result.stddev:
      self.best_result = result
    elif FLAGS.verbosity >= 2:
      print("reusing previous result for %s/%s with lower stddev "
            "(%s%% instead of %s%%)." %
            (result.config, result.bench, self.best_result.stddev,
             result.stddev), file=sys.stderr)
    if self.max_stddev and self.best_result.stddev > self.max_stddev:
      raise StddevException()
  def terminate(self):
    """Kill the child skpbench process and reap the monitor thread."""
    if self._proc:
      self._proc.terminate()
      self._monitor.join()
      self._proc.wait()
      self._proc = None
def emit_result(line, resultsfile=None):
  """Write one result line to stdout (flushed) and, when given, mirror it
  to resultsfile."""
  targets = [sys.stdout]
  if resultsfile:
    targets.append(resultsfile)
  for stream in targets:
    print(line, file=stream)
    stream.flush()
def run_benchmarks(configs, srcs, hardware, resultsfile=None):
  """Run every (src, config) pair, re-queuing noisy benches with a relaxed
  stddev cap and recovering from hardware glitches by napping and retrying.
  """
  hasheader = False
  # Work queue of (src, config, max_stddev[, best_result]) tuples.
  benches = collections.deque([(src, config, FLAGS.max_stddev)
                               for src in srcs
                               for config in configs])
  while benches:
    try:
      # 'with hardware' puts the device into benchmarking mode (locked
      # clocks etc.) and restores it on exit.
      with hardware:
        SKPBench.run_warmup(hardware.warmup_time, configs[0])
        if not hasheader:
          emit_result(SKPBench.get_header(), resultsfile)
          hasheader = True
        while benches:
          benchargs = benches.popleft()
          with SKPBench(*benchargs) as skpbench:
            try:
              skpbench.execute(hardware)
              if skpbench.best_result:
                emit_result(skpbench.best_result.format(FLAGS.suffix),
                            resultsfile)
              else:
                print("WARNING: no result for %s with config %s" %
                      (skpbench.src, skpbench.config), file=sys.stderr)
            except StddevException:
              # Too noisy: requeue at the back with a sqrt(2)-relaxed cap,
              # keeping the best result seen so far.
              retry_max_stddev = skpbench.max_stddev * math.sqrt(2)
              if FLAGS.verbosity >= 1:
                print("stddev is too high for %s/%s (%s%%, max=%.2f%%), "
                      "re-queuing with max=%.2f%%." %
                      (skpbench.best_result.config, skpbench.best_result.bench,
                       skpbench.best_result.stddev, skpbench.max_stddev,
                       retry_max_stddev),
                      file=sys.stderr)
              benches.append((skpbench.src, skpbench.config, retry_max_stddev,
                              skpbench.best_result))
            except HardwareException as exception:
              skpbench.terminate()
              if FLAGS.verbosity >= 4:
                hardware.print_debug_diagnostics()
              if FLAGS.verbosity >= 1:
                print("%s; rebooting and taking a %i second nap..." %
                      (exception.message, exception.sleeptime), file=sys.stderr)
              benches.appendleft(benchargs) # retry the same bench next time.
              raise # wake hw up from benchmarking mode before the nap.
    except HardwareException as exception:
      time.sleep(exception.sleeptime)
def main():
  """Entry point: expand configs and skps, pick a hardware monitor, and run.

  Fix: the previous 'assert srcs' is stripped under 'python -O'; validate
  input with an explicit exception instead.
  """
  # Delimiter is ',' or ' ', skip if nested inside parens (e.g. gpu(a=b,c=d)).
  DELIMITER = r'[, ](?!(?:[^(]*\([^)]*\))*[^()]*\))'
  configs = re.split(DELIMITER, FLAGS.config)
  srcs = _path.find_skps(FLAGS.srcs)
  if not srcs:
    raise Exception('no .skp files found in %s' % FLAGS.srcs)
  if FLAGS.adb:
    adb = Adb(FLAGS.device_serial, FLAGS.adb_binary,
              echo=(FLAGS.verbosity >= 5))
    model = adb.check('getprop ro.product.model').strip()
    # Pick the monitor class that knows how to manage this device's clocks.
    if model == 'Pixel C':
      from _hardware_pixel_c import HardwarePixelC
      hardware = HardwarePixelC(adb)
    elif model == 'Pixel':
      from _hardware_pixel import HardwarePixel
      hardware = HardwarePixel(adb)
    elif model == 'Pixel 2':
      from _hardware_pixel2 import HardwarePixel2
      hardware = HardwarePixel2(adb)
    elif model == 'Nexus 6P':
      from _hardware_nexus_6p import HardwareNexus6P
      hardware = HardwareNexus6P(adb)
    elif FLAGS.force:
      from _hardware_android import HardwareAndroid
      print("WARNING: %s: don't know how to monitor this hardware; results "
            "may be unreliable." % model, file=sys.stderr)
      hardware = HardwareAndroid(adb)
    else:
      raise Exception("%s: don't know how to monitor this hardware. "
                      "Use --force to bypass this warning." % model)
  else:
    hardware = Hardware()
  if FLAGS.resultsfile:
    with open(FLAGS.resultsfile, mode='a+') as resultsfile:
      run_benchmarks(configs, srcs, hardware, resultsfile=resultsfile)
  else:
    run_benchmarks(configs, srcs, hardware)
if __name__ == '__main__':
main()
| 36.229111 | 80 | 0.654788 |
from __future__ import print_function
from _adb import Adb
from _benchresult import BenchResult
from _hardware import HardwareException, Hardware
from argparse import ArgumentParser
from multiprocessing import Queue
from threading import Thread, Timer
import collections
import glob
import math
import re
import subprocess
import sys
import time
__argparse = ArgumentParser(description="""
Executes the skpbench binary with various configs and skps.
Also monitors the output in order to filter out and re-run results that have an
unacceptable stddev.
""")
__argparse.add_argument('skpbench',
help="path to the skpbench binary")
__argparse.add_argument('--adb',
action='store_true', help="execute skpbench over adb")
__argparse.add_argument('--adb_binary', default='adb',
help="The name of the adb binary to use.")
__argparse.add_argument('-s', '--device-serial',
help="if using adb, ID of the specific device to target "
"(only required if more than 1 device is attached)")
__argparse.add_argument('-m', '--max-stddev',
type=float, default=4,
help="initial max allowable relative standard deviation")
__argparse.add_argument('-x', '--suffix',
help="suffix to append on config (e.g. '_before', '_after')")
__argparse.add_argument('-w','--write-path',
help="directory to save .png proofs to disk.")
__argparse.add_argument('-v','--verbosity',
type=int, default=1, help="level of verbosity (0=none to 5=debug)")
__argparse.add_argument('-d', '--duration',
type=int, help="number of milliseconds to run each benchmark")
__argparse.add_argument('-l', '--sample-ms',
type=int, help="duration of a sample (minimum)")
__argparse.add_argument('--force',
action='store_true',
help="perform benchmarking on unrecognized Android devices")
__argparse.add_argument('--gpu',
action='store_true',
help="perform timing on the gpu clock instead of cpu (gpu work only)")
__argparse.add_argument('--fps',
action='store_true', help="use fps instead of ms")
__argparse.add_argument('--pr',
help="comma- or space-separated list of GPU path renderers, including: "
"[[~]all [~]default [~]dashline [~]nvpr [~]msaa [~]aaconvex "
"[~]aalinearizing [~]small [~]tess]")
__argparse.add_argument('--cc',
action='store_true', help="allow coverage counting shortcuts to render paths")
__argparse.add_argument('--nocache',
action='store_true', help="disable caching of path mask textures")
__argparse.add_argument('-c', '--config',
default='gl', help="comma- or space-separated list of GPU configs")
__argparse.add_argument('-a', '--resultsfile',
help="optional file to append results into")
__argparse.add_argument('--ddl',
action='store_true', help="record the skp into DDLs before rendering")
__argparse.add_argument('--ddlNumAdditionalThreads',
type=int, default=0,
help="number of DDL recording threads in addition to main one")
__argparse.add_argument('--ddlTilingWidthHeight',
type=int, default=0, help="number of tiles along one edge when in DDL mode")
__argparse.add_argument('--ddlRecordTime',
action='store_true', help="report just the cpu time spent recording DDLs")
__argparse.add_argument('--gpuThreads',
type=int, default=-1,
help="Create this many extra threads to assist with GPU work, including"
" software path rendering. Defaults to two.")
__argparse.add_argument('srcs',
nargs='+',
help=".skp files or directories to expand for .skp files, and/or .svg files")
FLAGS = __argparse.parse_args()
if FLAGS.adb:
import _adb_path as _path
_path.init(FLAGS.device_serial, FLAGS.adb_binary)
else:
import _os_path as _path
def dump_commandline_if_verbose(commandline):
if FLAGS.verbosity >= 5:
quoted = ['\'%s\'' % re.sub(r'([\\\'])', r'\\\1', x) for x in commandline]
print(' '.join(quoted), file=sys.stderr)
class StddevException(Exception):
pass
class Message:
  """A typed message sent from the monitor thread to the benchmark loop.

  Bug fix: READLINE and POLL_HARDWARE previously had trailing commas
  ("0," / "1,"), accidentally making them one-element tuples while EXIT
  stayed a plain int.  The code only ever compared them against themselves,
  so it happened to work, but plain ints are clearly what was intended.
  """
  READLINE = 0
  POLL_HARDWARE = 1
  EXIT = 2
  def __init__(self, message, value=None):
    self.message = message  # one of the type codes above
    self.value = value      # payload: line text for READLINE, else None
class SubprocessMonitor(Thread):
def __init__(self, queue, proc):
self._queue = queue
self._proc = proc
Thread.__init__(self)
def run(self):
for line in iter(self._proc.stdout.readline, b''):
self._queue.put(Message(Message.READLINE, line.decode('utf-8').rstrip()))
self._queue.put(Message(Message.EXIT))
class SKPBench:
ARGV = [FLAGS.skpbench, '--verbosity', str(FLAGS.verbosity)]
if FLAGS.duration:
ARGV.extend(['--duration', str(FLAGS.duration)])
if FLAGS.sample_ms:
ARGV.extend(['--sampleMs', str(FLAGS.sample_ms)])
if FLAGS.gpu:
ARGV.extend(['--gpuClock', 'true'])
if FLAGS.fps:
ARGV.extend(['--fps', 'true'])
if FLAGS.pr:
ARGV.extend(['--pr'] + re.split(r'[ ,]', FLAGS.pr))
if FLAGS.cc:
ARGV.extend(['--cc', 'true'])
if FLAGS.nocache:
ARGV.extend(['--cachePathMasks', 'false'])
if FLAGS.gpuThreads != -1:
ARGV.extend(['--gpuThreads', str(FLAGS.gpuThreads)])
# DDL parameters
if FLAGS.ddl:
ARGV.extend(['--ddl', 'true'])
if FLAGS.ddlNumAdditionalThreads:
ARGV.extend(['--ddlNumAdditionalThreads',
str(FLAGS.ddlNumAdditionalThreads)])
if FLAGS.ddlTilingWidthHeight:
ARGV.extend(['--ddlTilingWidthHeight', str(FLAGS.ddlTilingWidthHeight)])
if FLAGS.ddlRecordTime:
ARGV.extend(['--ddlRecordTime', 'true'])
if FLAGS.adb:
if FLAGS.device_serial is None:
ARGV[:0] = [FLAGS.adb_binary, 'shell']
else:
ARGV[:0] = [FLAGS.adb_binary, '-s', FLAGS.device_serial, 'shell']
@classmethod
def get_header(cls, outfile=sys.stdout):
commandline = cls.ARGV + ['--duration', '0']
dump_commandline_if_verbose(commandline)
out = subprocess.check_output(commandline, stderr=subprocess.STDOUT)
return out.rstrip()
@classmethod
def run_warmup(cls, warmup_time, config):
if not warmup_time:
return
print('running %i second warmup...' % warmup_time, file=sys.stderr)
commandline = cls.ARGV + ['--duration', str(warmup_time * 1000),
'--config', config,
'--src', 'warmup']
dump_commandline_if_verbose(commandline)
output = subprocess.check_output(commandline, stderr=subprocess.STDOUT)
# validate the warmup run output.
for line in output.decode('utf-8').split('\n'):
match = BenchResult.match(line.rstrip())
if match and match.bench == 'warmup':
return
raise Exception('Invalid warmup output:\n%s' % output)
def __init__(self, src, config, max_stddev, best_result=None):
self.src = src
self.config = config
self.max_stddev = max_stddev
self.best_result = best_result
self._queue = Queue()
self._proc = None
self._monitor = None
self._hw_poll_timer = None
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
if self._proc:
self.terminate()
if self._hw_poll_timer:
self._hw_poll_timer.cancel()
def execute(self, hardware):
hardware.sanity_check()
self._schedule_hardware_poll()
commandline = self.ARGV + ['--config', self.config,
'--src', self.src,
'--suppressHeader', 'true']
if FLAGS.write_path:
pngfile = _path.join(FLAGS.write_path, self.config,
_path.basename(self.src) + '.png')
commandline.extend(['--png', pngfile])
dump_commandline_if_verbose(commandline)
self._proc = subprocess.Popen(commandline, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
self._monitor = SubprocessMonitor(self._queue, self._proc)
self._monitor.start()
while True:
message = self._queue.get()
if message.message == Message.READLINE:
result = BenchResult.match(message.value)
if result:
hardware.sanity_check()
self._process_result(result)
elif hardware.filter_line(message.value):
print(message.value, file=sys.stderr)
continue
if message.message == Message.POLL_HARDWARE:
hardware.sanity_check()
self._schedule_hardware_poll()
continue
if message.message == Message.EXIT:
self._monitor.join()
self._proc.wait()
if self._proc.returncode != 0:
raise Exception("skpbench exited with nonzero exit code %i" %
self._proc.returncode)
self._proc = None
break
def _schedule_hardware_poll(self):
if self._hw_poll_timer:
self._hw_poll_timer.cancel()
self._hw_poll_timer = \
Timer(1, lambda: self._queue.put(Message(Message.POLL_HARDWARE)))
self._hw_poll_timer.start()
def _process_result(self, result):
if not self.best_result or result.stddev <= self.best_result.stddev:
self.best_result = result
elif FLAGS.verbosity >= 2:
print("reusing previous result for %s/%s with lower stddev "
"(%s%% instead of %s%%)." %
(result.config, result.bench, self.best_result.stddev,
result.stddev), file=sys.stderr)
if self.max_stddev and self.best_result.stddev > self.max_stddev:
raise StddevException()
def terminate(self):
if self._proc:
self._proc.terminate()
self._monitor.join()
self._proc.wait()
self._proc = None
def emit_result(line, resultsfile=None):
print(line)
sys.stdout.flush()
if resultsfile:
print(line, file=resultsfile)
resultsfile.flush()
def run_benchmarks(configs, srcs, hardware, resultsfile=None):
  """Run every (src, config) pair under `hardware`, emitting one result line
  per bench; noisy benches are re-queued with a relaxed stddev bound and
  hardware failures trigger a nap-and-retry."""
  hasheader = False
  benches = collections.deque([(src, config, FLAGS.max_stddev)
                               for src in srcs
                               for config in configs])
  while benches:
    try:
      with hardware:
        SKPBench.run_warmup(hardware.warmup_time, configs[0])
        if not hasheader:
          # One shared header for the whole session (per-run headers are
          # suppressed by SKPBench.execute).
          emit_result(SKPBench.get_header(), resultsfile)
          hasheader = True
        while benches:
          benchargs = benches.popleft()
          with SKPBench(*benchargs) as skpbench:
            try:
              skpbench.execute(hardware)
              if skpbench.best_result:
                emit_result(skpbench.best_result.format(FLAGS.suffix),
                            resultsfile)
              else:
                print("WARNING: no result for %s with config %s" %
                      (skpbench.src, skpbench.config), file=sys.stderr)
            except StddevException:
              # Too noisy: push to the back of the queue with a sqrt(2)
              # larger stddev budget, carrying the best result so far.
              retry_max_stddev = skpbench.max_stddev * math.sqrt(2)
              if FLAGS.verbosity >= 1:
                print("stddev is too high for %s/%s (%s%%, max=%.2f%%), "
                      "re-queuing with max=%.2f%%." %
                      (skpbench.best_result.config, skpbench.best_result.bench,
                       skpbench.best_result.stddev, skpbench.max_stddev,
                       retry_max_stddev),
                      file=sys.stderr)
              benches.append((skpbench.src, skpbench.config, retry_max_stddev,
                              skpbench.best_result))
            except HardwareException as exception:
              skpbench.terminate()
              if FLAGS.verbosity >= 4:
                hardware.print_debug_diagnostics()
              if FLAGS.verbosity >= 1:
                print("%s; rebooting and taking a %i second nap..." %
                      (exception.message, exception.sleeptime), file=sys.stderr)
              benches.appendleft(benchargs) # retry the same bench next time.
              raise # wake hw up from benchmarking mode before the nap.
    except HardwareException as exception:
      # `with hardware` has already exited here; sleep for the requested
      # time before re-entering benchmarking mode and retrying.
      time.sleep(exception.sleeptime)
def main():
  """Parse configs/srcs from FLAGS, pick a hardware monitor, run the benches."""
  # Delimiter is ',' or ' ', skip if nested inside parens (e.g. gpu(a=b,c=d)).
  DELIMITER = r'[, ](?!(?:[^(]*\([^)]*\))*[^()]*\))'
  configs = re.split(DELIMITER, FLAGS.config)
  srcs = _path.find_skps(FLAGS.srcs)
  assert srcs
  if FLAGS.adb:
    # Android run over adb: pick the monitor class matching the device
    # model reported by `getprop`; each is imported lazily on demand.
    adb = Adb(FLAGS.device_serial, FLAGS.adb_binary,
              echo=(FLAGS.verbosity >= 5))
    model = adb.check('getprop ro.product.model').strip()
    if model == 'Pixel C':
      from _hardware_pixel_c import HardwarePixelC
      hardware = HardwarePixelC(adb)
    elif model == 'Pixel':
      from _hardware_pixel import HardwarePixel
      hardware = HardwarePixel(adb)
    elif model == 'Pixel 2':
      from _hardware_pixel2 import HardwarePixel2
      hardware = HardwarePixel2(adb)
    elif model == 'Nexus 6P':
      from _hardware_nexus_6p import HardwareNexus6P
      hardware = HardwareNexus6P(adb)
    elif FLAGS.force:
      from _hardware_android import HardwareAndroid
      print("WARNING: %s: don't know how to monitor this hardware; results "
            "may be unreliable." % model, file=sys.stderr)
      hardware = HardwareAndroid(adb)
    else:
      raise Exception("%s: don't know how to monitor this hardware. "
                      "Use --force to bypass this warning." % model)
  else:
    # Local host run: generic hardware wrapper.
    hardware = Hardware()
  if FLAGS.resultsfile:
    # Append mode so repeated invocations accumulate into one results file.
    with open(FLAGS.resultsfile, mode='a+') as resultsfile:
      run_benchmarks(configs, srcs, hardware, resultsfile=resultsfile)
  else:
    run_benchmarks(configs, srcs, hardware)
if __name__ == '__main__':
  main()
| true | true |
f73967f1cca06e84f142f53ce00b6ee8683e87d9 | 1,955 | py | Python | Project 2/receiver.py | jontlu/ECE303-Comm-Nets | d21bf285c3231db3c2c328cbf07cbc150afdc1af | [
"MIT"
] | null | null | null | Project 2/receiver.py | jontlu/ECE303-Comm-Nets | d21bf285c3231db3c2c328cbf07cbc150afdc1af | [
"MIT"
] | null | null | null | Project 2/receiver.py | jontlu/ECE303-Comm-Nets | d21bf285c3231db3c2c328cbf07cbc150afdc1af | [
"MIT"
] | null | null | null | # Written by S. Mevawala, modified by D. Gitzel
# ECE303 Communication Networks
# Project 2: Selective Repeat simulator
# Jon Lu & David Yang 05/02/2021
import logging
import channelsimulator
import utils
import sys
import socket
import array
import hashlib
class Receiver(object):
    """Base receiver endpoint for the lossy channel simulator.

    Sets up a logger plus a ChannelSimulator configured both for receiving
    data (inbound_port) and for sending acks back (outbound_port).
    """
    def __init__(self, inbound_port=50005, outbound_port=50006, timeout=5, debug_level=logging.INFO):
        # Logger is named after the concrete subclass for clearer output.
        self.logger = utils.Logger(self.__class__.__name__, debug_level)
        self.inbound_port = inbound_port
        self.outbound_port = outbound_port
        self.simulator = channelsimulator.ChannelSimulator(inbound_port=inbound_port, outbound_port=outbound_port,
                                                           debug_level=debug_level)
        # Both directions share the same socket timeout.
        self.simulator.rcvr_setup(timeout)
        self.simulator.sndr_setup(timeout)
class RDTReceiver(Receiver):
    # Alternating-bit sequence number expected on the next in-order segment.
    expectedRDTBit = 0
    def __init__(self):
        super(RDTReceiver, self).__init__()
    def receive(self):
        """Receive segments until the socket times out, writing in-order
        payloads to stdout and acking every segment.

        Segment layout (per the slices below): byte 0 is the sequence bit,
        bytes 1-32 are the MD5 hex digest of the payload, the remainder is
        the payload itself.

        NOTE(review): the str() comparisons on segment slices look like
        Python 2 semantics; under Python 3, bytes vs str would never compare
        equal -- confirm the intended interpreter.
        """
        try:
            while 1:
                receivedSegment = self.simulator.u_receive()
                receivedRDTBit = receivedSegment[0:1]
                checksum = receivedSegment[1:33]
                data = receivedSegment[33:]
                # Deliver only if both the sequence bit and checksum match.
                if (str(self.expectedRDTBit) == str(receivedRDTBit)) and (str(checksumGet(data)) == str(checksum)):
                    self.simulator.u_send(str(self.expectedRDTBit) + str(checksumGet(data)))
                    self.expectedRDTBit = 1 - self.expectedRDTBit
                    sys.stdout.write("{}".format(data))
                    sys.stdout.flush()
                else:
                    # Corrupted or out-of-order: ack the opposite sequence
                    # number so the sender retransmits.
                    negativeRDTBit = str(1 - self.expectedRDTBit)
                    self.simulator.u_send(negativeRDTBit + str(checksumGet(data)))
        except socket.timeout:
            sys.exit()
def checksumGet(data):
    """Return the 32-character hexadecimal MD5 digest of *data*."""
    digest = hashlib.md5(data)
    return digest.hexdigest()
if __name__ == "__main__":
    # Entry point: run the receiver until the channel socket times out.
    rcvr = RDTReceiver()
    rcvr.receive()
| 32.04918 | 115 | 0.627621 |
import logging
import channelsimulator
import utils
import sys
import socket
import array
import hashlib
class Receiver(object):
def __init__(self, inbound_port=50005, outbound_port=50006, timeout=5, debug_level=logging.INFO):
self.logger = utils.Logger(self.__class__.__name__, debug_level)
self.inbound_port = inbound_port
self.outbound_port = outbound_port
self.simulator = channelsimulator.ChannelSimulator(inbound_port=inbound_port, outbound_port=outbound_port,
debug_level=debug_level)
self.simulator.rcvr_setup(timeout)
self.simulator.sndr_setup(timeout)
class RDTReceiver(Receiver):
expectedRDTBit = 0
def __init__(self):
super(RDTReceiver, self).__init__()
def receive(self):
try:
while 1:
receivedSegment = self.simulator.u_receive()
receivedRDTBit = receivedSegment[0:1]
checksum = receivedSegment[1:33]
data = receivedSegment[33:]
if (str(self.expectedRDTBit) == str(receivedRDTBit)) and (str(checksumGet(data)) == str(checksum)):
self.simulator.u_send(str(self.expectedRDTBit) + str(checksumGet(data)))
self.expectedRDTBit = 1 - self.expectedRDTBit
sys.stdout.write("{}".format(data))
sys.stdout.flush()
else:
negativeRDTBit = str(1 - self.expectedRDTBit)
self.simulator.u_send(negativeRDTBit + str(checksumGet(data)))
except socket.timeout:
sys.exit()
def checksumGet(data):
return hashlib.md5(data).hexdigest()
if __name__ == "__main__":
rcvr = RDTReceiver()
rcvr.receive()
| true | true |
f7396a2d5d3be53b95d8c738f9978a887a468a33 | 300 | py | Python | blog/templatetags/blog.py | mikespub-archive/wkornewald-allbuttonspressed | 57adb0de9a61b8abec80e678b6589f6a5a3131b5 | [
"BSD-3-Clause"
] | null | null | null | blog/templatetags/blog.py | mikespub-archive/wkornewald-allbuttonspressed | 57adb0de9a61b8abec80e678b6589f6a5a3131b5 | [
"BSD-3-Clause"
] | null | null | null | blog/templatetags/blog.py | mikespub-archive/wkornewald-allbuttonspressed | 57adb0de9a61b8abec80e678b6589f6a5a3131b5 | [
"BSD-3-Clause"
] | null | null | null | from ..models import Blog
from django.template import Library
register = Library()
@register.inclusion_tag('blog/feeds.html')
def blog_feeds():
    """Render the feeds template with every Blog in the database."""
    return {'blogs': Blog.objects.all()}
@register.inclusion_tag('blog/feeds.html')
def blog_feed(blog):
    """Render the feeds template for one blog, wrapped in a 1-tuple."""
    context = {'blogs': (blog,)}
    return context
| 21.428571 | 42 | 0.71 | from ..models import Blog
from django.template import Library
register = Library()
@register.inclusion_tag('blog/feeds.html')
def blog_feeds():
blogs = Blog.objects.all()
return {'blogs': blogs}
@register.inclusion_tag('blog/feeds.html')
def blog_feed(blog):
return {'blogs': (blog,)}
| true | true |
f7396a8e2c74169aab5f5de45a108e3134ec84bd | 832 | py | Python | utils/polling.py | CyberPuffer/CyberPuffer | 8e503e5c93ba57ed5242bfa9d0cf1664d857e148 | [
"BSD-2-Clause"
] | null | null | null | utils/polling.py | CyberPuffer/CyberPuffer | 8e503e5c93ba57ed5242bfa9d0cf1664d857e148 | [
"BSD-2-Clause"
] | null | null | null | utils/polling.py | CyberPuffer/CyberPuffer | 8e503e5c93ba57ed5242bfa9d0cf1664d857e148 | [
"BSD-2-Clause"
] | null | null | null | from telegram import constants
from telegram.ext import Updater
from distutils.version import LooseVersion
from telegram import __version__ as ptb_version
from utils import functions, globals, log
logger = log.get_logger(name='Bot')
def polling(args):
    """Start long-polling Telegram for updates using the parsed CLI args."""
    if args.proxy is not None:
        request_kwargs = {'proxy_url': args.proxy}
    else:
        request_kwargs = None
    updater = Updater(token=args.api_secret, use_context=True,
                      request_kwargs=request_kwargs)
    bot_dispatcher = updater.dispatcher
    # Publish the dispatcher so other modules can register handlers on it.
    globals.dispatcher = bot_dispatcher
    functions.load_all_funcs(bot_dispatcher, args)
    supports_all_update_types = \
        LooseVersion(ptb_version) >= LooseVersion('13.5')
    if supports_all_update_types:
        updater.start_polling(allowed_updates=constants.UPDATE_ALL_TYPES)
    else:
        logger.warn('PTB version < 13.5, not all update types are listening')
        updater.start_polling()
from telegram.ext import Updater
from distutils.version import LooseVersion
from telegram import __version__ as ptb_version
from utils import functions, globals, log
logger = log.get_logger(name='Bot')
def polling(args):
request_kwargs = {
'proxy_url': args.proxy} if args.proxy is not None else None
updater = Updater(token=args.api_secret, use_context=True,
request_kwargs=request_kwargs)
dispatcher = updater.dispatcher
globals.dispatcher = dispatcher
functions.load_all_funcs(dispatcher, args)
if LooseVersion(ptb_version) >= LooseVersion('13.5'):
updater.start_polling(allowed_updates=constants.UPDATE_ALL_TYPES)
else:
logger.warn('PTB version < 13.5, not all update types are listening')
updater.start_polling() | true | true |
f7396bf0fe3e00bb7ed8191b0c1b265e7db6a240 | 1,048 | py | Python | Programming Challenges/Maximum Pairwise Product/maximum_pairwise_product.py | Tarbo/algo-data-structure | 3f1219dac9a56e385d3db347e713d1ccae17087b | [
"MIT"
] | null | null | null | Programming Challenges/Maximum Pairwise Product/maximum_pairwise_product.py | Tarbo/algo-data-structure | 3f1219dac9a56e385d3db347e713d1ccae17087b | [
"MIT"
] | null | null | null | Programming Challenges/Maximum Pairwise Product/maximum_pairwise_product.py | Tarbo/algo-data-structure | 3f1219dac9a56e385d3db347e713d1ccae17087b | [
"MIT"
] | null | null | null | # python3
def max_pairwise_product_naive(numbers):
    """Brute-force O(n^2) reference: max product over all distinct index pairs."""
    assert len(numbers) >= 2
    assert all(0 <= x <= 2 * 10 ** 5 for x in numbers)
    best = 0
    count = len(numbers)
    for left in range(count):
        for right in range(left + 1, count):
            candidate = numbers[left] * numbers[right]
            if candidate > best:
                best = candidate
    return best
def max_pairwise_product(numbers):
    """Return the maximum product of two elements at distinct indices.

    Single O(n) pass tracking the two largest values seen so far.  This
    replaces the previous two-pass argmax that used eager bitwise `|` as a
    boolean "or" and relied on `numbers[-1]` being read while the index
    sentinel was still -1.  Duplicate values are handled: two equal maxima
    at different indices form a valid pair.

    Args:
        numbers: sequence of at least two ints, each in [0, 2 * 10 ** 5].

    Returns:
        The largest numbers[i] * numbers[j] with i != j.
    """
    assert len(numbers) >= 2
    assert all(0 <= x <= 2 * 10 ** 5 for x in numbers)
    # Seed the running top two from the first pair, then scan the rest.
    if numbers[0] >= numbers[1]:
        largest, second = numbers[0], numbers[1]
    else:
        largest, second = numbers[1], numbers[0]
    for x in numbers[2:]:
        if x > largest:
            largest, second = x, largest
        elif x > second:
            second = x
    return largest * second
if __name__ == '__main__':
    # stdin format: first line is n, second line is n space-separated ints.
    n = int(input())
    input_numbers = list(map(int, input().split()))
    assert len(input_numbers) == n
    print(max_pairwise_product(input_numbers))
| 27.578947 | 80 | 0.596374 |
def max_pairwise_product_naive(numbers):
assert len(numbers) >= 2
assert all(0 <= x <= 2 * 10 ** 5 for x in numbers)
product = 0
for i in range(len(numbers)):
for j in range(i + 1, len(numbers)):
product = max(product, numbers[i] * numbers[j])
return product
def max_pairwise_product(numbers):
assert len(numbers) >= 2
assert all(0 <= x <= 2 * 10 ** 5 for x in numbers)
index_i = -1
for i in range(len(numbers)):
if (numbers[index_i] < numbers[i]) | (index_i == -1):
index_i = i
index_j = -1
for j in range(len(numbers)):
if (j != index_i) and (numbers[index_j] < numbers[j]) | (index_j == -1):
index_j = j
return numbers[index_j] * numbers[index_i]
if __name__ == '__main__':
n = int(input())
input_numbers = list(map(int, input().split()))
assert len(input_numbers) == n
print(max_pairwise_product(input_numbers))
| true | true |
f7396e484b0f2218dd8363f969c209091fb6e511 | 8,469 | py | Python | tests/integration/suite/test_cluster_defaults.py | arthurh4/rancher | 1e7e336de5ee546a8dd8291211af5cff8754f6f7 | [
"Apache-2.0"
] | null | null | null | tests/integration/suite/test_cluster_defaults.py | arthurh4/rancher | 1e7e336de5ee546a8dd8291211af5cff8754f6f7 | [
"Apache-2.0"
] | null | null | null | tests/integration/suite/test_cluster_defaults.py | arthurh4/rancher | 1e7e336de5ee546a8dd8291211af5cff8754f6f7 | [
"Apache-2.0"
] | null | null | null | import json
import pytest
from rancher import ApiError
from .common import random_str
from .conftest import wait_for
@pytest.mark.skip(reason="cluster-defaults disabled")
def test_generic_initial_defaults(admin_mc):
    """Schema-advertised defaults must match the 'cluster-defaults' setting
    for enableNetworkPolicy and ignoreDockerVersion."""
    cclient = admin_mc.client
    schema_defaults = {}
    setting_defaults = {}
    # Defaults as advertised by the API schema.
    data = cclient.schema.types['cluster'].resourceFields
    default = data["enableNetworkPolicy"]["default"]
    for name in cclient.schema.types['cluster'].resourceFields.keys():
        if name == "enableNetworkPolicy":
            schema_defaults["enableNetworkPolicy"] = default
    for name in cclient.schema.types['rancherKubernetesEngineConfig'] \
            .resourceFields.keys():
        if name == "ignoreDockerVersion":
            schema_defaults["ignoreDockerVersion"] = cclient.schema. \
                types["rancherKubernetesEngineConfig"]. \
                resourceFields["ignoreDockerVersion"]. \
                data_dict()["default"]
    # Defaults as stored in the server-side 'cluster-defaults' setting (JSON).
    setting = cclient.list_setting(name="cluster-defaults")
    data = json.loads(setting['data'][0]['default'])
    setting_defaults["enableNetworkPolicy"] = data["enableNetworkPolicy"]
    setting_defaults["ignoreDockerVersion"] = \
        data["rancherKubernetesEngineConfig"]["ignoreDockerVersion"]
    assert schema_defaults == setting_defaults
def test_generic_initial_conditions(admin_mc, remove_resource):
    """A generic (EKS-driver) cluster starts Pending, with provisioning and
    waiting conditions unknown, and without the exportYaml action."""
    cluster = admin_mc.client.create_cluster(
        name=random_str(), amazonElasticContainerServiceConfig={
            "accessKey": "asdfsd"})
    remove_resource(cluster)
    observed = [(c.type, c.status) for c in cluster.conditions]
    assert observed == [('Pending', 'True'),
                        ('Provisioned', 'Unknown'),
                        ('Waiting', 'Unknown')]
    assert 'exportYaml' not in cluster.actions
def test_eks_cluster_immutable_subnets(admin_mc, remove_resource):
    """EKS subnets may not be changed after creation, while other fields
    (e.g. description) remain editable."""
    cluster = admin_mc.client.create_cluster(
        name=random_str(), amazonElasticContainerServiceConfig={
            "accessKey": "asdfsd",
            "secretKey": "verySecretKey",
            "subnets": [
                "subnet-045bfaeca7d3f1cb3",
                "subnet-02388a166136f98c4"
            ]})
    remove_resource(cluster)
    def cannot_modify_error():
        # Returns True once the API rejects the edit with the expected 422;
        # returns False (causing wait_for to retry) while the cluster is
        # not yet visible to the validator (404).
        with pytest.raises(ApiError) as e:
            # try to edit cluster subnets
            admin_mc.client.update_by_id_cluster(
                id=cluster.id,
                amazonElasticContainerServiceConfig={
                    "accessKey": "asdfsd",
                    "secretKey": "verySecretKey",
                    "subnets": [
                        "subnet-045bfaeca7d3f1cb3"
                    ]})
        if e.value.error.status == 404:
            return False
        print(e)
        assert e.value.error.status == 422
        assert e.value.error.message ==\
            'cannot modify EKS subnets after creation'
        return True
    # lister used by cluster validator may not be up to date, may need to retry
    wait_for(cannot_modify_error)
    # tests updates still work
    new = admin_mc.client.update_by_id_cluster(
        id=cluster.id,
        name=cluster.name,
        description="update",
        amazonElasticContainerServiceConfig={
            # required field when updating KE clusters
            "driverName": "amazonelasticcontainerservice",
            "accessKey": "asdfsd",
            "secretKey": "verySecretKey",
            "subnets": [
                "subnet-045bfaeca7d3f1cb3",
                "subnet-02388a166136f98c4"
            ]})
    assert new.id == cluster.id
    # The stale handle predates the edit; only the fresh response carries
    # the new description.
    assert not hasattr(cluster, "description")
    assert hasattr(new, "description")
def test_rke_initial_conditions(admin_mc, remove_resource):
    """An RKE cluster starts Pending, with provisioning and waiting
    conditions unknown, and exposes the exportYaml action."""
    cluster = admin_mc.client.create_cluster(
        name=random_str(), rancherKubernetesEngineConfig={
            "accessKey": "asdfsd"})
    remove_resource(cluster)
    observed = [(c.type, c.status) for c in cluster.conditions]
    assert observed == [('Pending', 'True'),
                        ('Provisioned', 'Unknown'),
                        ('Waiting', 'Unknown')]
    assert 'exportYaml' in cluster.actions
def test_psp_enabled_set(admin_mc, remove_resource):
    """Asserts podSecurityPolicy field is used to populate pspEnabled in
    cluster capabilities"""
    admin_client = admin_mc.client
    cluster = admin_client.create_cluster(
        name=random_str(), rancherKubernetesEngineConfig={
            "accessKey": "asdfsd",
            "services": {
                "kubeApi": {
                    "podSecurityPolicy": True,
                }
            }
        })
    remove_resource(cluster)

    def psp_is_enabled():
        # None (retry) until capabilities appear on the refreshed cluster.
        refreshed = admin_client.by_id_cluster(id=cluster.id)
        caps = refreshed.get("capabilities")
        if caps is None:
            return None
        return caps.get("pspEnabled") is True

    wait_for(lambda: psp_is_enabled(), fail_handler=lambda: "failed waiting "
             "for pspEnabled to be set")
def test_import_initial_conditions(admin_mc, remove_resource):
    """An imported cluster starts with an empty condition list."""
    imported_cluster = admin_mc.client.create_cluster(name=random_str())
    remove_resource(imported_cluster)
    assert not imported_cluster.conditions
def test_rke_k8s_deprecated_versions(admin_mc, remove_resource):
    """Creating an RKE cluster on a version listed in the
    'k8s-versions-deprecated' setting must fail with a 500."""
    client = admin_mc.client
    deprecated_versions_setting = client.by_id_setting(
        "k8s-versions-deprecated")
    # Mark v1.8.10-rancher1-1 as deprecated for the duration of the test.
    client.update_by_id_setting(id=deprecated_versions_setting.id,
                                value="{\"v1.8.10-rancher1-1\":true}")
    with pytest.raises(ApiError) as e:
        cluster = client.create_cluster(
            name=random_str(), rancherKubernetesEngineConfig={
                "kubernetesVersion": "v1.8.10-rancher1-1"})
        # Only reached if creation unexpectedly succeeds.
        remove_resource(cluster)
    assert e.value.error.status == 500
    assert e.value.error.message == 'Requested kubernetesVersion ' \
                                    'v1.8.10-rancher1-1 is deprecated'
    # Restore the setting so other tests are unaffected.
    client.update_by_id_setting(id=deprecated_versions_setting.id,
                                value="")
def test_save_as_template_action_rbac(admin_mc, remove_resource, user_factory):
    """saveAsTemplate is rejected with 503 for an admin's RKE cluster and is
    absent (AttributeError) on an ordinary user's imported cluster."""
    cluster = admin_mc.client.create_cluster(
        name=random_str(),
        rancherKubernetesEngineConfig={
            "services": {
                "type": "rkeConfigServices",
                "kubeApi": {
                    "alwaysPullImages": "false",
                    "podSecurityPolicy": "false",
                    # BUG FIX: this key was previously split with a
                    # backslash line continuation inside the string
                    # literal, which embedded a run of literal spaces in
                    # the middle of the key ("serviceNodePort      Range").
                    "serviceNodePortRange": "30000-32767",
                    "type": "kubeAPIService"
                }
            }
        })
    remove_resource(cluster)
    assert cluster.conditions[0].type == 'Pending'
    assert cluster.conditions[0].status == 'True'
    try:
        admin_mc.client.action(obj=cluster, action_name="saveAsTemplate",
                               clusterTemplateName="template1",
                               clusterTemplateRevisionName="v1")
    except ApiError as e:
        assert e.error.status == 503
    user = user_factory()
    user_cluster = user.client.create_cluster(name=random_str())
    remove_resource(user_cluster)
    # NOTE(review): these two asserts re-check the admin cluster, not
    # user_cluster -- possibly intended to be user_cluster.conditions;
    # behavior kept as-is.
    assert cluster.conditions[0].type == 'Pending'
    assert cluster.conditions[0].status == 'True'
    try:
        user.client.action(obj=user_cluster, action_name="saveAsTemplate")
    except AttributeError as e:
        assert e is not None
| 38.671233 | 79 | 0.581651 | import json
import pytest
from rancher import ApiError
from .common import random_str
from .conftest import wait_for
@pytest.mark.skip(reason="cluster-defaults disabled")
def test_generic_initial_defaults(admin_mc):
cclient = admin_mc.client
schema_defaults = {}
setting_defaults = {}
data = cclient.schema.types['cluster'].resourceFields
default = data["enableNetworkPolicy"]["default"]
for name in cclient.schema.types['cluster'].resourceFields.keys():
if name == "enableNetworkPolicy":
schema_defaults["enableNetworkPolicy"] = default
for name in cclient.schema.types['rancherKubernetesEngineConfig'] \
.resourceFields.keys():
if name == "ignoreDockerVersion":
schema_defaults["ignoreDockerVersion"] = cclient.schema. \
types["rancherKubernetesEngineConfig"]. \
resourceFields["ignoreDockerVersion"]. \
data_dict()["default"]
setting = cclient.list_setting(name="cluster-defaults")
data = json.loads(setting['data'][0]['default'])
setting_defaults["enableNetworkPolicy"] = data["enableNetworkPolicy"]
setting_defaults["ignoreDockerVersion"] = \
data["rancherKubernetesEngineConfig"]["ignoreDockerVersion"]
assert schema_defaults == setting_defaults
def test_generic_initial_conditions(admin_mc, remove_resource):
cluster = admin_mc.client.create_cluster(
name=random_str(), amazonElasticContainerServiceConfig={
"accessKey": "asdfsd"})
remove_resource(cluster)
assert len(cluster.conditions) == 3
assert cluster.conditions[0].type == 'Pending'
assert cluster.conditions[0].status == 'True'
assert cluster.conditions[1].type == 'Provisioned'
assert cluster.conditions[1].status == 'Unknown'
assert cluster.conditions[2].type == 'Waiting'
assert cluster.conditions[2].status == 'Unknown'
assert 'exportYaml' not in cluster.actions
def test_eks_cluster_immutable_subnets(admin_mc, remove_resource):
cluster = admin_mc.client.create_cluster(
name=random_str(), amazonElasticContainerServiceConfig={
"accessKey": "asdfsd",
"secretKey": "verySecretKey",
"subnets": [
"subnet-045bfaeca7d3f1cb3",
"subnet-02388a166136f98c4"
]})
remove_resource(cluster)
def cannot_modify_error():
with pytest.raises(ApiError) as e:
admin_mc.client.update_by_id_cluster(
id=cluster.id,
amazonElasticContainerServiceConfig={
"accessKey": "asdfsd",
"secretKey": "verySecretKey",
"subnets": [
"subnet-045bfaeca7d3f1cb3"
]})
if e.value.error.status == 404:
return False
print(e)
assert e.value.error.status == 422
assert e.value.error.message ==\
'cannot modify EKS subnets after creation'
return True
wait_for(cannot_modify_error)
new = admin_mc.client.update_by_id_cluster(
id=cluster.id,
name=cluster.name,
description="update",
amazonElasticContainerServiceConfig={
"driverName": "amazonelasticcontainerservice",
"accessKey": "asdfsd",
"secretKey": "verySecretKey",
"subnets": [
"subnet-045bfaeca7d3f1cb3",
"subnet-02388a166136f98c4"
]})
assert new.id == cluster.id
assert not hasattr(cluster, "description")
assert hasattr(new, "description")
def test_rke_initial_conditions(admin_mc, remove_resource):
cluster = admin_mc.client.create_cluster(
name=random_str(), rancherKubernetesEngineConfig={
"accessKey": "asdfsd"})
remove_resource(cluster)
assert len(cluster.conditions) == 3
assert cluster.conditions[0].type == 'Pending'
assert cluster.conditions[0].status == 'True'
assert cluster.conditions[1].type == 'Provisioned'
assert cluster.conditions[1].status == 'Unknown'
assert cluster.conditions[2].type == 'Waiting'
assert cluster.conditions[2].status == 'Unknown'
assert 'exportYaml' in cluster.actions
def test_psp_enabled_set(admin_mc, remove_resource):
admin_client = admin_mc.client
cluster = admin_client.create_cluster(
name=random_str(), rancherKubernetesEngineConfig={
"accessKey": "asdfsd",
"services": {
"kubeApi": {
"podSecurityPolicy": True,
}
}
})
remove_resource(cluster)
def psp_set_to_true():
updated_cluster = admin_client.by_id_cluster(id=cluster.id)
capabilities = updated_cluster.get("capabilities")
if capabilities is not None:
return capabilities.get("pspEnabled") is True
return None
wait_for(lambda: psp_set_to_true(), fail_handler=lambda: "failed waiting "
"for pspEnabled to be set")
def test_import_initial_conditions(admin_mc, remove_resource):
cluster = admin_mc.client.create_cluster(name=random_str())
remove_resource(cluster)
assert not cluster.conditions
def test_rke_k8s_deprecated_versions(admin_mc, remove_resource):
client = admin_mc.client
deprecated_versions_setting = client.by_id_setting(
"k8s-versions-deprecated")
client.update_by_id_setting(id=deprecated_versions_setting.id,
value="{\"v1.8.10-rancher1-1\":true}")
with pytest.raises(ApiError) as e:
cluster = client.create_cluster(
name=random_str(), rancherKubernetesEngineConfig={
"kubernetesVersion": "v1.8.10-rancher1-1"})
remove_resource(cluster)
assert e.value.error.status == 500
assert e.value.error.message == 'Requested kubernetesVersion ' \
'v1.8.10-rancher1-1 is deprecated'
client.update_by_id_setting(id=deprecated_versions_setting.id,
value="")
def test_save_as_template_action_rbac(admin_mc, remove_resource, user_factory):
cluster = admin_mc.client.create_cluster(name=random_str(),
rancherKubernetesEngineConfig={
"services": {
"type":
"rkeConfigServices",
"kubeApi": {
"alwaysPullImages":
"false",
"podSecurityPolicy":
"false",
"serviceNodePort\
Range":
"30000-32767",
"type":
"kubeAPIService"
}
}
})
remove_resource(cluster)
assert cluster.conditions[0].type == 'Pending'
assert cluster.conditions[0].status == 'True'
try:
admin_mc.client.action(obj=cluster, action_name="saveAsTemplate",
clusterTemplateName="template1",
clusterTemplateRevisionName="v1")
except ApiError as e:
assert e.error.status == 503
user = user_factory()
user_cluster = user.client.create_cluster(name=random_str())
remove_resource(user_cluster)
assert cluster.conditions[0].type == 'Pending'
assert cluster.conditions[0].status == 'True'
try:
user.client.action(obj=user_cluster, action_name="saveAsTemplate")
except AttributeError as e:
assert e is not None
| true | true |
f7396f29e1bc9378dc70ababb0cc17936d600bc6 | 745 | py | Python | Test.py | kamil-mech/LamPy-Server | 09bfc2872f368151fa2e402281593a075d838153 | [
"MIT"
] | null | null | null | Test.py | kamil-mech/LamPy-Server | 09bfc2872f368151fa2e402281593a075d838153 | [
"MIT"
] | null | null | null | Test.py | kamil-mech/LamPy-Server | 09bfc2872f368151fa2e402281593a075d838153 | [
"MIT"
] | null | null | null | import time
from Adafruit_PWM_Servo_Driver import PWM
from Servo import Servo
if __name__ == '__main__':
# mg995
servo2 = Servo(channel=0, min=150, max=530, freq=100) # mg995
# sg90
# Servo pan
# servo2 = Servo(channel=2, min=250, max=380, freq=50)
# sg90 2
# servo tilt
# servo3 = Servo(channel=1, min=240, max=327, freq=50)
time.sleep(4)
servo2.move_to(0)
# # servo3.move_to(0)
# #
time.sleep(4)
servo2.move_to(1)
#
# time.sleep(0.1)
# servo2.move_to(0)
# # servo3.move_to(1)
#
# #
time.sleep(4)
# servo2.move_to(0.5)
# time.sleep(1)
# servo3.move_to(0.5)
pwm = PWM(0x40)
pwm.setPWMFreq(50)
pwm.softwareReset()
print('done')
| 19.102564 | 65 | 0.583893 | import time
from Adafruit_PWM_Servo_Driver import PWM
from Servo import Servo
if __name__ == '__main__':
servo2 = Servo(channel=0, min=150, max=530, freq=100)
time.sleep(4)
servo2.move_to(0)
(4)
servo2.move_to(1)
sleep(4)
pwm = PWM(0x40)
pwm.setPWMFreq(50)
pwm.softwareReset()
print('done')
| true | true |
f7396fa042dd6ee92327090762398516eb4196f1 | 7,106 | py | Python | plugin.video.catchuptvandmore/resources/lib/skeletons/wo_live.py | akuala/REPO.KUALA | ea9a157025530d2ce8fa0d88431c46c5352e89d4 | [
"Apache-2.0"
] | 2 | 2018-11-02T19:55:30.000Z | 2020-08-14T02:22:20.000Z | plugin.video.catchuptvandmore/resources/lib/skeletons/wo_live.py | akuala/REPO.KUALA | ea9a157025530d2ce8fa0d88431c46c5352e89d4 | [
"Apache-2.0"
] | null | null | null | plugin.video.catchuptvandmore/resources/lib/skeletons/wo_live.py | akuala/REPO.KUALA | ea9a157025530d2ce8fa0d88431c46c5352e89d4 | [
"Apache-2.0"
] | 3 | 2019-12-17T20:47:00.000Z | 2021-02-11T19:03:59.000Z | # -*- coding: utf-8 -*-
"""
Catch-up TV & More
Copyright (C) 2016 SylvainCecchetto
This file is part of Catch-up TV & More.
Catch-up TV & More is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
Catch-up TV & More is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with Catch-up TV & More; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
# The unicode_literals import only has
# an effect on Python 2.
# It makes string literals as unicode like in Python 3
from __future__ import unicode_literals
from codequick import Script
"""
The following dictionaries describe
the addon's tree architecture.
* Key: item id
* Value: item infos
- callback: Callback function to run once this item is selected
- thumb: Item thumb path relative to "media" folder
- fanart: Item fanart path relative to "meia" folder
- module: Item module to load in order to work (like 6play.py)
"""
menu = {
'euronews': {
'callback':
'live_bridge',
'thumb':
'channels/wo/euronews.png',
'fanart':
'channels/wo/euronews_fanart.jpg',
'module':
'resources.lib.channels.wo.euronews',
'available_languages': [
'FR', 'EN', 'AR', 'DE', 'IT', 'ES', 'PT', 'RU', 'TR', 'FA', 'GR',
'HU'
],
'enabled': True,
'order': 2
},
'arte': {
'callback': 'live_bridge',
'thumb': 'channels/wo/arte.png',
'fanart': 'channels/wo/arte_fanart.jpg',
'module': 'resources.lib.channels.wo.arte',
'available_languages': ['FR', 'DE'],
'xmltv_ids': {
'fr': 'C111.api.telerama.fr'
},
'm3u_groups': {
'fr': 'France TNT'
},
'm3u_orders': {
'fr': 7
},
'enabled': True,
'order': 3
},
'france24': {
'callback': 'live_bridge',
'thumb': 'channels/wo/france24.png',
'fanart': 'channels/wo/france24_fanart.jpg',
'module': 'resources.lib.channels.wo.france24',
'available_languages': ['FR', 'EN', 'AR', 'ES'],
'enabled': True,
'order': 4
},
'nhkworld': {
'callback': 'live_bridge',
'thumb': 'channels/wo/nhkworld.png',
'fanart': 'channels/wo/nhkworld_fanart.jpg',
'module': 'resources.lib.channels.wo.nhkworld',
'available_languages': ['Outside Japan', 'In Japan'],
'enabled': True,
'order': 5
},
'tivi5monde': {
'callback': 'live_bridge',
'thumb': 'channels/wo/tivi5monde.png',
'fanart': 'channels/wo/tivi5monde_fanart.jpg',
'module': 'resources.lib.channels.wo.tivi5monde',
'enabled': True,
'order': 7
},
'bvn': {
'callback': 'live_bridge',
'thumb': 'channels/wo/bvn.png',
'fanart': 'channels/wo/bvn_fanart.jpg',
'module': 'resources.lib.channels.wo.bvn',
'enabled': True,
'order': 8
},
'icitelevision': {
'callback': 'live_bridge',
'thumb': 'channels/wo/icitelevision.png',
'fanart': 'channels/wo/icitelevision_fanart.jpg',
'module': 'resources.lib.channels.wo.icitelevision',
'enabled': True,
'order': 9
},
'arirang': {
'callback': 'live_bridge',
'thumb': 'channels/wo/arirang.png',
'fanart': 'channels/wo/arirang_fanart.jpg',
'module': 'resources.lib.channels.wo.arirang',
'enabled': True,
'order': 11
},
'dw': {
'callback': 'live_bridge',
'thumb': 'channels/wo/dw.png',
'fanart': 'channels/wo/dw_fanart.jpg',
'module': 'resources.lib.channels.wo.dw',
'available_languages': ['EN', 'AR', 'ES', 'DE'],
'enabled': True,
'order': 12
},
'qvc': {
'callback': 'live_bridge',
'thumb': 'channels/wo/qvc.png',
'fanart': 'channels/wo/qvc_fanart.jpg',
'module': 'resources.lib.channels.wo.qvc',
'available_languages': ['JP', 'DE', 'IT', 'UK', 'US'],
'enabled': True,
'order': 15
},
'icirdi': {
'callback': 'live_bridge',
'thumb': 'channels/wo/icirdi.png',
'fanart': 'channels/wo/icirdi_fanart.jpg',
'module': 'resources.lib.channels.wo.icirdi',
'enabled': True,
'order': 16
},
'cgtn': {
'callback': 'live_bridge',
'thumb': 'channels/wo/cgtn.png',
'fanart': 'channels/wo/cgtn_fanart.jpg',
'module': 'resources.lib.channels.wo.cgtn',
'available_languages': ['FR', 'EN', 'AR', 'ES', 'RU'],
'enabled': True,
'order': 17
},
'cgtndocumentary': {
'callback': 'live_bridge',
'thumb': 'channels/wo/cgtndocumentary.png',
'fanart': 'channels/wo/cgtndocumentary_fanart.jpg',
'module': 'resources.lib.channels.wo.cgtn',
'enabled': True,
'order': 18
},
'afriquemedia': {
'callback': 'live_bridge',
'thumb': 'channels/wo/afriquemedia.png',
'fanart': 'channels/wo/afriquemedia_fanart.jpg',
'module': 'resources.lib.channels.wo.afriquemedia',
'enabled': True,
'order': 20
},
'tv5mondefbs': {
'callback': 'live_bridge',
'thumb': 'channels/wo/tv5mondefbs.png',
'fanart': 'channels/wo/tv5mondefbs_fanart.jpg',
'module': 'resources.lib.channels.wo.tv5monde',
'enabled': True,
'order': 21
},
'tv5mondeinfo': {
'callback': 'live_bridge',
'thumb': 'channels/wo/tv5mondeinfo.png',
'fanart': 'channels/wo/tv5mondeinfo_fanart.jpg',
'module': 'resources.lib.channels.wo.tv5monde',
'enabled': True,
'order': 22
},
'channelnewsasia': {
'callback': 'live_bridge',
'thumb': 'channels/wo/channelnewsasia.png',
'fanart': 'channels/wo/channelnewsasia_fanart.jpg',
'module': 'resources.lib.channels.wo.channelnewsasia',
'enabled': True,
'order': 23
},
'rt': {
'callback': 'live_bridge',
'thumb': 'channels/wo/rt.png',
'fanart': 'channels/wo/rt_fanart.jpg',
'module': 'resources.lib.channels.wo.rt',
'available_languages': ['FR', 'EN', 'AR', 'ES'],
'enabled': True,
'order': 24
},
'africa24': {
'callback': 'live_bridge',
'thumb': 'channels/wo/africa24.png',
'fanart': 'channels/wo/africa24_fanart.jpg',
'module': 'resources.lib.channels.wo.africa24',
'enabled': True,
'order': 25
}
}
| 32.746544 | 78 | 0.562764 |
from __future__ import unicode_literals
from codequick import Script
menu = {
'euronews': {
'callback':
'live_bridge',
'thumb':
'channels/wo/euronews.png',
'fanart':
'channels/wo/euronews_fanart.jpg',
'module':
'resources.lib.channels.wo.euronews',
'available_languages': [
'FR', 'EN', 'AR', 'DE', 'IT', 'ES', 'PT', 'RU', 'TR', 'FA', 'GR',
'HU'
],
'enabled': True,
'order': 2
},
'arte': {
'callback': 'live_bridge',
'thumb': 'channels/wo/arte.png',
'fanart': 'channels/wo/arte_fanart.jpg',
'module': 'resources.lib.channels.wo.arte',
'available_languages': ['FR', 'DE'],
'xmltv_ids': {
'fr': 'C111.api.telerama.fr'
},
'm3u_groups': {
'fr': 'France TNT'
},
'm3u_orders': {
'fr': 7
},
'enabled': True,
'order': 3
},
'france24': {
'callback': 'live_bridge',
'thumb': 'channels/wo/france24.png',
'fanart': 'channels/wo/france24_fanart.jpg',
'module': 'resources.lib.channels.wo.france24',
'available_languages': ['FR', 'EN', 'AR', 'ES'],
'enabled': True,
'order': 4
},
'nhkworld': {
'callback': 'live_bridge',
'thumb': 'channels/wo/nhkworld.png',
'fanart': 'channels/wo/nhkworld_fanart.jpg',
'module': 'resources.lib.channels.wo.nhkworld',
'available_languages': ['Outside Japan', 'In Japan'],
'enabled': True,
'order': 5
},
'tivi5monde': {
'callback': 'live_bridge',
'thumb': 'channels/wo/tivi5monde.png',
'fanart': 'channels/wo/tivi5monde_fanart.jpg',
'module': 'resources.lib.channels.wo.tivi5monde',
'enabled': True,
'order': 7
},
'bvn': {
'callback': 'live_bridge',
'thumb': 'channels/wo/bvn.png',
'fanart': 'channels/wo/bvn_fanart.jpg',
'module': 'resources.lib.channels.wo.bvn',
'enabled': True,
'order': 8
},
'icitelevision': {
'callback': 'live_bridge',
'thumb': 'channels/wo/icitelevision.png',
'fanart': 'channels/wo/icitelevision_fanart.jpg',
'module': 'resources.lib.channels.wo.icitelevision',
'enabled': True,
'order': 9
},
'arirang': {
'callback': 'live_bridge',
'thumb': 'channels/wo/arirang.png',
'fanart': 'channels/wo/arirang_fanart.jpg',
'module': 'resources.lib.channels.wo.arirang',
'enabled': True,
'order': 11
},
'dw': {
'callback': 'live_bridge',
'thumb': 'channels/wo/dw.png',
'fanart': 'channels/wo/dw_fanart.jpg',
'module': 'resources.lib.channels.wo.dw',
'available_languages': ['EN', 'AR', 'ES', 'DE'],
'enabled': True,
'order': 12
},
'qvc': {
'callback': 'live_bridge',
'thumb': 'channels/wo/qvc.png',
'fanart': 'channels/wo/qvc_fanart.jpg',
'module': 'resources.lib.channels.wo.qvc',
'available_languages': ['JP', 'DE', 'IT', 'UK', 'US'],
'enabled': True,
'order': 15
},
'icirdi': {
'callback': 'live_bridge',
'thumb': 'channels/wo/icirdi.png',
'fanart': 'channels/wo/icirdi_fanart.jpg',
'module': 'resources.lib.channels.wo.icirdi',
'enabled': True,
'order': 16
},
'cgtn': {
'callback': 'live_bridge',
'thumb': 'channels/wo/cgtn.png',
'fanart': 'channels/wo/cgtn_fanart.jpg',
'module': 'resources.lib.channels.wo.cgtn',
'available_languages': ['FR', 'EN', 'AR', 'ES', 'RU'],
'enabled': True,
'order': 17
},
'cgtndocumentary': {
'callback': 'live_bridge',
'thumb': 'channels/wo/cgtndocumentary.png',
'fanart': 'channels/wo/cgtndocumentary_fanart.jpg',
'module': 'resources.lib.channels.wo.cgtn',
'enabled': True,
'order': 18
},
'afriquemedia': {
'callback': 'live_bridge',
'thumb': 'channels/wo/afriquemedia.png',
'fanart': 'channels/wo/afriquemedia_fanart.jpg',
'module': 'resources.lib.channels.wo.afriquemedia',
'enabled': True,
'order': 20
},
'tv5mondefbs': {
'callback': 'live_bridge',
'thumb': 'channels/wo/tv5mondefbs.png',
'fanart': 'channels/wo/tv5mondefbs_fanart.jpg',
'module': 'resources.lib.channels.wo.tv5monde',
'enabled': True,
'order': 21
},
'tv5mondeinfo': {
'callback': 'live_bridge',
'thumb': 'channels/wo/tv5mondeinfo.png',
'fanart': 'channels/wo/tv5mondeinfo_fanart.jpg',
'module': 'resources.lib.channels.wo.tv5monde',
'enabled': True,
'order': 22
},
'channelnewsasia': {
'callback': 'live_bridge',
'thumb': 'channels/wo/channelnewsasia.png',
'fanart': 'channels/wo/channelnewsasia_fanart.jpg',
'module': 'resources.lib.channels.wo.channelnewsasia',
'enabled': True,
'order': 23
},
'rt': {
'callback': 'live_bridge',
'thumb': 'channels/wo/rt.png',
'fanart': 'channels/wo/rt_fanart.jpg',
'module': 'resources.lib.channels.wo.rt',
'available_languages': ['FR', 'EN', 'AR', 'ES'],
'enabled': True,
'order': 24
},
'africa24': {
'callback': 'live_bridge',
'thumb': 'channels/wo/africa24.png',
'fanart': 'channels/wo/africa24_fanart.jpg',
'module': 'resources.lib.channels.wo.africa24',
'enabled': True,
'order': 25
}
}
| true | true |
f7396fc9d79c91054f1706d011fce6444565db44 | 6,473 | py | Python | projects/opengrok/opengrok-tools/src/test/python/test_command.py | pwr-pbrwio/PBR20M2 | 98904cb265baa7ce6a00455ea6edb8366a51c61b | [
"Apache-2.0"
] | 5 | 2018-12-13T17:46:39.000Z | 2022-03-29T02:07:47.000Z | projects/opengrok/opengrok-tools/src/test/python/test_command.py | pwr-pbrwio/PBR20M2 | 98904cb265baa7ce6a00455ea6edb8366a51c61b | [
"Apache-2.0"
] | 42 | 2019-12-08T18:41:13.000Z | 2021-08-28T13:08:55.000Z | projects/opengrok/opengrok-tools/src/test/python/test_command.py | sealuzh/lightweight-effectiveness | f6ef4c98b8f572a86e42252686995b771e655f80 | [
"MIT"
] | 8 | 2018-12-25T04:19:01.000Z | 2021-03-24T17:02:44.000Z | #!/usr/bin/env python3
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# See LICENSE.txt included in this distribution for the specific
# language governing permissions and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at LICENSE.txt.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
#
import unittest
import sys
import os
import time
sys.path.insert(0, os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..',
'main', 'python')))
from opengrok_tools.utils.command import Command
import tempfile
class TestApp(unittest.TestCase):
#def __init__(self):
# logging.basicConfig(level=logging.DEBUG)
def test_subst_append_default(self):
cmd = Command(['foo', '=ARG=', 'bar'],
args_subst={"ARG": "blah"},
args_append=["1", "2"])
self.assertEqual(['foo', '=blah=', 'bar', '1', '2'], cmd.cmd)
def test_subst_append_exclsubst(self):
"""
Exclusive substitution is on and was performed, therefore no arguments
should be appended.
"""
cmd = Command(['foo', 'ARG', 'bar'],
args_subst={"ARG": "blah"},
args_append=["1", "2"],
excl_subst=True)
self.assertEqual(['foo', 'blah', 'bar'], cmd.cmd)
def test_subst_append_exclsubst_nosubst(self):
"""
Exclusive substituation is on however no substitution was performed,
therefore arguments can be appended.
"""
cmd = Command(['foo', 'bar'],
args_subst={"ARG": "blah"},
args_append=["1", "2"],
excl_subst=True)
self.assertEqual(['foo', 'bar', '1', '2'], cmd.cmd)
def test_execute_nonexistent(self):
cmd = Command(['/baaah', '/etc/passwd'])
cmd.execute()
self.assertEqual(None, cmd.getretcode())
self.assertEqual(Command.ERRORED, cmd.getstate())
@unittest.skipUnless(os.name.startswith("posix"), "requires Unix")
def test_getoutput(self):
cmd = Command(['/bin/ls', '/etc/passwd'])
cmd.execute()
self.assertEqual(['/etc/passwd\n'], cmd.getoutput())
@unittest.skipUnless(os.name.startswith("posix"), "requires Unix")
def test_work_dir(self):
os.chdir("/")
orig_cwd = os.getcwd()
self.assertNotEqual(orig_cwd, tempfile.gettempdir())
cmd = Command(['/bin/ls', '/etc/passwd'],
work_dir=tempfile.gettempdir())
cmd.execute()
self.assertEqual(orig_cwd, os.getcwd())
@unittest.skipUnless(os.name.startswith("posix"), "requires Unix")
def test_env(self):
cmd = Command(['/usr/bin/env'],
env_vars={'FOO': 'BAR', 'A': 'B'})
cmd.execute()
self.assertTrue("FOO=BAR\n" in cmd.getoutput())
@unittest.skipUnless(os.path.exists('/bin/true') and os.path.exists('/bin/false'), "requires Unix")
def test_retcode(self):
cmd = Command(["/bin/false"])
cmd.execute()
self.assertNotEqual(0, cmd.getretcode())
self.assertEqual(Command.FINISHED, cmd.getstate())
cmd = Command(["/bin/true"])
cmd.execute()
self.assertEqual(0, cmd.getretcode())
self.assertEqual(Command.FINISHED, cmd.getstate())
@unittest.skipUnless(os.path.exists('/usr/bin/true') and os.path.exists('/usr/bin/false'), "requires Unix")
def test_retcode_usr(self):
cmd = Command(["/usr/bin/false"])
cmd.execute()
self.assertNotEqual(0, cmd.getretcode())
self.assertEqual(Command.FINISHED, cmd.getstate())
cmd = Command(["/usr/bin/true"])
cmd.execute()
self.assertEqual(0, cmd.getretcode())
self.assertEqual(Command.FINISHED, cmd.getstate())
def test_str(self):
cmd = Command(["foo", "bar"])
self.assertEqual("foo bar", str(cmd))
@unittest.skipUnless(os.name.startswith("posix"), "requires Unix")
def test_timeout(self):
timeout = 30
cmd = Command(["/bin/sleep", str(timeout)], timeout=3)
start_time = time.time()
cmd.execute()
# Check the process is no longer around.
self.assertIsNotNone(cmd.getpid())
self.assertRaises(ProcessLookupError, os.kill, cmd.getpid(), 0)
elapsed_time = time.time() - start_time
self.assertTrue(elapsed_time < timeout)
self.assertEqual(Command.TIMEDOUT, cmd.getstate())
self.assertEqual(None, cmd.getretcode())
@unittest.skipUnless(os.name.startswith("posix"), "requires Unix")
def test_notimeout(self):
cmd_timeout = 30
cmd = Command(["/bin/sleep", "3"], timeout=cmd_timeout)
cmd.execute()
self.assertEqual(Command.FINISHED, cmd.getstate())
self.assertEqual(0, cmd.getretcode())
@unittest.skipUnless(os.name.startswith("posix"), "requires Unix")
def test_stderr(self):
cmd = Command(["/bin/cat", "/foo/bar", "/etc/passwd"],
redirect_stderr=False)
cmd.execute()
self.assertEqual(Command.FINISHED, cmd.getstate())
self.assertNotEqual(0, cmd.getretcode())
# The error could contain localized output strings so check just
# for the path itself.
self.assertTrue("/foo/bar" in "\n".join(cmd.geterroutput()))
self.assertFalse("/foo/bar" in "\n".join(cmd.getoutput()))
self.assertTrue("root" in "\n".join(cmd.getoutput()))
@unittest.skipUnless(os.name.startswith("posix"), "requires Unix")
def test_resource_limits(self):
"""
Simple smoke test for setting resource limits.
"""
resource_limits = {"RLIMIT_NOFILE": 1024}
cmd = Command(['/bin/cat', '/etc/passwd'],
resource_limits=resource_limits)
cmd.set_resource_limits(resource_limits)
cmd.execute()
if __name__ == '__main__':
unittest.main()
| 36.778409 | 111 | 0.612544 |
import unittest
import sys
import os
import time
sys.path.insert(0, os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..',
'main', 'python')))
from opengrok_tools.utils.command import Command
import tempfile
class TestApp(unittest.TestCase):
def test_subst_append_default(self):
cmd = Command(['foo', '=ARG=', 'bar'],
args_subst={"ARG": "blah"},
args_append=["1", "2"])
self.assertEqual(['foo', '=blah=', 'bar', '1', '2'], cmd.cmd)
def test_subst_append_exclsubst(self):
cmd = Command(['foo', 'ARG', 'bar'],
args_subst={"ARG": "blah"},
args_append=["1", "2"],
excl_subst=True)
self.assertEqual(['foo', 'blah', 'bar'], cmd.cmd)
def test_subst_append_exclsubst_nosubst(self):
cmd = Command(['foo', 'bar'],
args_subst={"ARG": "blah"},
args_append=["1", "2"],
excl_subst=True)
self.assertEqual(['foo', 'bar', '1', '2'], cmd.cmd)
def test_execute_nonexistent(self):
cmd = Command(['/baaah', '/etc/passwd'])
cmd.execute()
self.assertEqual(None, cmd.getretcode())
self.assertEqual(Command.ERRORED, cmd.getstate())
@unittest.skipUnless(os.name.startswith("posix"), "requires Unix")
def test_getoutput(self):
cmd = Command(['/bin/ls', '/etc/passwd'])
cmd.execute()
self.assertEqual(['/etc/passwd\n'], cmd.getoutput())
@unittest.skipUnless(os.name.startswith("posix"), "requires Unix")
def test_work_dir(self):
os.chdir("/")
orig_cwd = os.getcwd()
self.assertNotEqual(orig_cwd, tempfile.gettempdir())
cmd = Command(['/bin/ls', '/etc/passwd'],
work_dir=tempfile.gettempdir())
cmd.execute()
self.assertEqual(orig_cwd, os.getcwd())
@unittest.skipUnless(os.name.startswith("posix"), "requires Unix")
def test_env(self):
cmd = Command(['/usr/bin/env'],
env_vars={'FOO': 'BAR', 'A': 'B'})
cmd.execute()
self.assertTrue("FOO=BAR\n" in cmd.getoutput())
@unittest.skipUnless(os.path.exists('/bin/true') and os.path.exists('/bin/false'), "requires Unix")
def test_retcode(self):
cmd = Command(["/bin/false"])
cmd.execute()
self.assertNotEqual(0, cmd.getretcode())
self.assertEqual(Command.FINISHED, cmd.getstate())
cmd = Command(["/bin/true"])
cmd.execute()
self.assertEqual(0, cmd.getretcode())
self.assertEqual(Command.FINISHED, cmd.getstate())
@unittest.skipUnless(os.path.exists('/usr/bin/true') and os.path.exists('/usr/bin/false'), "requires Unix")
def test_retcode_usr(self):
cmd = Command(["/usr/bin/false"])
cmd.execute()
self.assertNotEqual(0, cmd.getretcode())
self.assertEqual(Command.FINISHED, cmd.getstate())
cmd = Command(["/usr/bin/true"])
cmd.execute()
self.assertEqual(0, cmd.getretcode())
self.assertEqual(Command.FINISHED, cmd.getstate())
def test_str(self):
cmd = Command(["foo", "bar"])
self.assertEqual("foo bar", str(cmd))
@unittest.skipUnless(os.name.startswith("posix"), "requires Unix")
def test_timeout(self):
timeout = 30
cmd = Command(["/bin/sleep", str(timeout)], timeout=3)
start_time = time.time()
cmd.execute()
self.assertIsNotNone(cmd.getpid())
self.assertRaises(ProcessLookupError, os.kill, cmd.getpid(), 0)
elapsed_time = time.time() - start_time
self.assertTrue(elapsed_time < timeout)
self.assertEqual(Command.TIMEDOUT, cmd.getstate())
self.assertEqual(None, cmd.getretcode())
@unittest.skipUnless(os.name.startswith("posix"), "requires Unix")
def test_notimeout(self):
cmd_timeout = 30
cmd = Command(["/bin/sleep", "3"], timeout=cmd_timeout)
cmd.execute()
self.assertEqual(Command.FINISHED, cmd.getstate())
self.assertEqual(0, cmd.getretcode())
@unittest.skipUnless(os.name.startswith("posix"), "requires Unix")
def test_stderr(self):
cmd = Command(["/bin/cat", "/foo/bar", "/etc/passwd"],
redirect_stderr=False)
cmd.execute()
self.assertEqual(Command.FINISHED, cmd.getstate())
self.assertNotEqual(0, cmd.getretcode())
self.assertTrue("/foo/bar" in "\n".join(cmd.geterroutput()))
self.assertFalse("/foo/bar" in "\n".join(cmd.getoutput()))
self.assertTrue("root" in "\n".join(cmd.getoutput()))
@unittest.skipUnless(os.name.startswith("posix"), "requires Unix")
def test_resource_limits(self):
resource_limits = {"RLIMIT_NOFILE": 1024}
cmd = Command(['/bin/cat', '/etc/passwd'],
resource_limits=resource_limits)
cmd.set_resource_limits(resource_limits)
cmd.execute()
if __name__ == '__main__':
unittest.main()
| true | true |
f739706fc12fea041b8d25d4b64f842543b1a907 | 495 | py | Python | app/db/functions/server_defaults/uuid_.py | Max-Zhenzhera/my_vocab_backend | f93d0c7c7f4a45fce47eb7ce74cfcda195b13a72 | [
"MIT"
] | 1 | 2021-11-18T16:25:22.000Z | 2021-11-18T16:25:22.000Z | app/db/functions/server_defaults/uuid_.py | Max-Zhenzhera/my_vocab_backend | f93d0c7c7f4a45fce47eb7ce74cfcda195b13a72 | [
"MIT"
] | null | null | null | app/db/functions/server_defaults/uuid_.py | Max-Zhenzhera/my_vocab_backend | f93d0c7c7f4a45fce47eb7ce74cfcda195b13a72 | [
"MIT"
] | null | null | null | from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql import expression
__all__ = ['gen_random_uuid']
class gen_random_uuid(expression.FunctionElement): # noqa Class names should use CamelCase convention
type = UUID()
@compiles(gen_random_uuid, 'postgresql')
def pg_gen_random_uuid(element, compiler, **kw):
""" postgres docs: https://www.postgresql.org/docs/14/functions-uuid.html"""
return 'gen_random_uuid()'
| 29.117647 | 107 | 0.765657 | from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql import expression
__all__ = ['gen_random_uuid']
class gen_random_uuid(expression.FunctionElement):
type = UUID()
@compiles(gen_random_uuid, 'postgresql')
def pg_gen_random_uuid(element, compiler, **kw):
return 'gen_random_uuid()'
| true | true |
f73970aa3e5a592264043fb2f931cc2bca5f1423 | 3,235 | py | Python | profiles_project/settings.py | Eldor99/profiles-rest-api | 2ec2bd0e796cb31001e32254b71eba4e273a5e20 | [
"MIT"
] | null | null | null | profiles_project/settings.py | Eldor99/profiles-rest-api | 2ec2bd0e796cb31001e32254b71eba4e273a5e20 | [
"MIT"
] | null | null | null | profiles_project/settings.py | Eldor99/profiles-rest-api | 2ec2bd0e796cb31001e32254b71eba4e273a5e20 | [
"MIT"
] | null | null | null | """
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'l7sye_6)^fxo*4tj^+^fx5r5axl=$=@%*!$nqlvsck!aa6&wb%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
AUTH_USER_MODEL = 'profiles_api.UserProfile'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| 25.88 | 91 | 0.698609 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'l7sye_6)^fxo*4tj^+^fx5r5axl=$=@%*!$nqlvsck!aa6&wb%'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
AUTH_USER_MODEL = 'profiles_api.UserProfile'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| true | true |
f73971afbc14a8e1cb7448175f685ff0fd11ada3 | 854 | py | Python | leetcode/easy/excel_sheet_column_title/py/solution.py | lilsweetcaligula/Online-Judges | 48454a8e6b5b86f80e89eca1b396480df8960cfd | [
"MIT"
] | null | null | null | leetcode/easy/excel_sheet_column_title/py/solution.py | lilsweetcaligula/Online-Judges | 48454a8e6b5b86f80e89eca1b396480df8960cfd | [
"MIT"
] | null | null | null | leetcode/easy/excel_sheet_column_title/py/solution.py | lilsweetcaligula/Online-Judges | 48454a8e6b5b86f80e89eca1b396480df8960cfd | [
"MIT"
] | null | null | null | class Solution(object):
def convertToTitle(self, n):
"""
:type n: int
:rtype: str
"""
LOOKUP = {
0: 'A', 1: 'B',
2: 'C', 3: 'D',
4: 'E', 5: 'F',
6: 'G', 7: 'H',
8: 'I', 9: 'J',
10: 'K', 11: 'L',
12: 'M', 13: 'N',
14: 'O', 15: 'P',
16: 'Q', 17: 'R',
18: 'S', 19: 'T',
20: 'U', 21: 'V',
22: 'W', 23: 'X',
24: 'Y', 25: 'Z',
}
chars = ''
while n > 0:
chars += LOOKUP[(n - 1) % 26]
n = (n - 1) // 26
return chars[::-1]
| 28.466667 | 41 | 0.204918 | class Solution(object):
def convertToTitle(self, n):
LOOKUP = {
0: 'A', 1: 'B',
2: 'C', 3: 'D',
4: 'E', 5: 'F',
6: 'G', 7: 'H',
8: 'I', 9: 'J',
10: 'K', 11: 'L',
12: 'M', 13: 'N',
14: 'O', 15: 'P',
16: 'Q', 17: 'R',
18: 'S', 19: 'T',
20: 'U', 21: 'V',
22: 'W', 23: 'X',
24: 'Y', 25: 'Z',
}
chars = ''
while n > 0:
chars += LOOKUP[(n - 1) % 26]
n = (n - 1) // 26
return chars[::-1]
| true | true |
f73972567fa204a891dbbd0d1f85d42ba03d310a | 32 | py | Python | factorial/loader/work/__init__.py | mgarciadelojo/factorialhr | f776694effb96ed973b49699809f835aa0b19dfa | [
"MIT"
] | 6 | 2020-09-01T09:53:05.000Z | 2021-06-29T21:31:18.000Z | factorial/loader/work/__init__.py | mgarciadelojo/factorialhr | f776694effb96ed973b49699809f835aa0b19dfa | [
"MIT"
] | 2 | 2020-11-28T02:48:12.000Z | 2020-12-12T14:13:32.000Z | factorial/loader/work/__init__.py | mgarciadelojo/factorialhr | f776694effb96ed973b49699809f835aa0b19dfa | [
"MIT"
] | 3 | 2020-06-25T05:51:04.000Z | 2021-03-23T07:53:27.000Z | from .json_work import JsonWork
| 16 | 31 | 0.84375 | from .json_work import JsonWork
| true | true |
f73974a5a549a98135ff703c3d5b57bc65680f40 | 16,761 | py | Python | mmdet/models/roi_heads/mask_heads/fcn_mask_head.py | lnmdlong/mmdetection | 87768a5d0a0188d46c50b575b417e9ec2fb5c06c | [
"Apache-2.0"
] | null | null | null | mmdet/models/roi_heads/mask_heads/fcn_mask_head.py | lnmdlong/mmdetection | 87768a5d0a0188d46c50b575b417e9ec2fb5c06c | [
"Apache-2.0"
] | null | null | null | mmdet/models/roi_heads/mask_heads/fcn_mask_head.py | lnmdlong/mmdetection | 87768a5d0a0188d46c50b575b417e9ec2fb5c06c | [
"Apache-2.0"
] | null | null | null | import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, build_conv_layer, build_upsample_layer
from mmcv.ops.carafe import CARAFEPack
from mmcv.runner import BaseModule, ModuleList, auto_fp16, force_fp32
from torch.nn.modules.utils import _pair
from mmdet.core import mask_target
from mmdet.models.builder import HEADS, build_loss
BYTES_PER_FLOAT = 4
# TODO: This memory limit may be too much or too little. It would be better to
# determine it based on available resources.
GPU_MEM_LIMIT = 1024**3 # 1 GB memory limit
@HEADS.register_module()
class FCNMaskHead(BaseModule):
def __init__(self,
num_convs=4,
roi_feat_size=14,
in_channels=256,
conv_kernel_size=3,
conv_out_channels=256,
num_classes=80,
class_agnostic=False,
upsample_cfg=dict(type='deconv', scale_factor=2),
conv_cfg=None,
norm_cfg=None,
predictor_cfg=dict(type='Conv'),
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0),
init_cfg=None):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
super(FCNMaskHead, self).__init__(init_cfg)
self.upsample_cfg = upsample_cfg.copy()
if self.upsample_cfg['type'] not in [
None, 'deconv', 'nearest', 'bilinear', 'carafe'
]:
raise ValueError(
f'Invalid upsample method {self.upsample_cfg["type"]}, '
'accepted methods are "deconv", "nearest", "bilinear", '
'"carafe"')
self.num_convs = num_convs
# WARN: roi_feat_size is reserved and not used
self.roi_feat_size = _pair(roi_feat_size)
self.in_channels = in_channels
self.conv_kernel_size = conv_kernel_size
self.conv_out_channels = conv_out_channels
self.upsample_method = self.upsample_cfg.get('type')
self.scale_factor = self.upsample_cfg.pop('scale_factor', None)
self.num_classes = num_classes
self.class_agnostic = class_agnostic
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.predictor_cfg = predictor_cfg
self.fp16_enabled = False
self.loss_mask = build_loss(loss_mask)
self.convs = ModuleList()
for i in range(self.num_convs):
in_channels = (
self.in_channels if i == 0 else self.conv_out_channels)
padding = (self.conv_kernel_size - 1) // 2
self.convs.append(
ConvModule(
in_channels,
self.conv_out_channels,
self.conv_kernel_size,
padding=padding,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg))
upsample_in_channels = (
self.conv_out_channels if self.num_convs > 0 else in_channels)
upsample_cfg_ = self.upsample_cfg.copy()
if self.upsample_method is None:
self.upsample = None
elif self.upsample_method == 'deconv':
upsample_cfg_.update(
in_channels=upsample_in_channels,
out_channels=self.conv_out_channels,
kernel_size=self.scale_factor,
stride=self.scale_factor)
self.upsample = build_upsample_layer(upsample_cfg_)
elif self.upsample_method == 'carafe':
upsample_cfg_.update(
channels=upsample_in_channels, scale_factor=self.scale_factor)
self.upsample = build_upsample_layer(upsample_cfg_)
else:
# suppress warnings
align_corners = (None
if self.upsample_method == 'nearest' else False)
upsample_cfg_.update(
scale_factor=self.scale_factor,
mode=self.upsample_method,
align_corners=align_corners)
self.upsample = build_upsample_layer(upsample_cfg_)
out_channels = 1 if self.class_agnostic else self.num_classes
logits_in_channel = (
self.conv_out_channels
if self.upsample_method == 'deconv' else upsample_in_channels)
self.conv_logits = build_conv_layer(self.predictor_cfg,
logits_in_channel, out_channels, 1)
self.relu = nn.ReLU(inplace=True)
self.debug_imgs = None
def init_weights(self):
super(FCNMaskHead, self).init_weights()
for m in [self.upsample, self.conv_logits]:
if m is None:
continue
elif isinstance(m, CARAFEPack):
m.init_weights()
else:
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu')
nn.init.constant_(m.bias, 0)
@auto_fp16()
def forward(self, x):
for conv in self.convs:
x = conv(x)
if self.upsample is not None:
x = self.upsample(x)
if self.upsample_method == 'deconv':
x = self.relu(x)
mask_pred = self.conv_logits(x)
return mask_pred
def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg):
pos_proposals = [res.pos_bboxes for res in sampling_results]
pos_assigned_gt_inds = [
res.pos_assigned_gt_inds for res in sampling_results
]
mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,
gt_masks, rcnn_train_cfg)
return mask_targets
@force_fp32(apply_to=('mask_pred', ))
def loss(self, mask_pred, mask_targets, labels):
"""
Example:
>>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import * # NOQA
>>> N = 7 # N = number of extracted ROIs
>>> C, H, W = 11, 32, 32
>>> # Create example instance of FCN Mask Head.
>>> # There are lots of variations depending on the configuration
>>> self = FCNMaskHead(num_classes=C, num_convs=1)
>>> inputs = torch.rand(N, self.in_channels, H, W)
>>> mask_pred = self.forward(inputs)
>>> sf = self.scale_factor
>>> labels = torch.randint(0, C, size=(N,))
>>> # With the default properties the mask targets should indicate
>>> # a (potentially soft) single-class label
>>> mask_targets = torch.rand(N, H * sf, W * sf)
>>> loss = self.loss(mask_pred, mask_targets, labels)
>>> print('loss = {!r}'.format(loss))
"""
loss = dict()
if mask_pred.size(0) == 0:
loss_mask = mask_pred.sum()
else:
if self.class_agnostic:
loss_mask = self.loss_mask(mask_pred, mask_targets,
torch.zeros_like(labels))
else:
loss_mask = self.loss_mask(mask_pred, mask_targets, labels)
loss['loss_mask'] = loss_mask
return loss
    def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg,
                      ori_shape, scale_factor, rescale):
        """Get segmentation masks from mask_pred and bboxes.

        Args:
            mask_pred (Tensor or ndarray): shape (n, #class, h, w).
                For single-scale testing, mask_pred is the direct output of
                model, whose type is Tensor, while for multi-scale testing,
                it will be converted to numpy array outside of this method.
            det_bboxes (Tensor): shape (n, 4/5)
            det_labels (Tensor): shape (n, )
            rcnn_test_cfg (dict): rcnn testing config
            ori_shape (Tuple): original image height and width, shape (2,)
            scale_factor(float | Tensor): If ``rescale is True``, box
                coordinates are divided by this scale factor to fit
                ``ori_shape``.
            rescale (bool): If True, the resulting masks will be rescaled to
                ``ori_shape``.

        Returns:
            list[list]: encoded masks. The c-th item in the outer list
                corresponds to the c-th class. Given the c-th outer list, the
                i-th item in that inner list is the mask for the i-th box with
                class label c.

        Example:
            >>> import mmcv
            >>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import *  # NOQA
            >>> N = 7  # N = number of extracted ROIs
            >>> C, H, W = 11, 32, 32
            >>> # Create example instance of FCN Mask Head.
            >>> self = FCNMaskHead(num_classes=C, num_convs=0)
            >>> inputs = torch.rand(N, self.in_channels, H, W)
            >>> mask_pred = self.forward(inputs)
            >>> # Each input is associated with some bounding box
            >>> det_bboxes = torch.Tensor([[1, 1, 42, 42 ]] * N)
            >>> det_labels = torch.randint(0, C, size=(N,))
            >>> rcnn_test_cfg = mmcv.Config({'mask_thr_binary': 0, })
            >>> ori_shape = (H * 4, W * 4)
            >>> scale_factor = torch.FloatTensor((1, 1))
            >>> rescale = False
            >>> # Encoded masks are a list for each category.
            >>> encoded_masks = self.get_seg_masks(
            >>>     mask_pred, det_bboxes, det_labels, rcnn_test_cfg, ori_shape,
            >>>     scale_factor, rescale
            >>> )
            >>> assert len(encoded_masks) == C
            >>> assert sum(list(map(len, encoded_masks))) == N
        """
        if not isinstance(mask_pred, torch.Tensor):
            # Multi-scale testing hands in an ndarray; lift it onto the same
            # device/dtype as the detected boxes.
            mask_pred = det_bboxes.new_tensor(mask_pred)
        device = mask_pred.device
        cls_segms = [[] for _ in range(self.num_classes)
                     ]  # BG is not included in num_classes
        bboxes = det_bboxes[:, :4]
        labels = det_labels
        # No need to consider rescale and scale_factor while exporting to ONNX
        if torch.onnx.is_in_onnx_export():
            img_h, img_w = ori_shape[:2]
        else:
            if rescale:
                img_h, img_w = ori_shape[:2]
            else:
                # Keep masks in the (resized) input frame: scale the target
                # canvas up instead of scaling the boxes down.
                if isinstance(scale_factor, float):
                    img_h = np.round(ori_shape[0] * scale_factor).astype(
                        np.int32)
                    img_w = np.round(ori_shape[1] * scale_factor).astype(
                        np.int32)
                else:
                    w_scale, h_scale = scale_factor[0], scale_factor[1]
                    img_h = np.round(ori_shape[0] * h_scale.item()).astype(
                        np.int32)
                    img_w = np.round(ori_shape[1] * w_scale.item()).astype(
                        np.int32)
                scale_factor = 1.0
            if not isinstance(scale_factor, (float, torch.Tensor)):
                scale_factor = bboxes.new_tensor(scale_factor)
            bboxes = bboxes / scale_factor
        # support exporting to ONNX
        if torch.onnx.is_in_onnx_export():
            threshold = rcnn_test_cfg.mask_thr_binary
            if not self.class_agnostic:
                box_inds = torch.arange(mask_pred.shape[0])
                mask_pred = mask_pred[box_inds, labels][:, None]
            masks, _ = _do_paste_mask(
                mask_pred, bboxes, img_h, img_w, skip_empty=False)
            if threshold >= 0:
                masks = (masks >= threshold).to(dtype=torch.bool)
            else:
                # TensorRT backend does not have data type of uint8
                is_trt_backend = os.environ.get(
                    'ONNX_BACKEND') == 'MMCVTensorRT'
                target_dtype = torch.int32 if is_trt_backend else torch.uint8
                masks = (masks * 255).to(dtype=target_dtype)
            return masks
        N = len(mask_pred)
        # The actual implementation split the input into chunks,
        # and paste them chunk by chunk.
        if device.type == 'cpu':
            # CPU is most efficient when they are pasted one by one with
            # skip_empty=True, so that it performs minimal number of
            # operations.
            num_chunks = N
        else:
            # GPU benefits from parallelism for larger chunks,
            # but may have memory issue
            num_chunks = int(
                np.ceil(N * img_h * img_w * BYTES_PER_FLOAT / GPU_MEM_LIMIT))
            assert (num_chunks <=
                    N), 'Default GPU_MEM_LIMIT is too small; try increasing it'
        chunks = torch.chunk(torch.arange(N, device=device), num_chunks)
        threshold = rcnn_test_cfg.mask_thr_binary
        im_mask = torch.zeros(
            N,
            img_h,
            img_w,
            device=device,
            dtype=torch.bool if threshold >= 0 else torch.uint8)
        if not self.class_agnostic:
            # Keep only each box's own class channel: result is (N, 1, h, w).
            mask_pred = mask_pred[range(N), labels][:, None]
        for inds in chunks:
            masks_chunk, spatial_inds = _do_paste_mask(
                mask_pred[inds],
                bboxes[inds],
                img_h,
                img_w,
                skip_empty=device.type == 'cpu')
            if threshold >= 0:
                masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool)
            else:
                # for visualization and debugging
                masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8)
            im_mask[(inds, ) + spatial_inds] = masks_chunk
        if torch.jit.is_tracing():
            return im_mask.detach().int()
        for i in range(N):
            cls_segms[labels[i]].append(im_mask[i].detach().cpu().numpy())
        return cls_segms
def _do_paste_mask(masks, boxes, img_h, img_w, skip_empty=True):
"""Paste instance masks according to boxes.
This implementation is modified from
https://github.com/facebookresearch/detectron2/
Args:
masks (Tensor): N, 1, H, W
boxes (Tensor): N, 4
img_h (int): Height of the image to be pasted.
img_w (int): Width of the image to be pasted.
skip_empty (bool): Only paste masks within the region that
tightly bound all boxes, and returns the results this region only.
An important optimization for CPU.
Returns:
tuple: (Tensor, tuple). The first item is mask tensor, the second one
is the slice object.
If skip_empty == False, the whole image will be pasted. It will
return a mask of shape (N, img_h, img_w) and an empty tuple.
If skip_empty == True, only area around the mask will be pasted.
A mask of shape (N, h', w') and its start and end coordinates
in the original image will be returned.
"""
# On GPU, paste all masks together (up to chunk size)
# by using the entire image to sample the masks
# Compared to pasting them one by one,
# this has more operations but is faster on COCO-scale dataset.
device = masks.device
if skip_empty:
x0_int, y0_int = torch.clamp(
boxes.min(dim=0).values.floor()[:2] - 1,
min=0).to(dtype=torch.int32)
x1_int = torch.clamp(
boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32)
y1_int = torch.clamp(
boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32)
else:
x0_int, y0_int = 0, 0
x1_int, y1_int = img_w, img_h
x0, y0, x1, y1 = torch.split(boxes, 1, dim=1) # each is Nx1
N = masks.shape[0]
img_y = torch.arange(y0_int, y1_int, device=device).to(torch.float32) + 0.5
img_x = torch.arange(x0_int, x1_int, device=device).to(torch.float32) + 0.5
img_y = (img_y - y0) / (y1 - y0) * 2 - 1
img_x = (img_x - x0) / (x1 - x0) * 2 - 1
# img_x, img_y have shapes (N, w), (N, h)
# IsInf op is not supported with ONNX<=1.7.0
if not torch.onnx.is_in_onnx_export():
if torch.isinf(img_x).any():
inds = torch.where(torch.isinf(img_x))
img_x[inds] = 0
if torch.isinf(img_y).any():
inds = torch.where(torch.isinf(img_y))
img_y[inds] = 0
gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1))
gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1))
grid = torch.stack([gx, gy], dim=3)
img_masks = F.grid_sample(
masks.to(dtype=torch.float32), grid, align_corners=False)
if skip_empty:
return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int))
else:
return img_masks[:, 0], ()
| 42.113065 | 85 | 0.569477 | import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, build_conv_layer, build_upsample_layer
from mmcv.ops.carafe import CARAFEPack
from mmcv.runner import BaseModule, ModuleList, auto_fp16, force_fp32
from torch.nn.modules.utils import _pair
from mmdet.core import mask_target
from mmdet.models.builder import HEADS, build_loss
BYTES_PER_FLOAT = 4
GPU_MEM_LIMIT = 1024**3
@HEADS.register_module()
class FCNMaskHead(BaseModule):
def __init__(self,
num_convs=4,
roi_feat_size=14,
in_channels=256,
conv_kernel_size=3,
conv_out_channels=256,
num_classes=80,
class_agnostic=False,
upsample_cfg=dict(type='deconv', scale_factor=2),
conv_cfg=None,
norm_cfg=None,
predictor_cfg=dict(type='Conv'),
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0),
init_cfg=None):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
super(FCNMaskHead, self).__init__(init_cfg)
self.upsample_cfg = upsample_cfg.copy()
if self.upsample_cfg['type'] not in [
None, 'deconv', 'nearest', 'bilinear', 'carafe'
]:
raise ValueError(
f'Invalid upsample method {self.upsample_cfg["type"]}, '
'accepted methods are "deconv", "nearest", "bilinear", '
'"carafe"')
self.num_convs = num_convs
self.roi_feat_size = _pair(roi_feat_size)
self.in_channels = in_channels
self.conv_kernel_size = conv_kernel_size
self.conv_out_channels = conv_out_channels
self.upsample_method = self.upsample_cfg.get('type')
self.scale_factor = self.upsample_cfg.pop('scale_factor', None)
self.num_classes = num_classes
self.class_agnostic = class_agnostic
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.predictor_cfg = predictor_cfg
self.fp16_enabled = False
self.loss_mask = build_loss(loss_mask)
self.convs = ModuleList()
for i in range(self.num_convs):
in_channels = (
self.in_channels if i == 0 else self.conv_out_channels)
padding = (self.conv_kernel_size - 1) // 2
self.convs.append(
ConvModule(
in_channels,
self.conv_out_channels,
self.conv_kernel_size,
padding=padding,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg))
upsample_in_channels = (
self.conv_out_channels if self.num_convs > 0 else in_channels)
upsample_cfg_ = self.upsample_cfg.copy()
if self.upsample_method is None:
self.upsample = None
elif self.upsample_method == 'deconv':
upsample_cfg_.update(
in_channels=upsample_in_channels,
out_channels=self.conv_out_channels,
kernel_size=self.scale_factor,
stride=self.scale_factor)
self.upsample = build_upsample_layer(upsample_cfg_)
elif self.upsample_method == 'carafe':
upsample_cfg_.update(
channels=upsample_in_channels, scale_factor=self.scale_factor)
self.upsample = build_upsample_layer(upsample_cfg_)
else:
align_corners = (None
if self.upsample_method == 'nearest' else False)
upsample_cfg_.update(
scale_factor=self.scale_factor,
mode=self.upsample_method,
align_corners=align_corners)
self.upsample = build_upsample_layer(upsample_cfg_)
out_channels = 1 if self.class_agnostic else self.num_classes
logits_in_channel = (
self.conv_out_channels
if self.upsample_method == 'deconv' else upsample_in_channels)
self.conv_logits = build_conv_layer(self.predictor_cfg,
logits_in_channel, out_channels, 1)
self.relu = nn.ReLU(inplace=True)
self.debug_imgs = None
def init_weights(self):
super(FCNMaskHead, self).init_weights()
for m in [self.upsample, self.conv_logits]:
if m is None:
continue
elif isinstance(m, CARAFEPack):
m.init_weights()
else:
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu')
nn.init.constant_(m.bias, 0)
@auto_fp16()
def forward(self, x):
for conv in self.convs:
x = conv(x)
if self.upsample is not None:
x = self.upsample(x)
if self.upsample_method == 'deconv':
x = self.relu(x)
mask_pred = self.conv_logits(x)
return mask_pred
def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg):
pos_proposals = [res.pos_bboxes for res in sampling_results]
pos_assigned_gt_inds = [
res.pos_assigned_gt_inds for res in sampling_results
]
mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,
gt_masks, rcnn_train_cfg)
return mask_targets
@force_fp32(apply_to=('mask_pred', ))
def loss(self, mask_pred, mask_targets, labels):
loss = dict()
if mask_pred.size(0) == 0:
loss_mask = mask_pred.sum()
else:
if self.class_agnostic:
loss_mask = self.loss_mask(mask_pred, mask_targets,
torch.zeros_like(labels))
else:
loss_mask = self.loss_mask(mask_pred, mask_targets, labels)
loss['loss_mask'] = loss_mask
return loss
def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg,
ori_shape, scale_factor, rescale):
if not isinstance(mask_pred, torch.Tensor):
mask_pred = det_bboxes.new_tensor(mask_pred)
device = mask_pred.device
cls_segms = [[] for _ in range(self.num_classes)
]
bboxes = det_bboxes[:, :4]
labels = det_labels
if torch.onnx.is_in_onnx_export():
img_h, img_w = ori_shape[:2]
else:
if rescale:
img_h, img_w = ori_shape[:2]
else:
if isinstance(scale_factor, float):
img_h = np.round(ori_shape[0] * scale_factor).astype(
np.int32)
img_w = np.round(ori_shape[1] * scale_factor).astype(
np.int32)
else:
w_scale, h_scale = scale_factor[0], scale_factor[1]
img_h = np.round(ori_shape[0] * h_scale.item()).astype(
np.int32)
img_w = np.round(ori_shape[1] * w_scale.item()).astype(
np.int32)
scale_factor = 1.0
if not isinstance(scale_factor, (float, torch.Tensor)):
scale_factor = bboxes.new_tensor(scale_factor)
bboxes = bboxes / scale_factor
if torch.onnx.is_in_onnx_export():
threshold = rcnn_test_cfg.mask_thr_binary
if not self.class_agnostic:
box_inds = torch.arange(mask_pred.shape[0])
mask_pred = mask_pred[box_inds, labels][:, None]
masks, _ = _do_paste_mask(
mask_pred, bboxes, img_h, img_w, skip_empty=False)
if threshold >= 0:
masks = (masks >= threshold).to(dtype=torch.bool)
else:
is_trt_backend = os.environ.get(
'ONNX_BACKEND') == 'MMCVTensorRT'
target_dtype = torch.int32 if is_trt_backend else torch.uint8
masks = (masks * 255).to(dtype=target_dtype)
return masks
N = len(mask_pred)
if device.type == 'cpu':
num_chunks = N
else:
num_chunks = int(
np.ceil(N * img_h * img_w * BYTES_PER_FLOAT / GPU_MEM_LIMIT))
assert (num_chunks <=
N), 'Default GPU_MEM_LIMIT is too small; try increasing it'
chunks = torch.chunk(torch.arange(N, device=device), num_chunks)
threshold = rcnn_test_cfg.mask_thr_binary
im_mask = torch.zeros(
N,
img_h,
img_w,
device=device,
dtype=torch.bool if threshold >= 0 else torch.uint8)
if not self.class_agnostic:
mask_pred = mask_pred[range(N), labels][:, None]
for inds in chunks:
masks_chunk, spatial_inds = _do_paste_mask(
mask_pred[inds],
bboxes[inds],
img_h,
img_w,
skip_empty=device.type == 'cpu')
if threshold >= 0:
masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool)
else:
masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8)
im_mask[(inds, ) + spatial_inds] = masks_chunk
if torch.jit.is_tracing():
return im_mask.detach().int()
for i in range(N):
cls_segms[labels[i]].append(im_mask[i].detach().cpu().numpy())
return cls_segms
def _do_paste_mask(masks, boxes, img_h, img_w, skip_empty=True):
device = masks.device
if skip_empty:
x0_int, y0_int = torch.clamp(
boxes.min(dim=0).values.floor()[:2] - 1,
min=0).to(dtype=torch.int32)
x1_int = torch.clamp(
boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32)
y1_int = torch.clamp(
boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32)
else:
x0_int, y0_int = 0, 0
x1_int, y1_int = img_w, img_h
x0, y0, x1, y1 = torch.split(boxes, 1, dim=1)
N = masks.shape[0]
img_y = torch.arange(y0_int, y1_int, device=device).to(torch.float32) + 0.5
img_x = torch.arange(x0_int, x1_int, device=device).to(torch.float32) + 0.5
img_y = (img_y - y0) / (y1 - y0) * 2 - 1
img_x = (img_x - x0) / (x1 - x0) * 2 - 1
if not torch.onnx.is_in_onnx_export():
if torch.isinf(img_x).any():
inds = torch.where(torch.isinf(img_x))
img_x[inds] = 0
if torch.isinf(img_y).any():
inds = torch.where(torch.isinf(img_y))
img_y[inds] = 0
gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1))
gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1))
grid = torch.stack([gx, gy], dim=3)
img_masks = F.grid_sample(
masks.to(dtype=torch.float32), grid, align_corners=False)
if skip_empty:
return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int))
else:
return img_masks[:, 0], ()
| true | true |
f73974b03ae39de3b8a04e0e22ae6d94b91dd2a0 | 1,376 | py | Python | miningstatistic/statistic/migrations/0002_add_verbose_names.py | crowmurk/miners | d173f1bee44d0752eefb53b1a0da847a3882a352 | [
"MIT"
] | null | null | null | miningstatistic/statistic/migrations/0002_add_verbose_names.py | crowmurk/miners | d173f1bee44d0752eefb53b1a0da847a3882a352 | [
"MIT"
] | 1 | 2018-09-16T05:35:01.000Z | 2018-09-16T05:35:01.000Z | miningstatistic/statistic/migrations/0002_add_verbose_names.py | crowmurk/miners | d173f1bee44d0752eefb53b1a0da847a3882a352 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.2 on 2018-10-17 07:32
import core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Attach Russian verbose names to ServerStatistic fields and set ordering."""

    dependencies = [
        ('statistic', '0001_initial'),
    ]

    operations = [
        # Order rows by newest request first, then by task, with completed
        # (status=True) entries before failed ones.
        migrations.AlterModelOptions(
            name='serverstatistic',
            options={'ordering': ['-request_id', 'task', '-status'], 'verbose_name': 'Статистика сервера', 'verbose_name_plural': 'Статистика серверов'},
        ),
        migrations.AlterField(
            model_name='serverstatistic',
            name='executed',
            field=models.DateTimeField(verbose_name='Время выполнения'),
        ),
        # The poll result is stored as text validated by the project's
        # JSON validator.
        migrations.AlterField(
            model_name='serverstatistic',
            name='result',
            field=models.TextField(validators=[core.validators.validate_json], verbose_name='Результат опроса'),
        ),
        migrations.AlterField(
            model_name='serverstatistic',
            name='status',
            field=models.BooleanField(verbose_name='Статус завершения'),
        ),
        # Statistic rows are deleted together with their parent ServerTask.
        migrations.AlterField(
            model_name='serverstatistic',
            name='task',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='statistic', to='task.ServerTask', verbose_name='Задание'),
        ),
    ]
| 34.4 | 153 | 0.625 |
import core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('statistic', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='serverstatistic',
options={'ordering': ['-request_id', 'task', '-status'], 'verbose_name': 'Статистика сервера', 'verbose_name_plural': 'Статистика серверов'},
),
migrations.AlterField(
model_name='serverstatistic',
name='executed',
field=models.DateTimeField(verbose_name='Время выполнения'),
),
migrations.AlterField(
model_name='serverstatistic',
name='result',
field=models.TextField(validators=[core.validators.validate_json], verbose_name='Результат опроса'),
),
migrations.AlterField(
model_name='serverstatistic',
name='status',
field=models.BooleanField(verbose_name='Статус завершения'),
),
migrations.AlterField(
model_name='serverstatistic',
name='task',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='statistic', to='task.ServerTask', verbose_name='Задание'),
),
]
| true | true |
f73974fd21116cf205103e78262fe4b388883bb5 | 820 | py | Python | spark_auto_mapper_fhir/value_sets/v2_0532.py | imranq2/SparkAutoMapper.FHIR | dd23b218fb0097d1edc2f3e688e8d6d4d7278bd2 | [
"Apache-2.0"
] | 1 | 2020-10-31T23:25:07.000Z | 2020-10-31T23:25:07.000Z | spark_auto_mapper_fhir/value_sets/v2_0532.py | icanbwell/SparkAutoMapper.FHIR | 98f368e781b46523142c7cb513c670d659a93c9b | [
"Apache-2.0"
] | null | null | null | spark_auto_mapper_fhir/value_sets/v2_0532.py | icanbwell/SparkAutoMapper.FHIR | 98f368e781b46523142c7cb513c670d659a93c9b | [
"Apache-2.0"
] | null | null | null | from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class V2_0532(GenericTypeCode):
    """
    v2.0532
    From: http://terminology.hl7.org/ValueSet/v2-0532 in v2-tables.xml
    FHIR Value set/code system definition for HL7 v2 table 0532 ( Expanded yes/no
    indicator)
    """

    def __init__(self, value: AutoMapperTextInputType):
        # Storage/validation of the coded value is delegated to the base class.
        super().__init__(value=value)

    """
    http://terminology.hl7.org/ValueSet/v2-0532
    """
    # Canonical URI identifying this value set / code system.
    codeset: FhirUri = "http://terminology.hl7.org/ValueSet/v2-0532"
| 31.538462 | 85 | 0.757317 | from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
class V2_0532(GenericTypeCode):
def __init__(self, value: AutoMapperTextInputType):
super().__init__(value=value)
codeset: FhirUri = "http://terminology.hl7.org/ValueSet/v2-0532"
| true | true |
f739768ba3a3dcd1ee2df044fba957f6a1611d66 | 156 | py | Python | versions.py | oguznsari/sentdex-NN | b91ebf6bb9e8c0580f3e186090716c7c33eb823d | [
"MIT"
] | null | null | null | versions.py | oguznsari/sentdex-NN | b91ebf6bb9e8c0580f3e186090716c7c33eb823d | [
"MIT"
] | null | null | null | versions.py | oguznsari/sentdex-NN | b91ebf6bb9e8c0580f3e186090716c7c33eb823d | [
"MIT"
# Print interpreter and library versions for quick environment sanity checks.
import sys
import numpy as np
import matplotlib

print(f"Python: {sys.version}")
print(f"Numpy: {np.__version__}")
print(f"Matplotlib: {matplotlib.__version__}")
| 19.5 | 44 | 0.769231 | import sys
import numpy as np
import matplotlib
print("Python:", sys.version)
print("Numpy:", np.__version__)
print("Matplotlib:", matplotlib.__version__)
| true | true |
f7397778926ebf421f7d9d0ffdc39316f57579f6 | 420 | py | Python | examples/idioms/programs/037.0671-currying.py | laowantong/paroxython | 4626798a60eeaa765dbfab9e63e04030c9fcb1d0 | [
"MIT"
] | 31 | 2020-05-02T13:34:26.000Z | 2021-06-06T17:25:52.000Z | examples/idioms/programs/037.0671-currying.py | laowantong/paroxython | 4626798a60eeaa765dbfab9e63e04030c9fcb1d0 | [
"MIT"
] | 108 | 2019-11-18T19:41:52.000Z | 2022-03-18T13:58:17.000Z | examples/idioms/programs/037.0671-currying.py | laowantong/paroxython | 4626798a60eeaa765dbfab9e63e04030c9fcb1d0 | [
"MIT"
] | 4 | 2020-05-19T08:57:44.000Z | 2020-09-21T08:53:46.000Z | """Currying.
Transform a function that takes multiple arguments into a function for which some of the arguments are preset.
Source: Adrian
"""
# Implementation author: cym13
# Created on 2015-11-30T12:37:28.255934Z
# Last modified on 2019-09-26T16:59:21.413984Z
# Version 3
from functools import partial
def f(a):
    """Curried addition: bind ``a`` now, supply the second addend later."""
    # The returned closure captures ``a``; each call evaluates ``a + b``.
    return lambda b: a + b


print(f(2)(1))
# add_to_two = partial(f, 2)
| 16.153846 | 110 | 0.7 |
from functools import partial
def f(a):
def add(b):
return a + b
return add
print(f(2)(1))
| true | true |
f7397796c5f3f08bb50d94301d9225dd5ece1f1b | 1,114 | py | Python | spikeinterface/toolkit/tests/test_utils.py | marcbue/spikeinterface | d3462eeabcb9f0b9816004dd47355e40f4de1ac5 | [
"MIT"
] | null | null | null | spikeinterface/toolkit/tests/test_utils.py | marcbue/spikeinterface | d3462eeabcb9f0b9816004dd47355e40f4de1ac5 | [
"MIT"
] | null | null | null | spikeinterface/toolkit/tests/test_utils.py | marcbue/spikeinterface | d3462eeabcb9f0b9816004dd47355e40f4de1ac5 | [
"MIT"
] | null | null | null | import unittest
import pytest
import numpy as np
from spikeinterface.core.tests.testing_tools import generate_recording
from spikeinterface.toolkit.utils import (get_random_data_chunks,
get_closest_channels, get_noise_levels)
def test_get_random_data_chunks():
    # Two segments (10 s + 20 s) at 1 kHz, a single channel.
    recording = generate_recording(num_channels=1, sampling_frequency=1000., durations=[10., 20.])
    # 50 chunks x 500 samples from each of the 2 segments -> 50000 frames.
    sampled = get_random_data_chunks(recording, num_chunks_per_segment=50, chunk_size=500, seed=0)
    assert sampled.shape == (50000, 1)
def test_get_closest_channels():
    recording = generate_recording(num_channels=32, sampling_frequency=1000., durations=[0.1])
    # Smoke-check both the default query and one restricted to 4 neighbours.
    neighbour_inds, neighbour_dists = get_closest_channels(recording)
    neighbour_inds, neighbour_dists = get_closest_channels(recording, num_channels=4)
def test_get_noise_levels():
    recording = generate_recording(num_channels=2, sampling_frequency=1000., durations=[60.])
    # Smoke-check: compute and display the per-channel noise estimates.
    print(get_noise_levels(recording))
# Ad-hoc runner: only the chunk-sampling test is enabled by default.
if __name__ == '__main__':
    test_get_random_data_chunks()
    # test_get_closest_channels()
    # test_get_noise_levels()
| 30.944444 | 92 | 0.753142 | import unittest
import pytest
import numpy as np
from spikeinterface.core.tests.testing_tools import generate_recording
from spikeinterface.toolkit.utils import (get_random_data_chunks,
get_closest_channels, get_noise_levels)
def test_get_random_data_chunks():
rec = generate_recording(num_channels=1, sampling_frequency=1000., durations=[10., 20.])
chunks = get_random_data_chunks(rec, num_chunks_per_segment=50, chunk_size=500, seed=0)
assert chunks.shape == (50000, 1)
def test_get_closest_channels():
rec = generate_recording(num_channels=32, sampling_frequency=1000., durations=[0.1])
closest_channels_inds, distances = get_closest_channels(rec)
closest_channels_inds, distances = get_closest_channels(rec, num_channels=4)
def test_get_noise_levels():
rec = generate_recording(num_channels=2, sampling_frequency=1000., durations=[60.])
noise_levels = get_noise_levels(rec)
print(noise_levels)
if __name__ == '__main__':
test_get_random_data_chunks()
| true | true |
f73979105d96c017dcdbf9d64d2b3bc00b8ea5ad | 3,764 | py | Python | GetStarted/03_finding_images.py | pberezina/earthengine-py-notebooks | 4cbe3c52bcc9ed3f1337bf097aa5799442991a5e | [
"MIT"
] | 1 | 2020-03-20T19:39:34.000Z | 2020-03-20T19:39:34.000Z | GetStarted/03_finding_images.py | pberezina/earthengine-py-notebooks | 4cbe3c52bcc9ed3f1337bf097aa5799442991a5e | [
"MIT"
] | null | null | null | GetStarted/03_finding_images.py | pberezina/earthengine-py-notebooks | 4cbe3c52bcc9ed3f1337bf097aa5799442991a5e | [
"MIT"
] | null | null | null | '''
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/GetStarted/03_finding_images.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/GetStarted/03_finding_images.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=GetStarted/03_finding_images.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/GetStarted/03_finding_images.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
'''
# %%
'''
## Install Earth Engine API
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.
The following script checks if the geehydro package has been installed. If not, it will install geehydro, which automatically install its dependencies, including earthengine-api and folium.
'''
# %%
import subprocess
import sys

# Ensure geehydro (which pulls in earthengine-api and folium) is importable;
# install it on the fly otherwise.
try:
    import geehydro
except ImportError:
    print('geehydro package not installed. Installing ...')
    # Invoke pip through the *running* interpreter (sys.executable) so the
    # package lands in the active environment, not whatever "python" is on
    # PATH.
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'geehydro'])
# %%
'''
Import libraries
'''
# %%
import ee
import folium
import geehydro
# %%
'''
Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once.
'''
# %%
# Authenticate and initialize the Earth Engine API. Authentication is only
# needed once; afterwards Initialize() succeeds directly.
try:
    ee.Initialize()
except Exception:
    # First run or expired credentials: authenticate, then retry init.
    ee.Authenticate()
    ee.Initialize()
# %%
'''
## Create an interactive map
This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function.
The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.
'''
# %%
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')

# %%
'''
## Add Earth Engine Python script
'''

# %%
collection = ee.ImageCollection('LANDSAT/LC08/C01/T1')
point = ee.Geometry.Point(-122.262, 37.8719)
start = ee.Date('2014-06-01')
finish = ee.Date('2014-10-01')
# Least-cloudy Landsat 8 scene covering the point within the date window
# (sort ascending on CLOUD_COVER, then take the first element).
filteredCollection = ee.ImageCollection('LANDSAT/LC08/C01/T1') \
    .filterBounds(point) \
    .filterDate(start, finish) \
    .sort('CLOUD_COVER', True)
first = filteredCollection.first()
# Define visualization parameters in an object literal.
vizParams = {'bands': ['B5', 'B4', 'B3'],
             'min': 5000, 'max': 15000, 'gamma': 1.3}
Map.addLayer(first, vizParams, 'Landsat 8 image')
# Load a feature collection.
featureCollection = ee.FeatureCollection('TIGER/2016/States')
# Filter the collection.
filteredFC = featureCollection.filter(ee.Filter.eq('NAME', 'California'))
# Display the collection (outline painted in red, 2 px wide).
Map.addLayer(ee.Image().paint(filteredFC, 0, 2),
             {'palette': 'red'}, 'California')

# %%
'''
## Display Earth Engine data layers
'''

# %%
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map | 35.509434 | 422 | 0.720244 |
import subprocess
try:
import geehydro
except ImportError:
print('geehydro package not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geehydro'])
import ee
import folium
import geehydro
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
collection = ee.ImageCollection('LANDSAT/LC08/C01/T1')
point = ee.Geometry.Point(-122.262, 37.8719)
start = ee.Date('2014-06-01')
finish = ee.Date('2014-10-01')
filteredCollection = ee.ImageCollection('LANDSAT/LC08/C01/T1') \
.filterBounds(point) \
.filterDate(start, finish) \
.sort('CLOUD_COVER', True)
first = filteredCollection.first()
vizParams = {'bands': ['B5', 'B4', 'B3'],
'min': 5000, 'max': 15000, 'gamma': 1.3}
Map.addLayer(first, vizParams, 'Landsat 8 image')
featureCollection = ee.FeatureCollection('TIGER/2016/States')
filteredFC = featureCollection.filter(ee.Filter.eq('NAME', 'California'))
Map.addLayer(ee.Image().paint(filteredFC, 0, 2),
{'palette': 'red'}, 'California')
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map | true | true |
f7397ac015f87f3ed5275efc9f2b5140d07fad8b | 299 | py | Python | tree_registry_crawler/pipelines.py | g0vhk-io/tree-registry-crawler | 95f959615c00a486ebd95d896d74907af96f0579 | [
"Apache-2.0"
] | null | null | null | tree_registry_crawler/pipelines.py | g0vhk-io/tree-registry-crawler | 95f959615c00a486ebd95d896d74907af96f0579 | [
"Apache-2.0"
] | null | null | null | tree_registry_crawler/pipelines.py | g0vhk-io/tree-registry-crawler | 95f959615c00a486ebd95d896d74907af96f0579 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class TreeRegistryCrawlerPipeline:
    """Scrapy item pipeline for the tree-registry crawler.

    Currently a pass-through: items are forwarded unchanged. Remember to
    register it in the project's ITEM_PIPELINES setting to activate it.
    """

    def process_item(self, item, spider):
        """Return *item* untouched; extend here to clean or validate items."""
        return item
| 24.916667 | 65 | 0.722408 |
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class TreeRegistryCrawlerPipeline(object):
def process_item(self, item, spider):
return item
| true | true |
f7397ac8f6550cb6035d1be0d3d3974eda410e08 | 1,191 | py | Python | user/utils/validators.py | shoorday/IC | dfd6509d2e850ffd02dc0ddd31366e5e7f51eab8 | [
"MIT"
] | 2 | 2020-05-13T03:53:11.000Z | 2020-05-27T11:19:18.000Z | user/utils/validators.py | ShoorDay/IC | dfd6509d2e850ffd02dc0ddd31366e5e7f51eab8 | [
"MIT"
] | 4 | 2021-03-01T21:13:48.000Z | 2021-06-28T19:57:00.000Z | user/utils/validators.py | shoorday/IC | dfd6509d2e850ffd02dc0ddd31366e5e7f51eab8 | [
"MIT"
] | null | null | null | from rest_framework.serializers import ValidationError
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth import get_user_model, authenticate
User = get_user_model()
def pwdExist(data):
try:
new_password = str(data['new_password'])
return data
except KeyError:
raise ValidationError('new_password为必填字段')
def checkOldPwd(data):
try:
new_password = str(data['new_password'])
old_password = str(data['old_password'])
username = str(data['username'])
except KeyError:
raise ValidationError('字段缺失')
user = authenticate(username=username, password=old_password)
if user is not None:
if old_password != new_password:
return data
raise ValidationError('新旧密码相同')
raise ValidationError('用户不存在')
def CheckUsername(data):
username = data.get('username', None)
if username is not None:
illegal_name = ['admin', 'sure', 'sure.z',
'sure.zh', 'django', 'shoor', 'shoorday']
if username.lower() in illegal_name:
raise ValidationError('用户名已被使用')
return data
| 29.775 | 66 | 0.638119 | from rest_framework.serializers import ValidationError
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth import get_user_model, authenticate
User = get_user_model()
def pwdExist(data):
try:
new_password = str(data['new_password'])
return data
except KeyError:
raise ValidationError('new_password为必填字段')
def checkOldPwd(data):
    """Validate a password-change payload.

    Requires ``new_password``, ``old_password`` and ``username``;
    authenticates the user with the old password, then rejects the change
    when authentication fails or the new password equals the old one.
    """
    try:
        new_password = str(data['new_password'])
        old_password = str(data['old_password'])
        username = str(data['username'])
    except KeyError:
        # "Missing field(s)".
        raise ValidationError('字段缺失')
    # Django's authenticate() returns None both for an unknown user and for
    # a wrong password, so the final error message covers both cases.
    user = authenticate(username=username, password=old_password)
    if user is not None:
        if old_password != new_password:
            return data
        # "New password is identical to the old one".
        raise ValidationError('新旧密码相同')
    # "User does not exist" (also reached on a wrong old password).
    raise ValidationError('用户不存在')
def CheckUsername(data):
    """Reject usernames from the reserved list (case-insensitive)."""
    username = data.get('username', None)
    if username is not None:
        # Reserved/blocked names; the lower() call makes the comparison
        # case-insensitive.
        illegal_name = ['admin', 'sure', 'sure.z',
                        'sure.zh', 'django', 'shoor', 'shoorday']
        if username.lower() in illegal_name:
            # "Username is already taken".
            raise ValidationError('用户名已被使用')
    return data
| true | true |
f7397b8effcc0c80aa4665a9fc237a3ced823fa4 | 1,323 | py | Python | facein_api/profiles/migrations/0002_fix_user_model.py | gda2048/facein-backend | 01a8500ffcdaef91540b0cdf94d50dd64cb46276 | [
"MIT"
] | 1 | 2021-12-27T12:35:06.000Z | 2021-12-27T12:35:06.000Z | facein_api/profiles/migrations/0002_fix_user_model.py | Sirkirill/facetracker-backend | 462dff11e7dccc379a6e04250b121120ec954c8d | [
"MIT"
] | 10 | 2021-06-07T16:41:35.000Z | 2022-03-12T01:04:23.000Z | facein_api/profiles/migrations/0002_fix_user_model.py | gda2048/facein-backend | 01a8500ffcdaef91540b0cdf94d50dd64cb46276 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.12 on 2020-05-24 21:17
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
    """Auto-generated (Django 2.2): re-declare help texts on the custom
    user's role flags.

    Only presentation metadata (``help_text`` / ``verbose_name``) changes
    here; the field type and default are restated unchanged.
    """

    dependencies = [
        ('profiles', '0001_add_custom_user'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='is_admin',
            field=models.BooleanField(default=False,
                                      help_text='Designates whether this user should be treated as admin of the customer company. ',
                                      verbose_name='admin status'),
        ),
        migrations.AlterField(
            model_name='user',
            name='is_blacklisted',
            # NOTE(review): help_text says "treated as active" for a blacklist
            # flag — looks like a copy-paste slip in the model; confirm the
            # intended wording before changing it (migrations record history).
            field=models.BooleanField(default=False,
                                      help_text='Designates whether this user should be treated as active. ',
                                      verbose_name='black list status'),
        ),
        migrations.AlterField(
            model_name='user',
            name='is_security',
            field=models.BooleanField(default=False,
                                      help_text='Designates whether this user should be treated as security guard of the customer company. ',
                                      verbose_name='security status'),
        ),
    ]
| 37.8 | 141 | 0.540438 |
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
    """Generated migration updating help_text/verbose_name on the three
    boolean role flags of the custom ``User`` model."""

    dependencies = [
        ('profiles', '0001_add_custom_user'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='is_admin',
            field=models.BooleanField(default=False,
                                      help_text='Designates whether this user should be treated as admin of the customer company. ',
                                      verbose_name='admin status'),
        ),
        migrations.AlterField(
            model_name='user',
            name='is_blacklisted',
            field=models.BooleanField(default=False,
                                      help_text='Designates whether this user should be treated as active. ',
                                      verbose_name='black list status'),
        ),
        migrations.AlterField(
            model_name='user',
            name='is_security',
            field=models.BooleanField(default=False,
                                      help_text='Designates whether this user should be treated as security guard of the customer company. ',
                                      verbose_name='security status'),
        ),
    ]
| true | true |
f7397cc615020ec3d682feaf0352f879ca7afd63 | 51,090 | py | Python | tests/t5/test_modeling_tf_t5.py | symphonylyh/transformers | 03e5d5196ca76008b60da9bb6d604e6bdbcba0db | [
"Apache-2.0"
] | 1 | 2022-03-16T13:02:15.000Z | 2022-03-16T13:02:15.000Z | tests/t5/test_modeling_tf_t5.py | symphonylyh/transformers | 03e5d5196ca76008b60da9bb6d604e6bdbcba0db | [
"Apache-2.0"
] | null | null | null | tests/t5/test_modeling_tf_t5.py | symphonylyh/transformers | 03e5d5196ca76008b60da9bb6d604e6bdbcba0db | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 Google T5 Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import T5Config, is_tf_available
from transformers.file_utils import cached_property
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ..test_configuration_common import ConfigTester
from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor
if is_tf_available():
import tensorflow as tf
from transformers import ByT5Tokenizer, T5Tokenizer, TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
class TFT5ModelTester:
    """Builds tiny T5 configs/inputs and runs shape- and cache-consistency
    checks for the TF T5 encoder-decoder models.

    ``parent`` is the ``unittest.TestCase`` driving the checks; every
    assertion is delegated to it via ``self.parent.assert*``.
    """

    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        # Deliberately tiny dimensions so each forward pass stays fast.
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.n_positions = 14
        self.hidden_size = 32
        self.num_hidden_layers = 5
        self.num_attention_heads = 4
        self.d_ff = 37
        self.relative_attention_num_buckets = 8
        self.dropout_rate = 0.1
        self.initializer_factor = 0.002
        self.eos_token_id = 1
        self.pad_token_id = 0
        self.scope = None

    def prepare_config_and_inputs(self):
        """Return ``(config, input_ids, input_mask, token_labels)`` for a tiny T5."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = T5Config(
            vocab_size=self.vocab_size,
            n_positions=self.n_positions,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
        )

        return (config, input_ids, input_mask, token_labels)

    def create_and_check_t5_model(self, config, input_ids, input_mask, token_labels):
        """Run dict- and kwarg-style forward passes; check output shapes and cache layout."""
        model = TFT5Model(config=config)
        inputs = {
            "input_ids": input_ids,
            "decoder_input_ids": input_ids,
            "decoder_attention_mask": input_mask,
        }
        result = model(inputs)

        result = model(input_ids, decoder_attention_mask=input_mask, decoder_input_ids=input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertListEqual(list(encoder_output.shape), [self.batch_size, self.seq_length, self.hidden_size])
        self.parent.assertListEqual(list(decoder_output.shape), [self.batch_size, self.seq_length, self.hidden_size])
        # There should be `num_layers` key value embeddings stored in decoder_past[1]
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past[1] tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_t5_with_lm_head(self, config, input_ids, input_mask, token_labels):
        """Check the LM-head model emits logits shaped (batch, seq, vocab)."""
        model = TFT5ForConditionalGeneration(config=config)
        inputs_dict = {
            "input_ids": input_ids,
            "decoder_input_ids": input_ids,
            "decoder_attention_mask": input_mask,
        }

        result = model(inputs_dict)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_t5_decoder_model_past(self, config, input_ids, decoder_input_ids, attention_mask):
        """Verify cached (past_key_values) decoding matches a full forward pass."""
        model = TFT5Model(config=config).get_decoder()

        # Work on a single sequence to keep the cached/uncached comparison cheap.
        input_ids = input_ids[:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)

        output_from_no_past = model(next_input_ids)[0]
        output_from_past = model(next_tokens, past_key_values=outputs.past_key_values)[0]

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx]
        output_from_past_slice = output_from_past[:, 0, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)

    def create_and_check_t5_decoder_model_attention_mask_past(
        self, config, input_ids, decoder_input_ids, attention_mask
    ):
        """Same cache-consistency check, with a partially masked prompt."""
        model = TFT5Model(config=config).get_decoder()

        # create attention mask
        half_seq_length = self.seq_length // 2
        attn_mask_begin = tf.ones((self.batch_size, half_seq_length), dtype=tf.int32)
        attn_mask_end = tf.zeros((self.batch_size, self.seq_length - half_seq_length), dtype=tf.int32)
        attn_mask = tf.concat([attn_mask_begin, attn_mask_end], axis=1)

        # first forward pass
        outputs = model(input_ids, attention_mask=attn_mask, use_cache=True)

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).numpy() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, self.seq_length), config.vocab_size)
        vector_condition = tf.range(self.seq_length) == (self.seq_length - random_seq_idx_to_change)
        condition = tf.transpose(
            tf.broadcast_to(tf.expand_dims(vector_condition, -1), (self.seq_length, self.batch_size))
        )
        input_ids = tf.where(condition, random_other_next_tokens, input_ids)

        # append to next input_ids and attn_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        attn_mask = tf.concat(
            [attn_mask, tf.ones((attn_mask.shape[0], 1), dtype=tf.int32)],
            axis=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)[0]
        output_from_past = model(next_tokens, past_key_values=outputs.past_key_values, attention_mask=attn_mask)[0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).numpy().item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx]
        output_from_past_slice = output_from_past[:, 0, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)

    def create_and_check_t5_decoder_model_past_large_inputs(
        self, config, input_ids, decoder_input_ids, attention_mask
    ):
        """Cache-consistency check when appending several tokens at once."""
        model = TFT5Model(config=config).get_decoder()

        input_ids = input_ids[:1, :]
        attention_mask = attention_mask[:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, past_key_values=outputs.past_key_values
        )[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)

    def create_and_check_t5_xla_generate(self, config, input_ids, *args):
        """Check greedy generate() matches its XLA-compiled counterpart."""
        # Greedy, fixed-length generation so the two runs are comparable.
        config.eos_token_id = None
        config.max_length = 10
        config.do_sample = False
        config.num_beams = 1
        model = TFT5ForConditionalGeneration(config=config)

        # make sure there are no pad tokens in prompt
        input_ids = tf.where(input_ids != config.pad_token_id, input_ids, config.pad_token_id + 5)

        generated = model.generate(input_ids)

        generate_xla = tf.function(model.generate, jit_compile=True)
        generated_xla = generate_xla(input_ids)

        self.parent.assertListEqual(generated.numpy().tolist(), generated_xla.numpy().tolist())

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the shared ``TFModelTesterMixin`` test suite."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, token_labels) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "decoder_input_ids": input_ids,
            "decoder_attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFT5ModelTest(TFModelTesterMixin, unittest.TestCase):
    """Shared-mixin test suite for the TF T5 encoder-decoder models."""

    is_encoder_decoder = True
    all_model_classes = (TFT5Model, TFT5ForConditionalGeneration) if is_tf_available() else ()
    all_generative_model_classes = (TFT5ForConditionalGeneration,) if is_tf_available() else ()
    test_onnx = False

    def setUp(self):
        self.model_tester = TFT5ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_t5_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_t5_model(*config_and_inputs)

    def test_t5_model_v1_1(self):
        # Exercise the T5 v1.1 variant: untied embeddings + gated-gelu FFN.
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        config.tie_word_embeddings = False
        config.feed_forward_proj = "gated-gelu"
        self.model_tester.create_and_check_t5_model(config, *config_and_inputs[1:])

    def test_with_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_t5_with_lm_head(*config_and_inputs)

    def test_t5_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_t5_decoder_model_past(*config_and_inputs)

    def test_t5_decoder_model_past_with_attn_mask(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_t5_decoder_model_attention_mask_past(*config_and_inputs)

    def test_t5_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_t5_decoder_model_past_large_inputs(*config_and_inputs)

    def test_t5_model_xla_generate(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_t5_xla_generate(*config_and_inputs)

    def test_model_common_attributes(self):
        """Check the embedding/bias accessors that the common tests rely on."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)

            if model_class in self.all_generative_model_classes:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_saved_model_creation(self):
        # This test is too long (>30sec) and makes fail the CI
        pass

    @slow
    def test_model_from_pretrained(self):
        model = TFT5Model.from_pretrained("t5-small")
        self.assertIsNotNone(model)

    def test_generate_with_headmasking(self):
        # TODO: Fix head-masking according to PyTorch T5 model
        pass

    @slow
    def test_resize_embeddings(self):
        model = TFT5ForConditionalGeneration.from_pretrained("t5-small")
        original_vocab_size = model.get_input_embeddings().weight.shape[0]
        # the vocab size is defined in the model config
        self.assertEqual(original_vocab_size, model.config.vocab_size)

        tokenizer = T5Tokenizer.from_pretrained("t5-small")
        tokenizer.add_special_tokens({"bos_token": "", "eos_token": ""})
        model._resize_token_embeddings(len(tokenizer))
        # the vocab size is now resized to the length of the tokenizer, which is different from the original size
        self.assertEqual(model.get_input_embeddings().weight.shape[0], len(tokenizer))
        self.assertNotEqual(model.get_input_embeddings().weight.shape[0], original_vocab_size)
class TFT5EncoderOnlyModelTester:
    """Builds tiny configs/inputs for the encoder-only TF T5 model and
    checks its output shape; assertions are delegated to ``parent``."""

    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        # For common tests
        use_attention_mask=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        is_training=False,
        dropout_rate=0.1,
        initializer_factor=0.002,
        is_encoder_decoder=False,
        eos_token_id=1,
        pad_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        # For common tests
        self.seq_length = self.encoder_seq_length
        self.use_attention_mask = use_attention_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.is_encoder_decoder = is_encoder_decoder
        # NOTE(review): the ``scope`` parameter is accepted but ignored
        # (always reset to None) — mirrors the encoder-decoder tester.
        self.scope = None
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        """Return ``(config, input_ids, attention_mask)`` for a tiny encoder-only T5."""
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2)

        config = T5Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            is_encoder_decoder=self.is_encoder_decoder,
        )

        return (
            config,
            input_ids,
            attention_mask,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        attention_mask,
    ):
        """Run forward passes with and without the mask; check the output shape."""
        model = TFT5EncoderModel(config=config)
        result = model(
            input_ids=input_ids,
            attention_mask=attention_mask,
        )
        result = model(input_ids=input_ids)
        encoder_output = result.last_hidden_state

        self.parent.assertEqual(encoder_output.shape, (self.batch_size, self.encoder_seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the shared ``TFModelTesterMixin`` test suite."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_tf
class TFT5EncoderOnlyModelTest(TFModelTesterMixin, unittest.TestCase):
    """Shared-mixin test suite for the encoder-only TF T5 variant.

    Fix: this class was missing the ``@require_tf`` decorator that its
    sibling ``TFT5ModelTest`` carries, so its TF-dependent tests would run
    (and fail) instead of being skipped when TensorFlow is not installed.
    """

    is_encoder_decoder = False
    all_model_classes = (TFT5EncoderModel,) if is_tf_available() else ()
    test_onnx = False

    def setUp(self):
        self.model_tester = TFT5EncoderOnlyModelTester(self)
        self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    # is not able to be part of a pipeline
    def test_train_pipeline_custom_model(self):
        pass
@require_tf
@require_sentencepiece
@require_tokenizers
class TFT5GenerationIntegrationTests(unittest.TestCase):
    """Slow, network-dependent generation tests against the real t5-small
    checkpoint; all expected strings are pinned reference outputs."""

    @slow
    def test_greedy_xla_generate_simple(self):
        """Greedy generation must produce identical ids with and without XLA."""
        model = TFT5ForConditionalGeneration.from_pretrained("t5-small")
        tokenizer = T5Tokenizer.from_pretrained("t5-small")

        sentence = "Translate English to German: Today is a beautiful day."
        input_ids = tokenizer(sentence, return_tensors="tf", padding=True).input_ids

        xla_generate = tf.function(model.generate, jit_compile=True)

        output_ids = model.generate(input_ids)
        output_ids_xla = xla_generate(input_ids)

        output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
        output_strings_xla = tokenizer.batch_decode(output_ids_xla, skip_special_tokens=True)

        expected_output_string = ["Heute ist ein schöner Tag."]

        self.assertListEqual(expected_output_string, output_strings)
        self.assertListEqual(expected_output_string, output_strings_xla)

    @slow
    def test_greedy_generate(self):
        """Greedy generation with bad-words / n-gram / repetition constraints."""
        model = TFT5ForConditionalGeneration.from_pretrained("t5-small")
        tokenizer = T5Tokenizer.from_pretrained("t5-small")

        sentences = ["Yesterday, my name was", "Today is a beautiful day and"]
        input_ids = tokenizer(sentences, return_tensors="tf", padding=True).input_ids

        generation_kwargs = {
            "bad_words_ids": [tokenizer("my").input_ids, tokenizer("ein schöner").input_ids],
            "no_repeat_ngram_size": 3,
            "do_sample": False,
            "repetition_penalty": 2.2,
        }

        output_ids = model.generate(input_ids, **generation_kwargs)

        output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True)

        expected_output_string = ["Yesterday, my name was", "Heute ist ein schöne Tag und"]

        self.assertListEqual(expected_output_string, output_strings)

    @slow
    def test_sample_generate(self):
        """Seeded sampling with temperature/top-k/top-p must be reproducible."""
        model = TFT5ForConditionalGeneration.from_pretrained("t5-small")
        tokenizer = T5Tokenizer.from_pretrained("t5-small")

        sentences = ["I really love my", "Translate English to German: the transformers are truly amazing"]
        input_ids = tokenizer(sentences, return_tensors="tf", padding=True).input_ids

        generation_kwargs = {
            "do_sample": True,
            "bad_words_ids": [tokenizer("my").input_ids, tokenizer("ein schöner").input_ids],
            "no_repeat_ngram_size": 3,
            "repetition_penalty": 2.2,
            "temperature": 0.8,
            "top_k": 500,
            "top_p": 0.9,
        }

        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(42)  # deterministic sampling sequence -> deterministic generation
            output_ids = model.generate(input_ids, **generation_kwargs)

        output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True)

        expected_output_string = ["i love her I really love my heart", "die Transformatoren sind wirklich erstaunlich"]

        self.assertListEqual(expected_output_string, output_strings)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFT5ModelIntegrationTests(unittest.TestCase):
@cached_property
def model(self):
return TFT5ForConditionalGeneration.from_pretrained("t5-base")
@slow
def test_small_integration_test(self):
"""
For comparision run:
>>> import t5 # pip install t5==0.7.1
>>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
>>> path_to_mtf_small_t5_checkpoint = '<fill_in>'
>>> path_to_mtf_small_spm_model_path = '<fill_in>'
>>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_checkpoint, batch_size=1, tpu=None)
>>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100)
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
"""
model = TFT5ForConditionalGeneration.from_pretrained("t5-small")
tokenizer = T5Tokenizer.from_pretrained("t5-small")
input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
labels = tokenizer("Hi I am", return_tensors="tf").input_ids
loss = model(input_ids, labels=labels).loss
mtf_score = -tf.math.reduce_sum(loss).numpy()
EXPECTED_SCORE = -19.0845
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
@slow
def test_small_v1_1_integration_test(self):
"""
For comparision run:
>>> import t5 # pip install t5==0.7.1
>>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
>>> path_to_mtf_small_t5_v1.1_checkpoint = '<fill_in>'
>>> path_to_mtf_small_spm_model_path = '<fill_in>'
>>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_v1.1_checkpoint, batch_size=1, tpu=None)
>>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100)
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
"""
model = TFT5ForConditionalGeneration.from_pretrained("google/t5-v1_1-small")
tokenizer = T5Tokenizer.from_pretrained("google/t5-v1_1-small")
input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
labels = tokenizer("Hi I am", return_tensors="tf").input_ids
loss = model(input_ids, labels=labels).loss
mtf_score = -tf.math.reduce_sum(loss).numpy()
EXPECTED_SCORE = -59.0293
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
@slow
def test_small_byt5_integration_test(self):
"""
For comparision run:
>>> import t5 # pip install t5==0.9.1
>>> path_to_byt5_small_checkpoint = '<fill_in>'
>>> t5_model = t5.models.MtfModel(model_dir=path_to_tf_checkpoint, batch_size=1, tpu=None)
>>> vocab = t5.data.ByteVocabulary()
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
"""
model = TFT5ForConditionalGeneration.from_pretrained("google/byt5-small")
tokenizer = ByT5Tokenizer.from_pretrained("google/byt5-small")
input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
labels = tokenizer("Hi I am", return_tensors="tf").input_ids
loss = model(input_ids, labels=labels).loss
mtf_score = -tf.math.reduce_sum(loss).numpy()
EXPECTED_SCORE = -60.7397
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
@slow
def test_summarization(self):
    """Slow integration test: beam-search summarization with t5-base must
    reproduce the pinned reference summaries for four CNN news articles."""
    model = self.model
    tok = T5Tokenizer.from_pretrained("t5-base")

    # Long source articles, kept verbatim — the expected summaries below are
    # pinned to these exact inputs and to this exact generation config.
    FRANCE_ARTICLE = 'Marseille, France (CNN)The French prosecutor leading an investigation into the crash of Germanwings Flight 9525 insisted Wednesday that he was not aware of any video footage from on board the plane. Marseille prosecutor Brice Robin told CNN that "so far no videos were used in the crash investigation." He added, "A person who has such a video needs to immediately give it to the investigators." Robin\'s comments follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the French Alps. All 150 on board were killed. Paris Match and Bild reported that the video was recovered from a phone at the wreckage site. The two publications described the supposed video, but did not post it on their websites. The publications said that they watched the video, which was found by a source close to the investigation. "One can hear cries of \'My God\' in several languages," Paris Match reported. "Metallic banging can also be heard more than three times, perhaps of the pilot trying to open the cockpit door with a heavy object. Towards the end, after a heavy shake, stronger than the others, the screaming intensifies. Then nothing." "It is a very disturbing scene," said Julian Reichelt, editor-in-chief of Bild online. An official with France\'s accident investigation agency, the BEA, said the agency is not aware of any such video. Lt. Col. Jean-Marc Menichini, a French Gendarmerie spokesman in charge of communications on rescue efforts around the Germanwings crash site, told CNN that the reports were "completely wrong" and "unwarranted." Cell phones have been collected at the site, he said, but that they "hadn\'t been exploited yet." 
    Menichini said he believed the cell phones would need to be sent to the Criminal Research Institute in Rosny sous-Bois, near Paris, in order to be analyzed by specialized technicians working hand-in-hand with investigators. But none of the cell phones found so far have been sent to the institute, Menichini said. Asked whether staff involved in the search could have leaked a memory card to the media, Menichini answered with a categorical "no." Reichelt told "Erin Burnett: Outfront" that he had watched the video and stood by the report, saying Bild and Paris Match are "very confident" that the clip is real. He noted that investigators only revealed they\'d recovered cell phones from the crash site after Bild and Paris Match published their reports. "That is something we did not know before. ... Overall we can say many things of the investigation weren\'t revealed by the investigation at the beginning," he said. What was mental state of Germanwings co-pilot? German airline Lufthansa confirmed Tuesday that co-pilot Andreas Lubitz had battled depression years before he took the controls of Germanwings Flight 9525, which he\'s accused of deliberately crashing last week in the French Alps. Lubitz told his Lufthansa flight training school in 2009 that he had a "previous episode of severe depression," the airline said Tuesday. Email correspondence between Lubitz and the school discovered in an internal investigation, Lufthansa said, included medical documents he submitted in connection with resuming his flight training. The announcement indicates that Lufthansa, the parent company of Germanwings, knew of Lubitz\'s battle with depression, allowed him to continue training and ultimately put him in the cockpit. 
    Lufthansa, whose CEO Carsten Spohr previously said Lubitz was 100% fit to fly, described its statement Tuesday as a "swift and seamless clarification" and said it was sharing the information and documents -- including training and medical records -- with public prosecutors. Spohr traveled to the crash site Wednesday, where recovery teams have been working for the past week to recover human remains and plane debris scattered across a steep mountainside. He saw the crisis center set up in Seyne-les-Alpes, laid a wreath in the village of Le Vernet, closer to the crash site, where grieving families have left flowers at a simple stone memorial. Menichini told CNN late Tuesday that no visible human remains were left at the site but recovery teams would keep searching. French President Francois Hollande, speaking Tuesday, said that it should be possible to identify all the victims using DNA analysis by the end of the week, sooner than authorities had previously suggested. In the meantime, the recovery of the victims\' personal belongings will start Wednesday, Menichini said. Among those personal belongings could be more cell phones belonging to the 144 passengers and six crew on board. Check out the latest from our correspondents . The details about Lubitz\'s correspondence with the flight school during his training were among several developments as investigators continued to delve into what caused the crash and Lubitz\'s possible motive for downing the jet. A Lufthansa spokesperson told CNN on Tuesday that Lubitz had a valid medical certificate, had passed all his examinations and "held all the licenses required." Earlier, a spokesman for the prosecutor\'s office in Dusseldorf, Christoph Kumpa, said medical records reveal Lubitz suffered from suicidal tendencies at some point before his aviation career and underwent psychotherapy before he got his pilot\'s license. 
    Kumpa emphasized there\'s no evidence suggesting Lubitz was suicidal or acting aggressively before the crash. Investigators are looking into whether Lubitz feared his medical condition would cause him to lose his pilot\'s license, a European government official briefed on the investigation told CNN on Tuesday. While flying was "a big part of his life," the source said, it\'s only one theory being considered. Another source, a law enforcement official briefed on the investigation, also told CNN that authorities believe the primary motive for Lubitz to bring down the plane was that he feared he would not be allowed to fly because of his medical problems. Lubitz\'s girlfriend told investigators he had seen an eye doctor and a neuropsychologist, both of whom deemed him unfit to work recently and concluded he had psychological issues, the European government official said. But no matter what details emerge about his previous mental health struggles, there\'s more to the story, said Brian Russell, a forensic psychologist. "Psychology can explain why somebody would turn rage inward on themselves about the fact that maybe they weren\'t going to keep doing their job and they\'re upset about that and so they\'re suicidal," he said. "But there is no mental illness that explains why somebody then feels entitled to also take that rage and turn it outward on 149 other people who had nothing to do with the person\'s problems." Germanwings crash compensation: What we know . Who was the captain of Germanwings Flight 9525? CNN\'s Margot Haddad reported from Marseille and Pamela Brown from Dusseldorf, while Laura Smith-Spark wrote from London. CNN\'s Frederik Pleitgen, Pamela Boykoff, Antonia Mortensen, Sandrine Amiel and Anna-Maja Rappard contributed to this report.' # @noqa
    SHORTER_ARTICLE = '(CNN)The Palestinian Authority officially became the 123rd member of the International Criminal Court on Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based. The Palestinians signed the ICC\'s founding Rome Statute in January, when they also accepted its jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the situation in Palestinian territories, paving the way for possible war crimes investigations against Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and the United States, neither of which is an ICC member, opposed the Palestinians\' efforts to join the body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday\'s ceremony, said it was a move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the world is also a step closer to ending a long era of impunity and injustice," he said, according to an ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine acquires all the rights as well as responsibilities that come with being a State Party to the Statute. These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights Watch welcomed the development. 
    "Governments seeking to penalize Palestine for joining the ICC should immediately end their pressure, and countries that support universal acceptance of the court\'s treaty should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the group. "What\'s objectionable is the attempts to undermine international justice, not Palestine\'s decision to join a treaty to which over 100 countries around the world are members." In January, when the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an outrage, saying the court was overstepping its boundaries. The United States also said it "strongly" disagreed with the court\'s decision. "As we have said repeatedly, we do not believe that Palestine is a state and therefore we do not believe that it is eligible to join the ICC," the State Department said in a statement. It urged the warring sides to resolve their differences through direct negotiations. "We will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace," it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou Bensouda said her office would "conduct its analysis in full independence and impartiality." The war between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry will include alleged war crimes committed since June. The International Criminal Court was set up in 2002 to prosecute genocide, crimes against humanity and war crimes. CNN\'s Vasco Cotovio, Kareem Khadder and Faith Karimi contributed to this report.'
    IRAN_ARTICLE = "(CNN)The United States and its negotiating partners reached a very strong framework agreement with Iran in Lausanne, Switzerland, on Thursday that limits Iran's nuclear program in such a way as to effectively block it from building a nuclear weapon. Expect pushback anyway, if the recent past is any harbinger. Just last month, in an attempt to head off such an agreement, House Speaker John Boehner invited Israeli Prime Minister Benjamin Netanyahu to preemptively blast it before Congress, and 47 senators sent a letter to the Iranian leadership warning them away from a deal. The debate that has already begun since the announcement of the new framework will likely result in more heat than light. It will not be helped by the gathering swirl of dubious assumptions and doubtful assertions. Let us address some of these: . The most misleading assertion, despite universal rejection by experts, is that the negotiations' objective at the outset was the total elimination of any nuclear program in Iran. That is the position of Netanyahu and his acolytes in the U.S. Congress. But that is not and never was the objective. If it had been, there would have been no Iranian team at the negotiating table. Rather, the objective has always been to structure an agreement or series of agreements so that Iran could not covertly develop a nuclear arsenal before the United States and its allies could respond. The new framework has exceeded expectations in achieving that goal. It would reduce Iran's low-enriched uranium stockpile, cut by two-thirds its number of installed centrifuges and implement a rigorous inspection regime. Another dubious assumption of opponents is that the Iranian nuclear program is a covert weapons program. Despite sharp accusations by some in the United States and its allies, Iran denies having such a program, and U.S. intelligence contends that Iran has not yet made the decision to build a nuclear weapon. 
    Iran's continued cooperation with International Atomic Energy Agency inspections is further evidence on this point, and we'll know even more about Iran's program in the coming months and years because of the deal. In fact, the inspections provisions that are part of this agreement are designed to protect against any covert action by the Iranians. What's more, the rhetoric of some members of Congress has implied that the negotiations have been between only the United States and Iran (i.e., the 47 senators' letter warning that a deal might be killed by Congress or a future president). This of course is not the case. The talks were between Iran and the five permanent members of the U.N. Security Council (United States, United Kingdom, France, China and Russia) plus Germany, dubbed the P5+1. While the United States has played a leading role in the effort, it negotiated the terms alongside its partners. If the agreement reached by the P5+1 is rejected by Congress, it could result in an unraveling of the sanctions on Iran and threaten NATO cohesion in other areas. Another questionable assertion is that this agreement contains a sunset clause, after which Iran will be free to do as it pleases. Again, this is not the case. Some of the restrictions on Iran's nuclear activities, such as uranium enrichment, will be eased or eliminated over time, as long as 15 years. But most importantly, the framework agreement includes Iran's ratification of the Additional Protocol, which allows IAEA inspectors expanded access to nuclear sites both declared and nondeclared. This provision will be permanent. It does not sunset. Thus, going forward, if Iran decides to enrich uranium to weapons-grade levels, monitors will be able to detect such a move in a matter of days and alert the U.N. Security Council. Many in Congress have said that the agreement should be a formal treaty requiring the Senate to \"advise and consent.\" But the issue is not suited for a treaty. 
    Treaties impose equivalent obligations on all signatories. For example, the New START treaty limits Russia and the United States to 1,550 deployed strategic warheads. But any agreement with Iran will not be so balanced. The restrictions and obligations in the final framework agreement will be imposed almost exclusively on Iran. The P5+1 are obligated only to ease and eventually remove most but not all economic sanctions, which were imposed as leverage to gain this final deal. Finally some insist that any agreement must address Iranian missile programs, human rights violations or support for Hamas or Hezbollah. As important as these issues are, and they must indeed be addressed, they are unrelated to the most important aim of a nuclear deal: preventing a nuclear Iran. To include them in the negotiations would be a poison pill. This agreement should be judged on its merits and on how it affects the security of our negotiating partners and allies, including Israel. Those judgments should be fact-based, not based on questionable assertions or dubious assumptions."
    ARTICLE_SUBWAY = 'New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. A year later, she got married again in Westchester County, but to a different man and without divorcing her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos declared "I do" five more times, sometimes only within two weeks of each other. In 2010, she married once more, this time in the Bronx. In an application for a marriage license, she stated it was her "first and only" marriage. Barrientos, now 39, is facing two criminal counts of "offering a false instrument for filing in the first degree," referring to her false statements on the 2010 marriage license application, according to court documents. Prosecutors said the marriages were part of an immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to her attorney, Christopher Wright, who declined to comment further. After leaving court, Barrientos was arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total, Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be married to four men, and at one time, she was married to eight men at once, prosecutors say. Prosecutors said the immigration scam involved some of her husbands, who filed for permanent residence status shortly after the marriages. Any divorces happened only after such filings were approved. It was unclear whether any of the men will be prosecuted. The case was referred to the Bronx District Attorney\'s Office by Immigration and Customs Enforcement and the Department of Homeland Security\'s Investigation Division. 
    Seven of the men are from so-called "red-flagged" countries, including Egypt, Turkey, Georgia, Pakistan and Mali. Her eighth husband, Rashid Rajput, was deported in 2006 to his native Pakistan after an investigation by the Joint Terrorism Task Force. If convicted, Barrientos faces up to four years in prison. Her next court appearance is scheduled for May 18.'

    # One reference summary per article, in the same order as the inputs below.
    expected_summaries = [
        'prosecutor: "so far no videos were used in the crash investigation" two magazines claim to have found a cell phone video of the final seconds . "one can hear cries of \'My God\' in several languages," one magazine says .',
        "the formal accession was marked by a ceremony at The Hague, in the Netherlands . the ICC opened a preliminary examination into the situation in the occupied Palestinian territory . as members of the court, Palestinians may be subject to counter-charges as well .",
        "the u.s. and its negotiating partners reached a very strong framework agreement with Iran . aaron miller: the debate that has already begun since the announcement of the new framework will likely result in more heat than light . the deal would reduce Iran's low-enriched uranium stockpile, cut centrifuges and implement a rigorous inspection regime .",
        'prosecutors say the marriages were part of an immigration scam . if convicted, barrientos faces two criminal counts of "offering a false instrument for filing in the first degree" she has been married 10 times, with nine of her marriages occurring between 1999 and 2002 .',
    ]

    # Apply the checkpoint's task-specific generation defaults (prefix etc.), if any.
    task_specific_config = getattr(model.config, "task_specific_params", {})
    summarization_config = task_specific_config.get("summarization", {})
    model.config.update(summarization_config)

    # Tokenize all four prefixed articles to a fixed 512-token batch.
    dct = tok(
        [model.config.prefix + x for x in [FRANCE_ARTICLE, SHORTER_ARTICLE, IRAN_ARTICLE, ARTICLE_SUBWAY]],
        max_length=512,
        padding="max_length",
        truncation=True,
        return_tensors="tf",
    )
    self.assertEqual(512, dct["input_ids"].shape[1])

    # Deterministic beam search — any change to these knobs invalidates the
    # pinned reference summaries above.
    hypotheses_batch = model.generate(
        input_ids=dct["input_ids"],
        attention_mask=dct["attention_mask"],
        num_beams=4,
        length_penalty=2.0,
        max_length=142,
        min_length=56,
        no_repeat_ngram_size=3,
        do_sample=False,
        early_stopping=True,
    )

    decoded = [
        tok.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in hypotheses_batch
    ]

    self.assertListEqual(
        expected_summaries,
        decoded,
    )
@slow
def test_translation_en_to_de(self):
    """Slow integration test: t5-base EN->DE beam-search translation must match
    a pinned reference string."""
    tok = T5Tokenizer.from_pretrained("t5-base")
    model = self.model

    # Apply the checkpoint's EN->DE task defaults (e.g. the task prefix), if present.
    translation_config = getattr(model.config, "task_specific_params", {}).get("translation_en_to_de", {})
    self.model.config.update(translation_config)

    original_input = '"Luigi often said to me that he never wanted the brothers to end up in court", she wrote.'
    expected_translation = (
        '"Luigi sagte mir oft, dass er nie wollte, dass die Brüder am Gericht sitzen", schrieb sie.'
    )

    encoded = tok.encode(model.config.prefix + original_input, return_tensors="tf")
    # Deterministic beam search; the pinned translation depends on these settings.
    generated = model.generate(
        input_ids=encoded,
        num_beams=4,
        length_penalty=2.0,
        max_length=50,
        no_repeat_ngram_size=3,
        do_sample=False,
        early_stopping=True,
    )

    decoded = tok.decode(generated[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
    self.assertEqual(decoded, expected_translation)
@slow
def test_translation_en_to_fr(self):
    """Slow integration test: t5-base EN->FR beam-search translation must match
    a pinned (length-truncated) reference string."""
    model = self.model
    tok = T5Tokenizer.from_pretrained("t5-base")

    # Apply the checkpoint's EN->FR task defaults (e.g. the task prefix), if present.
    translation_config = getattr(model.config, "task_specific_params", {}).get("translation_en_to_fr", {})
    model.config.update(translation_config)

    en_text = ' This image section from an infrared recording by the Spitzer telescope shows a "family portrait" of countless generations of stars: the oldest stars are seen as blue dots. '

    new_truncated_translation = (
        "Cette section d'images provenant de l'enregistrement infrarouge effectué par le télescope Spitzer montre "
        "un "
        "« portrait familial » de générations innombrables d’étoiles : les plus anciennes sont observées "
        "sous forme "
        "de points bleus."
    )

    encoded = tok(model.config.prefix + en_text, return_tensors="tf").input_ids
    # Deterministic beam search; the pinned translation depends on these settings.
    generated = model.generate(
        input_ids=encoded,
        num_beams=4,
        length_penalty=2.0,
        max_length=100,
        no_repeat_ngram_size=3,
        do_sample=False,
        early_stopping=True,
    )

    decoded = tok.decode(generated[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
    self.assertEqual(decoded, new_truncated_translation)
@slow
def test_translation_en_to_ro(self):
    """Slow integration test: t5-base EN->RO beam-search translation must match
    a pinned reference string."""
    model = self.model
    tok = T5Tokenizer.from_pretrained("t5-base")

    # Apply the checkpoint's EN->RO task defaults (e.g. the task prefix), if present.
    translation_config = getattr(model.config, "task_specific_params", {}).get("translation_en_to_ro", {})
    model.config.update(translation_config)

    original_input = "Taco Bell said it plans to add 2,000 locations in the US by 2022."
    expected_translation = "Taco Bell a declarat că intenţionează să adauge 2 000 de locaţii în SUA până în 2022."

    encoded = tok.encode(model.config.prefix + original_input, return_tensors="tf")
    # Deterministic beam search; the pinned translation depends on these settings.
    generated = model.generate(
        input_ids=encoded,
        num_beams=4,
        length_penalty=2.0,
        max_length=50,
        no_repeat_ngram_size=3,
        do_sample=False,
        early_stopping=True,
    )

    decoded = tok.decode(generated[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
    self.assertEqual(decoded, expected_translation)
def test_finetune_keras_trainer(self):
    """Ensure that the model can be fine-tuned via the keras API and
    that metrics work as expected.
    """

    # This metric expects to be called with the logits output
    # NOTE(review): despite its name, this computes the sparse categorical
    # *cross-entropy* of the first target token, not its accuracy
    # (sparse_categorical_crossentropy vs sparse_categorical_accuracy) —
    # confirm whether that is intentional.
    def _accuracy(y_true, y_pred):
        return tf.keras.metrics.sparse_categorical_crossentropy(y_true[:, 0], y_pred[:, 0])

    # measure the accuracy of the first token
    class FirstTokenAccuracy(tf.keras.metrics.MeanMetricWrapper):
        def __init__(self, name="accuracy", **kwargs):
            super().__init__(_accuracy, name=name, **kwargs)

    model = self.model
    model.compile("adam", metrics=FirstTokenAccuracy())
    tokenizer = T5Tokenizer.from_pretrained("t5-small")

    # Two tiny sentiment examples: (input text, target text).
    examples = [
        ("sentiment: Everything is awesome!", "positive"),
        ("sentiment: Tensorflow datasets are hard to use", "negative"),
    ]
    inputs = dict(tokenizer([x[0] for x in examples], padding=True, return_tensors="tf"))
    # Tokenized targets are fed as `labels` so the model computes its own loss.
    inputs["labels"] = tokenizer([x[1] for x in examples], return_tensors="tf").input_ids

    model.fit(inputs)
    # evaluate() must return [loss, first-token metric] — hence length 2.
    m = model.evaluate(inputs)
    self.assertEqual(len(m), 2)
| 63.15204 | 7,207 | 0.727324 |
import unittest
from transformers import T5Config, is_tf_available
from transformers.file_utils import cached_property
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ..test_configuration_common import ConfigTester
from ..test_modeling_tf_common import TFModelTesterMixin, ids_tensor
if is_tf_available():
import tensorflow as tf
from transformers import ByT5Tokenizer, T5Tokenizer, TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
class TFT5ModelTester:
    """Builds deliberately tiny T5 configs and random inputs, and provides the
    `create_and_check_*` routines that the TFT5ModelTest cases delegate to.

    All assertions are forwarded to `self.parent` (the owning unittest.TestCase).
    """

    def __init__(
        self,
        parent,
    ):
        # `parent`: the TestCase whose assert* methods are used below.
        self.parent = parent
        # Tiny model/batch dimensions so every check runs fast.
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.n_positions = 14
        self.hidden_size = 32
        self.num_hidden_layers = 5
        self.num_attention_heads = 4
        self.d_ff = 37
        self.relative_attention_num_buckets = 8
        self.dropout_rate = 0.1
        self.initializer_factor = 0.002
        self.eos_token_id = 1
        self.pad_token_id = 0
        self.scope = None

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, token_labels) with random tiny tensors.

        NOTE(review): several `create_and_check_*` methods below are invoked with
        `*config_and_inputs`, so their `decoder_input_ids`/`attention_mask`
        parameters actually receive `input_mask`/`token_labels` — the parameter
        names do not match what is passed.
        """
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # Binary mask (values in {0, 1}).
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = T5Config(
            vocab_size=self.vocab_size,
            n_positions=self.n_positions,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
        )

        return (config, input_ids, input_mask, token_labels)

    def create_and_check_t5_model(self, config, input_ids, input_mask, token_labels):
        """Check output shapes and past-key-values structure of the base TFT5Model."""
        model = TFT5Model(config=config)
        inputs = {
            "input_ids": input_ids,
            "decoder_input_ids": input_ids,
            "decoder_attention_mask": input_mask,
        }
        result = model(inputs)

        # Same call via keyword arguments must also work.
        result = model(input_ids, decoder_attention_mask=input_mask, decoder_input_ids=input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertListEqual(list(encoder_output.shape), [self.batch_size, self.seq_length, self.hidden_size])
        self.parent.assertListEqual(list(decoder_output.shape), [self.batch_size, self.seq_length, self.hidden_size])
        # One cache entry per layer; each entry holds 4 tensors
        # (self-attn key/value + cross-attn key/value).
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_t5_with_lm_head(self, config, input_ids, input_mask, token_labels):
        """Check that the LM head produces logits of shape (batch, seq, vocab)."""
        model = TFT5ForConditionalGeneration(config=config)
        inputs_dict = {
            "input_ids": input_ids,
            "decoder_input_ids": input_ids,
            "decoder_attention_mask": input_mask,
        }

        result = model(inputs_dict)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_t5_decoder_model_past(self, config, input_ids, decoder_input_ids, attention_mask):
        """Check that decoding with cached past-key-values matches full re-decoding.

        NOTE(review): callers pass `*config_and_inputs`, so `decoder_input_ids`
        and `attention_mask` actually receive `input_mask`/`token_labels` here
        (both unused). Also mutates `self.batch_size` as a side effect.
        """
        model = TFT5Model(config=config).get_decoder()

        # Only use the first sample of the batch to keep comparisons simple.
        input_ids = input_ids[:1, :]
        self.batch_size = 1

        # first forward pass — request the cache explicitly.
        outputs = model(input_ids, use_cache=True)

        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        # use_cache defaults to the config value; disabling it drops exactly one output.
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        # create hypothetical next token
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)

        output_from_no_past = model(next_input_ids)[0]
        output_from_past = model(next_tokens, past_key_values=outputs.past_key_values)[0]

        # select a random hidden-dimension slice and compare the last position of the
        # no-cache run against the single new position of the cached run.
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx]
        output_from_past_slice = output_from_past[:, 0, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)

    def create_and_check_t5_decoder_model_attention_mask_past(
        self, config, input_ids, decoder_input_ids, attention_mask
    ):
        """Check cached decoding under a half-masked attention mask, with a token
        retroactively changed in the masked region (it must not affect the output).

        NOTE(review): `decoder_input_ids`/`attention_mask` parameters are unused;
        callers pass `*config_and_inputs` (see prepare_config_and_inputs).
        """
        model = TFT5Model(config=config).get_decoder()

        # create attention mask: first half attended, second half masked out.
        half_seq_length = self.seq_length // 2
        attn_mask_begin = tf.ones((self.batch_size, half_seq_length), dtype=tf.int32)
        attn_mask_end = tf.zeros((self.batch_size, self.seq_length - half_seq_length), dtype=tf.int32)
        attn_mask = tf.concat([attn_mask_begin, attn_mask_end], axis=1)

        # first forward pass
        outputs = model(input_ids, attention_mask=attn_mask, use_cache=True)

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids — since that position is
        # masked out, the cached and uncached runs must still agree.
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).numpy() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, self.seq_length), config.vocab_size)
        vector_condition = tf.range(self.seq_length) == (self.seq_length - random_seq_idx_to_change)
        condition = tf.transpose(
            tf.broadcast_to(tf.expand_dims(vector_condition, -1), (self.seq_length, self.batch_size))
        )
        input_ids = tf.where(condition, random_other_next_tokens, input_ids)

        # append to next input_ids and attn_mask (new token is attended).
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        attn_mask = tf.concat(
            [attn_mask, tf.ones((attn_mask.shape[0], 1), dtype=tf.int32)],
            axis=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)[0]
        output_from_past = model(next_tokens, past_key_values=outputs.past_key_values, attention_mask=attn_mask)[0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).numpy().item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx]
        output_from_past_slice = output_from_past[:, 0, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)

    def create_and_check_t5_decoder_model_past_large_inputs(
        self, config, input_ids, decoder_input_ids, attention_mask
    ):
        """Check cached decoding when appending three tokens at once (not just one).

        NOTE(review): `decoder_input_ids` is unused; `attention_mask` receives
        `input_mask` when called with `*config_and_inputs`. Also mutates
        `self.batch_size` as a side effect.
        """
        model = TFT5Model(config=config).get_decoder()

        # Only use the first sample of the batch.
        input_ids = input_ids[:1, :]
        attention_mask = attention_mask[:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, past_key_values=outputs.past_key_values
        )[0]

        # The cached run only produces hidden states for the 3 new tokens.
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)

    def create_and_check_t5_xla_generate(self, config, input_ids, *args):
        """Check that XLA-compiled generate() matches eager generate() exactly."""
        # Greedy, fixed-length generation so both paths are fully deterministic.
        config.eos_token_id = None
        config.max_length = 10
        config.do_sample = False
        config.num_beams = 1
        model = TFT5ForConditionalGeneration(config=config)

        # make sure there are no pad tokens in prompt — presumably so the
        # implicitly-inferred attention mask is identical in both paths.
        input_ids = tf.where(input_ids != config.pad_token_id, input_ids, config.pad_token_id + 5)

        generated = model.generate(input_ids)

        generate_xla = tf.function(model.generate, jit_compile=True)
        generated_xla = generate_xla(input_ids)

        self.parent.assertListEqual(generated.numpy().tolist(), generated_xla.numpy().tolist())

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs() to the (config, inputs_dict) shape
        expected by the shared TFModelTesterMixin tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, token_labels) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "decoder_input_ids": input_ids,
            "decoder_attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFT5ModelTest(TFModelTesterMixin, unittest.TestCase):
    """Common model tests for the TF T5 encoder-decoder implementations."""

    is_encoder_decoder = True
    all_model_classes = (TFT5Model, TFT5ForConditionalGeneration) if is_tf_available() else ()
    all_generative_model_classes = (TFT5ForConditionalGeneration,) if is_tf_available() else ()
    test_onnx = False

    def setUp(self):
        self.model_tester = TFT5ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_t5_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_t5_model(*config_and_inputs)

    def test_t5_model_v1_1(self):
        # T5 v1.1 differs from the original release by untied word embeddings
        # and a gated-GELU feed-forward projection.
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        config.tie_word_embeddings = False
        config.feed_forward_proj = "gated-gelu"
        self.model_tester.create_and_check_t5_model(config, *config_and_inputs[1:])

    def test_with_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_t5_with_lm_head(*config_and_inputs)

    def test_t5_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_t5_decoder_model_past(*config_and_inputs)

    def test_t5_decoder_model_past_with_attn_mask(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_t5_decoder_model_attention_mask_past(*config_and_inputs)

    def test_t5_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_t5_decoder_model_past_large_inputs(*config_and_inputs)

    def test_t5_model_xla_generate(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_t5_xla_generate(*config_and_inputs)

    def test_model_common_attributes(self):
        """Every model class must expose embedding layers; only generative models expose output embeddings."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            # Fix: use unittest assertions instead of bare `assert` — bare asserts are
            # stripped under `python -O` and report nothing useful on failure.
            self.assertIsInstance(model.get_input_embeddings(), tf.keras.layers.Layer)

            if model_class in self.all_generative_model_classes:
                self.assertIsInstance(model.get_output_embeddings(), tf.keras.layers.Layer)
                # No bias is attached to the LM head for these models.
                self.assertIsNone(model.get_bias())
            else:
                self.assertIsNone(model.get_output_embeddings())
                self.assertIsNone(model.get_bias())

    def test_saved_model_creation(self):
        # Deliberately overrides the common mixin test with a no-op for T5.
        pass

    @slow
    def test_model_from_pretrained(self):
        model = TFT5Model.from_pretrained("t5-small")
        self.assertIsNotNone(model)

    def test_generate_with_headmasking(self):
        # Deliberately overrides the common mixin test with a no-op for T5.
        pass

    @slow
    def test_resize_embeddings(self):
        """Resizing the token embeddings must track the tokenizer's new vocabulary size."""
        model = TFT5ForConditionalGeneration.from_pretrained("t5-small")
        original_vocab_size = model.get_input_embeddings().weight.shape[0]
        # The loaded checkpoint starts out consistent with its own config.
        self.assertEqual(original_vocab_size, model.config.vocab_size)

        tokenizer = T5Tokenizer.from_pretrained("t5-small")
        tokenizer.add_special_tokens({"bos_token": "", "eos_token": ""})
        model._resize_token_embeddings(len(tokenizer))
        self.assertEqual(model.get_input_embeddings().weight.shape[0], len(tokenizer))
        self.assertNotEqual(model.get_input_embeddings().weight.shape[0], original_vocab_size)
class TFT5EncoderOnlyModelTester:
    """Builds tiny T5 configurations and random inputs for testing the TF encoder-only model."""

    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        use_attention_mask=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        is_training=False,
        dropout_rate=0.1,
        initializer_factor=0.002,
        is_encoder_decoder=False,
        eos_token_id=1,
        pad_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.is_training = is_training
        # Sizes of the synthetic batch; the common mixin reads `seq_length`,
        # which for an encoder-only model equals the encoder length.
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.seq_length = self.encoder_seq_length
        self.use_attention_mask = use_attention_mask
        # Model hyper-parameters mirrored into the T5Config below.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.is_encoder_decoder = is_encoder_decoder
        # NOTE: the `scope` argument is accepted but deliberately not stored
        # (always None), matching the original implementation.
        self.scope = None

    def prepare_config_and_inputs(self):
        """Return a ``(config, input_ids, attention_mask)`` triple of random test inputs."""
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)

        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2)
        else:
            attention_mask = None

        config = T5Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            is_encoder_decoder=self.is_encoder_decoder,
        )
        return config, input_ids, attention_mask

    def create_and_check_model(self, config, input_ids, attention_mask):
        """Run the encoder model with and without a mask and verify the output shape."""
        model = TFT5EncoderModel(config=config)
        # First call exercises the masked path; only the unmasked result is inspected.
        model(input_ids=input_ids, attention_mask=attention_mask)
        result = model(input_ids=input_ids)
        encoder_output = result.last_hidden_state
        self.parent.assertEqual(
            encoder_output.shape, (self.batch_size, self.encoder_seq_length, self.hidden_size)
        )

    def prepare_config_and_inputs_for_common(self):
        """Adapt the config/inputs triple to the dict format the common test mixin expects."""
        config, input_ids, attention_mask = self.prepare_config_and_inputs()
        common_inputs = dict(input_ids=input_ids, attention_mask=attention_mask)
        return config, common_inputs
@require_tf
class TFT5EncoderOnlyModelTest(TFModelTesterMixin, unittest.TestCase):
    """Common model tests for the TF T5 encoder-only model.

    Fix: added the ``@require_tf`` decorator for consistency with ``TFT5ModelTest`` —
    without it this TF-dependent test class is not skipped when TensorFlow is absent.
    """

    is_encoder_decoder = False
    all_model_classes = (TFT5EncoderModel,) if is_tf_available() else ()
    test_onnx = False

    def setUp(self):
        self.model_tester = TFT5EncoderOnlyModelTester(self)
        self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_train_pipeline_custom_model(self):
        # Deliberately overrides the common mixin test with a no-op for this model.
        pass
@require_tf
@require_sentencepiece
@require_tokenizers
class TFT5GenerationIntegrationTests(unittest.TestCase):
    """Slow generation tests against the real ``t5-small`` checkpoint (downloads weights)."""

    @slow
    def test_greedy_xla_generate_simple(self):
        """Greedy decoding must give the same translation in eager mode and under XLA."""
        model = TFT5ForConditionalGeneration.from_pretrained("t5-small")
        tokenizer = T5Tokenizer.from_pretrained("t5-small")

        sentence = "Translate English to German: Today is a beautiful day."
        input_ids = tokenizer(sentence, return_tensors="tf", padding=True).input_ids

        # Same bound method compiled with XLA; outputs must match the eager call exactly.
        xla_generate = tf.function(model.generate, jit_compile=True)

        output_ids = model.generate(input_ids)
        output_ids_xla = xla_generate(input_ids)

        output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
        output_strings_xla = tokenizer.batch_decode(output_ids_xla, skip_special_tokens=True)

        expected_output_string = ["Heute ist ein schöner Tag."]

        self.assertListEqual(expected_output_string, output_strings)
        self.assertListEqual(expected_output_string, output_strings_xla)

    @slow
    def test_greedy_generate(self):
        """Batched greedy generation with bad-word filtering, n-gram blocking and repetition penalty."""
        model = TFT5ForConditionalGeneration.from_pretrained("t5-small")
        tokenizer = T5Tokenizer.from_pretrained("t5-small")

        sentences = ["Yesterday, my name was", "Today is a beautiful day and"]
        input_ids = tokenizer(sentences, return_tensors="tf", padding=True).input_ids

        generation_kwargs = {
            "bad_words_ids": [tokenizer("my").input_ids, tokenizer("ein schöner").input_ids],
            "no_repeat_ngram_size": 3,
            "do_sample": False,
            "repetition_penalty": 2.2,
        }

        output_ids = model.generate(input_ids, **generation_kwargs)

        output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True)

        # Expected strings are checkpoint-pinned reference outputs.
        expected_output_string = ["Yesterday, my name was", "Heute ist ein schöne Tag und"]

        self.assertListEqual(expected_output_string, output_strings)

    @slow
    def test_sample_generate(self):
        """Seeded sampling on CPU must reproduce the expected outputs exactly."""
        model = TFT5ForConditionalGeneration.from_pretrained("t5-small")
        tokenizer = T5Tokenizer.from_pretrained("t5-small")

        sentences = ["I really love my", "Translate English to German: the transformers are truly amazing"]
        input_ids = tokenizer(sentences, return_tensors="tf", padding=True).input_ids

        generation_kwargs = {
            "do_sample": True,
            "bad_words_ids": [tokenizer("my").input_ids, tokenizer("ein schöner").input_ids],
            "no_repeat_ngram_size": 3,
            "repetition_penalty": 2.2,
            "temperature": 0.8,
            "top_k": 500,
            "top_p": 0.9,
        }

        # Pin to CPU and seed TF's RNG so the sampled tokens are reproducible.
        # NOTE(review): the device string ":/CPU:0" looks unusual but is kept as-is.
        with tf.device(":/CPU:0"):
            tf.random.set_seed(42)
            output_ids = model.generate(input_ids, **generation_kwargs)

        output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True)

        expected_output_string = ["i love her I really love my heart", "die Transformatoren sind wirklich erstaunlich"]

        self.assertListEqual(expected_output_string, output_strings)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFT5ModelIntegrationTests(unittest.TestCase):
@cached_property
def model(self):
return TFT5ForConditionalGeneration.from_pretrained("t5-base")
@slow
def test_small_integration_test(self):
model = TFT5ForConditionalGeneration.from_pretrained("t5-small")
tokenizer = T5Tokenizer.from_pretrained("t5-small")
input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
labels = tokenizer("Hi I am", return_tensors="tf").input_ids
loss = model(input_ids, labels=labels).loss
mtf_score = -tf.math.reduce_sum(loss).numpy()
EXPECTED_SCORE = -19.0845
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
@slow
def test_small_v1_1_integration_test(self):
model = TFT5ForConditionalGeneration.from_pretrained("google/t5-v1_1-small")
tokenizer = T5Tokenizer.from_pretrained("google/t5-v1_1-small")
input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
labels = tokenizer("Hi I am", return_tensors="tf").input_ids
loss = model(input_ids, labels=labels).loss
mtf_score = -tf.math.reduce_sum(loss).numpy()
EXPECTED_SCORE = -59.0293
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
@slow
def test_small_byt5_integration_test(self):
model = TFT5ForConditionalGeneration.from_pretrained("google/byt5-small")
tokenizer = ByT5Tokenizer.from_pretrained("google/byt5-small")
input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
labels = tokenizer("Hi I am", return_tensors="tf").input_ids
loss = model(input_ids, labels=labels).loss
mtf_score = -tf.math.reduce_sum(loss).numpy()
EXPECTED_SCORE = -60.7397
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
@slow
def test_summarization(self):
model = self.model
tok = T5Tokenizer.from_pretrained("t5-base")
FRANCE_ARTICLE = 'Marseille, France (CNN)The French prosecutor leading an investigation into the crash of Germanwings Flight 9525 insisted Wednesday that he was not aware of any video footage from on board the plane. Marseille prosecutor Brice Robin told CNN that "so far no videos were used in the crash investigation." He added, "A person who has such a video needs to immediately give it to the investigators." Robin\'s comments follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the French Alps. All 150 on board were killed. Paris Match and Bild reported that the video was recovered from a phone at the wreckage site. The two publications described the supposed video, but did not post it on their websites. The publications said that they watched the video, which was found by a source close to the investigation. "One can hear cries of \'My God\' in several languages," Paris Match reported. "Metallic banging can also be heard more than three times, perhaps of the pilot trying to open the cockpit door with a heavy object. Towards the end, after a heavy shake, stronger than the others, the screaming intensifies. Then nothing." "It is a very disturbing scene," said Julian Reichelt, editor-in-chief of Bild online. An official with France\'s accident investigation agency, the BEA, said the agency is not aware of any such video. Lt. Col. Jean-Marc Menichini, a French Gendarmerie spokesman in charge of communications on rescue efforts around the Germanwings crash site, told CNN that the reports were "completely wrong" and "unwarranted." Cell phones have been collected at the site, he said, but that they "hadn\'t been exploited yet." 
Menichini said he believed the cell phones would need to be sent to the Criminal Research Institute in Rosny sous-Bois, near Paris, in order to be analyzed by specialized technicians working hand-in-hand with investigators. But none of the cell phones found so far have been sent to the institute, Menichini said. Asked whether staff involved in the search could have leaked a memory card to the media, Menichini answered with a categorical "no." Reichelt told "Erin Burnett: Outfront" that he had watched the video and stood by the report, saying Bild and Paris Match are "very confident" that the clip is real. He noted that investigators only revealed they\'d recovered cell phones from the crash site after Bild and Paris Match published their reports. "That is something we did not know before. ... Overall we can say many things of the investigation weren\'t revealed by the investigation at the beginning," he said. What was mental state of Germanwings co-pilot? German airline Lufthansa confirmed Tuesday that co-pilot Andreas Lubitz had battled depression years before he took the controls of Germanwings Flight 9525, which he\'s accused of deliberately crashing last week in the French Alps. Lubitz told his Lufthansa flight training school in 2009 that he had a "previous episode of severe depression," the airline said Tuesday. Email correspondence between Lubitz and the school discovered in an internal investigation, Lufthansa said, included medical documents he submitted in connection with resuming his flight training. The announcement indicates that Lufthansa, the parent company of Germanwings, knew of Lubitz\'s battle with depression, allowed him to continue training and ultimately put him in the cockpit. 
Lufthansa, whose CEO Carsten Spohr previously said Lubitz was 100% fit to fly, described its statement Tuesday as a "swift and seamless clarification" and said it was sharing the information and documents -- including training and medical records -- with public prosecutors. Spohr traveled to the crash site Wednesday, where recovery teams have been working for the past week to recover human remains and plane debris scattered across a steep mountainside. He saw the crisis center set up in Seyne-les-Alpes, laid a wreath in the village of Le Vernet, closer to the crash site, where grieving families have left flowers at a simple stone memorial. Menichini told CNN late Tuesday that no visible human remains were left at the site but recovery teams would keep searching. French President Francois Hollande, speaking Tuesday, said that it should be possible to identify all the victims using DNA analysis by the end of the week, sooner than authorities had previously suggested. In the meantime, the recovery of the victims\' personal belongings will start Wednesday, Menichini said. Among those personal belongings could be more cell phones belonging to the 144 passengers and six crew on board. Check out the latest from our correspondents . The details about Lubitz\'s correspondence with the flight school during his training were among several developments as investigators continued to delve into what caused the crash and Lubitz\'s possible motive for downing the jet. A Lufthansa spokesperson told CNN on Tuesday that Lubitz had a valid medical certificate, had passed all his examinations and "held all the licenses required." Earlier, a spokesman for the prosecutor\'s office in Dusseldorf, Christoph Kumpa, said medical records reveal Lubitz suffered from suicidal tendencies at some point before his aviation career and underwent psychotherapy before he got his pilot\'s license. 
Kumpa emphasized there\'s no evidence suggesting Lubitz was suicidal or acting aggressively before the crash. Investigators are looking into whether Lubitz feared his medical condition would cause him to lose his pilot\'s license, a European government official briefed on the investigation told CNN on Tuesday. While flying was "a big part of his life," the source said, it\'s only one theory being considered. Another source, a law enforcement official briefed on the investigation, also told CNN that authorities believe the primary motive for Lubitz to bring down the plane was that he feared he would not be allowed to fly because of his medical problems. Lubitz\'s girlfriend told investigators he had seen an eye doctor and a neuropsychologist, both of whom deemed him unfit to work recently and concluded he had psychological issues, the European government official said. But no matter what details emerge about his previous mental health struggles, there\'s more to the story, said Brian Russell, a forensic psychologist. "Psychology can explain why somebody would turn rage inward on themselves about the fact that maybe they weren\'t going to keep doing their job and they\'re upset about that and so they\'re suicidal," he said. "But there is no mental illness that explains why somebody then feels entitled to also take that rage and turn it outward on 149 other people who had nothing to do with the person\'s problems." Germanwings crash compensation: What we know . Who was the captain of Germanwings Flight 9525? CNN\'s Margot Haddad reported from Marseille and Pamela Brown from Dusseldorf, while Laura Smith-Spark wrote from London. CNN\'s Frederik Pleitgen, Pamela Boykoff, Antonia Mortensen, Sandrine Amiel and Anna-Maja Rappard contributed to this report.' # @noqa
SHORTER_ARTICLE = '(CNN)The Palestinian Authority officially became the 123rd member of the International Criminal Court on Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based. The Palestinians signed the ICC\'s founding Rome Statute in January, when they also accepted its jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the situation in Palestinian territories, paving the way for possible war crimes investigations against Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and the United States, neither of which is an ICC member, opposed the Palestinians\' efforts to join the body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday\'s ceremony, said it was a move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the world is also a step closer to ending a long era of impunity and injustice," he said, according to an ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine acquires all the rights as well as responsibilities that come with being a State Party to the Statute. These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights Watch welcomed the development. 
"Governments seeking to penalize Palestine for joining the ICC should immediately end their pressure, and countries that support universal acceptance of the court\'s treaty should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the group. "What\'s objectionable is the attempts to undermine international justice, not Palestine\'s decision to join a treaty to which over 100 countries around the world are members." In January, when the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an outrage, saying the court was overstepping its boundaries. The United States also said it "strongly" disagreed with the court\'s decision. "As we have said repeatedly, we do not believe that Palestine is a state and therefore we do not believe that it is eligible to join the ICC," the State Department said in a statement. It urged the warring sides to resolve their differences through direct negotiations. "We will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace," it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou Bensouda said her office would "conduct its analysis in full independence and impartiality." The war between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry will include alleged war crimes committed since June. The International Criminal Court was set up in 2002 to prosecute genocide, crimes against humanity and war crimes. CNN\'s Vasco Cotovio, Kareem Khadder and Faith Karimi contributed to this report.'
IRAN_ARTICLE = "(CNN)The United States and its negotiating partners reached a very strong framework agreement with Iran in Lausanne, Switzerland, on Thursday that limits Iran's nuclear program in such a way as to effectively block it from building a nuclear weapon. Expect pushback anyway, if the recent past is any harbinger. Just last month, in an attempt to head off such an agreement, House Speaker John Boehner invited Israeli Prime Minister Benjamin Netanyahu to preemptively blast it before Congress, and 47 senators sent a letter to the Iranian leadership warning them away from a deal. The debate that has already begun since the announcement of the new framework will likely result in more heat than light. It will not be helped by the gathering swirl of dubious assumptions and doubtful assertions. Let us address some of these: . The most misleading assertion, despite universal rejection by experts, is that the negotiations' objective at the outset was the total elimination of any nuclear program in Iran. That is the position of Netanyahu and his acolytes in the U.S. Congress. But that is not and never was the objective. If it had been, there would have been no Iranian team at the negotiating table. Rather, the objective has always been to structure an agreement or series of agreements so that Iran could not covertly develop a nuclear arsenal before the United States and its allies could respond. The new framework has exceeded expectations in achieving that goal. It would reduce Iran's low-enriched uranium stockpile, cut by two-thirds its number of installed centrifuges and implement a rigorous inspection regime. Another dubious assumption of opponents is that the Iranian nuclear program is a covert weapons program. Despite sharp accusations by some in the United States and its allies, Iran denies having such a program, and U.S. intelligence contends that Iran has not yet made the decision to build a nuclear weapon. 
Iran's continued cooperation with International Atomic Energy Agency inspections is further evidence on this point, and we'll know even more about Iran's program in the coming months and years because of the deal. In fact, the inspections provisions that are part of this agreement are designed to protect against any covert action by the Iranians. What's more, the rhetoric of some members of Congress has implied that the negotiations have been between only the United States and Iran (i.e., the 47 senators' letter warning that a deal might be killed by Congress or a future president). This of course is not the case. The talks were between Iran and the five permanent members of the U.N. Security Council (United States, United Kingdom, France, China and Russia) plus Germany, dubbed the P5+1. While the United States has played a leading role in the effort, it negotiated the terms alongside its partners. If the agreement reached by the P5+1 is rejected by Congress, it could result in an unraveling of the sanctions on Iran and threaten NATO cohesion in other areas. Another questionable assertion is that this agreement contains a sunset clause, after which Iran will be free to do as it pleases. Again, this is not the case. Some of the restrictions on Iran's nuclear activities, such as uranium enrichment, will be eased or eliminated over time, as long as 15 years. But most importantly, the framework agreement includes Iran's ratification of the Additional Protocol, which allows IAEA inspectors expanded access to nuclear sites both declared and nondeclared. This provision will be permanent. It does not sunset. Thus, going forward, if Iran decides to enrich uranium to weapons-grade levels, monitors will be able to detect such a move in a matter of days and alert the U.N. Security Council. Many in Congress have said that the agreement should be a formal treaty requiring the Senate to \"advise and consent.\" But the issue is not suited for a treaty. 
Treaties impose equivalent obligations on all signatories. For example, the New START treaty limits Russia and the United States to 1,550 deployed strategic warheads. But any agreement with Iran will not be so balanced. The restrictions and obligations in the final framework agreement will be imposed almost exclusively on Iran. The P5+1 are obligated only to ease and eventually remove most but not all economic sanctions, which were imposed as leverage to gain this final deal. Finally some insist that any agreement must address Iranian missile programs, human rights violations or support for Hamas or Hezbollah. As important as these issues are, and they must indeed be addressed, they are unrelated to the most important aim of a nuclear deal: preventing a nuclear Iran. To include them in the negotiations would be a poison pill. This agreement should be judged on its merits and on how it affects the security of our negotiating partners and allies, including Israel. Those judgments should be fact-based, not based on questionable assertions or dubious assumptions."
ARTICLE_SUBWAY = 'New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. A year later, she got married again in Westchester County, but to a different man and without divorcing her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos declared "I do" five more times, sometimes only within two weeks of each other. In 2010, she married once more, this time in the Bronx. In an application for a marriage license, she stated it was her "first and only" marriage. Barrientos, now 39, is facing two criminal counts of "offering a false instrument for filing in the first degree," referring to her false statements on the 2010 marriage license application, according to court documents. Prosecutors said the marriages were part of an immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to her attorney, Christopher Wright, who declined to comment further. After leaving court, Barrientos was arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total, Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be married to four men, and at one time, she was married to eight men at once, prosecutors say. Prosecutors said the immigration scam involved some of her husbands, who filed for permanent residence status shortly after the marriages. Any divorces happened only after such filings were approved. It was unclear whether any of the men will be prosecuted. The case was referred to the Bronx District Attorney\'s Office by Immigration and Customs Enforcement and the Department of Homeland Security\'s Investigation Division. 
Seven of the men are from so-called "red-flagged" countries, including Egypt, Turkey, Georgia, Pakistan and Mali. Her eighth husband, Rashid Rajput, was deported in 2006 to his native Pakistan after an investigation by the Joint Terrorism Task Force. If convicted, Barrientos faces up to four years in prison. Her next court appearance is scheduled for May 18.'
expected_summaries = [
'prosecutor: "so far no videos were used in the crash investigation" two magazines claim to have found a cell phone video of the final seconds . "one can hear cries of \'My God\' in several languages," one magazine says .',
"the formal accession was marked by a ceremony at The Hague, in the Netherlands . the ICC opened a preliminary examination into the situation in the occupied Palestinian territory . as members of the court, Palestinians may be subject to counter-charges as well .",
"the u.s. and its negotiating partners reached a very strong framework agreement with Iran . aaron miller: the debate that has already begun since the announcement of the new framework will likely result in more heat than light . the deal would reduce Iran's low-enriched uranium stockpile, cut centrifuges and implement a rigorous inspection regime .",
'prosecutors say the marriages were part of an immigration scam . if convicted, barrientos faces two criminal counts of "offering a false instrument for filing in the first degree" she has been married 10 times, with nine of her marriages occurring between 1999 and 2002 .',
]
task_specific_config = getattr(model.config, "task_specific_params", {})
summarization_config = task_specific_config.get("summarization", {})
model.config.update(summarization_config)
dct = tok(
[model.config.prefix + x for x in [FRANCE_ARTICLE, SHORTER_ARTICLE, IRAN_ARTICLE, ARTICLE_SUBWAY]],
max_length=512,
padding="max_length",
truncation=True,
return_tensors="tf",
)
self.assertEqual(512, dct["input_ids"].shape[1])
hypotheses_batch = model.generate(
input_ids=dct["input_ids"],
attention_mask=dct["attention_mask"],
num_beams=4,
length_penalty=2.0,
max_length=142,
min_length=56,
no_repeat_ngram_size=3,
do_sample=False,
early_stopping=True,
)
decoded = [
tok.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in hypotheses_batch
]
self.assertListEqual(
expected_summaries,
decoded,
)
@slow
def test_translation_en_to_de(self):
tok = T5Tokenizer.from_pretrained("t5-base")
model = self.model
task_specific_config = getattr(model.config, "task_specific_params", {})
translation_config = task_specific_config.get("translation_en_to_de", {})
self.model.config.update(translation_config)
original_input = '"Luigi often said to me that he never wanted the brothers to end up in court", she wrote.'
expected_translation = (
'"Luigi sagte mir oft, dass er nie wollte, dass die Brüder am Gericht sitzen", schrieb sie.'
)
input_ids = tok.encode(model.config.prefix + original_input, return_tensors="tf")
output = model.generate(
input_ids=input_ids,
num_beams=4,
length_penalty=2.0,
max_length=50,
no_repeat_ngram_size=3,
do_sample=False,
early_stopping=True,
)
translation = tok.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
self.assertEqual(translation, expected_translation)
@slow
def test_translation_en_to_fr(self):
    """English->French translation must match a fixed reference string."""
    model = self.model
    tok = T5Tokenizer.from_pretrained("t5-base")
    # Merge the per-task generation settings into the main config.
    task_specific_config = getattr(model.config, "task_specific_params", {})
    translation_config = task_specific_config.get("translation_en_to_fr", {})
    model.config.update(translation_config)
    en_text = ' This image section from an infrared recording by the Spitzer telescope shows a "family portrait" of countless generations of stars: the oldest stars are seen as blue dots. '
    # Reference output truncated by max_length=100 below.
    new_truncated_translation = (
        "Cette section d'images provenant de l'enregistrement infrarouge effectué par le télescope Spitzer montre "
        "un "
        "« portrait familial » de générations innombrables d’étoiles : les plus anciennes sont observées "
        "sous forme "
        "de points bleus."
    )
    input_ids = tok(model.config.prefix + en_text, return_tensors="tf").input_ids
    output = model.generate(
        input_ids=input_ids,
        num_beams=4,
        length_penalty=2.0,
        max_length=100,
        no_repeat_ngram_size=3,
        do_sample=False,  # deterministic beam search
        early_stopping=True,
    )
    translation = tok.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
    self.assertEqual(translation, new_truncated_translation)
@slow
def test_translation_en_to_ro(self):
    """English->Romanian translation must match a fixed reference string."""
    model = self.model
    tok = T5Tokenizer.from_pretrained("t5-base")
    # Merge the per-task generation settings into the main config.
    task_specific_config = getattr(model.config, "task_specific_params", {})
    translation_config = task_specific_config.get("translation_en_to_ro", {})
    model.config.update(translation_config)
    original_input = "Taco Bell said it plans to add 2,000 locations in the US by 2022."
    expected_translation = "Taco Bell a declarat că intenţionează să adauge 2 000 de locaţii în SUA până în 2022."
    input_ids = tok.encode(model.config.prefix + original_input, return_tensors="tf")
    output = model.generate(
        input_ids=input_ids,
        num_beams=4,
        length_penalty=2.0,
        max_length=50,
        no_repeat_ngram_size=3,
        do_sample=False,  # deterministic beam search
        early_stopping=True,
    )
    translation = tok.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
    self.assertEqual(translation, expected_translation)
def test_finetune_keras_trainer(self):
    """Fine-tunes the model through the Keras compile/fit/evaluate API."""

    def _accuracy(y_true, y_pred):
        # Only the first generated token matters for these one-word labels.
        return tf.keras.metrics.sparse_categorical_crossentropy(y_true[:, 0], y_pred[:, 0])

    class FirstTokenAccuracy(tf.keras.metrics.MeanMetricWrapper):
        # Wraps _accuracy so it can be passed to model.compile as a metric.
        def __init__(self, name="accuracy", **kwargs):
            super().__init__(_accuracy, name=name, **kwargs)

    model = self.model
    model.compile("adam", metrics=FirstTokenAccuracy())
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    examples = [
        ("sentiment: Everything is awesome!", "positive"),
        ("sentiment: Tensorflow datasets are hard to use", "negative"),
    ]
    inputs = dict(tokenizer([x[0] for x in examples], padding=True, return_tensors="tf"))
    inputs["labels"] = tokenizer([x[1] for x in examples], return_tensors="tf").input_ids
    model.fit(inputs)
    m = model.evaluate(inputs)
    # evaluate() returns [loss, metric] -- one entry per compiled output.
    self.assertEqual(len(m), 2)
| true | true |
f7397ceefd9a0bfecc0fb5077977ef0966957942 | 8,844 | py | Python | pyutils/iolib/audio.py | SantiagoJN/spatialaudiogen | 5092b8988731f9704914beb44c5688a819508ade | [
"Apache-2.0",
"BSD-2-Clause"
] | null | null | null | pyutils/iolib/audio.py | SantiagoJN/spatialaudiogen | 5092b8988731f9704914beb44c5688a819508ade | [
"Apache-2.0",
"BSD-2-Clause"
] | null | null | null | pyutils/iolib/audio.py | SantiagoJN/spatialaudiogen | 5092b8988731f9704914beb44c5688a819508ade | [
"Apache-2.0",
"BSD-2-Clause"
] | null | null | null | import os
import scipy.signal
import numpy as np
from soundfile import SoundFile
from pyutils.iolib.video import getFFprobeMeta
from pyutils.cmd import runSystemCMD
# from scikits.audiolab import Sndfile, Format
import tempfile
import resampy
# import librosa
def load_wav(fname, rate=None):
    """Load an audio file, optionally resampling it to `rate`.

    Args:
        fname: path to the audio file readable by libsndfile.
        rate: target sample rate in Hz; if None, the file's native
            rate is kept.

    Returns:
        (signal, rate): a (num_frames, num_channels) numpy array of
        int32 samples and the sample rate actually used.
    """
    # Context manager guarantees the file handle is closed (the
    # original leaked it on every call).
    with SoundFile(fname, 'r') as fp:
        _signal = fp.buffer_read(dtype="int32")
        _signal = np.asarray(_signal).reshape((-1, fp.channels))
        _rate = fp.samplerate
    if _signal.ndim == 1:
        # Bug fix: reshape() returns a new array; the original
        # discarded the result, leaving the signal 1-D.
        _signal = _signal.reshape((-1, 1))
    if rate is not None and rate != _rate:
        # 'kaiser_fast' trades a little filter quality for speed.
        signal = resampy.resample(_signal, _rate, rate, axis=0,
                                  filter='kaiser_fast')
    else:
        signal = _signal
        rate = _rate
    return signal, rate
def save_wav(fname, signal, rate):
    """Write `signal` to `fname` as an audio file.

    Args:
        fname: destination path; the format is inferred from the
            extension by libsndfile.
        signal: (num_frames, num_channels) array of samples.
        rate: sample rate in Hz.
    """
    # Use the context manager so the handle is closed and the header
    # and frames are flushed to disk even on error; the original left
    # the SoundFile open (data could be lost) and carried several
    # commented-out experiments which are removed here.
    with SoundFile(fname, 'w', rate, signal.shape[1]) as fp:
        fp.write(signal)
def convert2wav(inp_fn, out_fn, rate=None):
    """Extract the audio track of `inp_fn` into a 16-bit PCM WAV file.

    Args:
        inp_fn: input media file (any container ffmpeg understands).
        out_fn: output .wav path (overwritten if it already exists).
        rate: optional output sample rate in Hz; if None, the input
            rate is kept.

    Raises:
        ValueError: if ffmpeg reports that the output file is empty.
    """
    cmd = ['ffmpeg', '-y',
           '-i', inp_fn,
           '-map', '0:a',           # keep only the audio stream
           '-acodec', 'pcm_s16le']  # 16-bit little-endian PCM
    if rate is not None:
        cmd += ['-ar', str(rate)]
    cmd += [out_fn]
    stdout, stderr = runSystemCMD(' '.join(cmd))
    if any(l.startswith('Output file is empty,')
           for l in stderr.split('\n')):
        # Bug fix: the original `raise (ValueError, msg)` raises a
        # tuple, which is a TypeError in Python 3; raise a proper
        # exception instance instead.
        raise ValueError('Output file is empty.\n' + stderr)
class AudioReader:
    """Sequential chunked reader for a single audio file.

    Inputs that are not wav files, or wav files at the wrong sample
    rate, are first transcoded to a temporary wav via ffmpeg and the
    temp file is removed on destruction.

    NOTE(review): this class uses Sndfile (scikits.audiolab), whose
    import is commented out at the top of this module, so it raises
    NameError as written -- confirm the intended backend (SoundFile?).
    """

    def __init__(self, fn, rate=None, pad_start=0, seek=None, duration=None, rotation=None):
        # Reuse the input directly only if it is already a wav at the
        # requested sample rate; otherwise transcode with ffmpeg.
        fp = Sndfile(fn, 'r') if fn.endswith('.wav') else None
        if fp is None or (rate is not None and fp.samplerate != rate):
            # Convert to wav file.
            # NOTE(review): machine-specific Windows path hard-coded
            # below; should come from tempfile.gettempdir() or config.
            if not os.path.isdir('c:/Users/santy/OneDrive/Escritorio/Compartida/spatialaudiogen-/tmp/'):
                os.makedirs('c:/Users/santy/OneDrive/Escritorio/Compartida/spatialaudiogen-/tmp/')
            snd_file = tempfile.NamedTemporaryFile('w', prefix='c:/Users/santy/OneDrive/Escritorio/Compartida/spatialaudiogen-/tmp/', suffix='.wav', delete=False)
            snd_file.close()  # only the unique file name is needed
            convert2wav(fn, snd_file.name, rate)
            self.snd_fn = snd_file.name
            self.rm_flag = True  # temp file: delete it in __del__
        else:
            self.snd_fn = fn
            self.rm_flag = False
        self.fp = Sndfile(self.snd_fn, 'r')
        self.num_channels = self.fp.channels
        self.rate = self.fp.samplerate
        self.num_frames = self.fp.nframes
        self.duration = self.num_frames / float(self.rate)
        self.k = 0            # read cursor, in frames
        self.pad = pad_start  # zero frames still to prepend
        if seek is not None and seek > 0:
            # Skip ahead by reading and discarding frames.
            num_frames = int(seek * self.rate)
            self.fp.read_frames(num_frames)
        else:
            seek = 0
        if duration is not None:
            self.duration = min(duration, self.duration-seek)
            self.num_frames = int(self.duration * self.rate)
        if rotation is not None:
            # Rotation about the vertical axis: Z is untouched while
            # X and Y are mixed.  Channel order W, Y, Z, X per the
            # per-row comments below.
            assert self.num_channels > 2  # Spatial audio
            assert -np.pi <= rotation < np.pi
            c = np.cos(rotation)
            s = np.sin(rotation)
            rot_mtx = np.array([[1, 0, 0, 0],    # W' = W
                                [0, c, 0, s],    # Y' = X sin + Y cos
                                [0, 0, 1, 0],    # Z' = Z
                                [0, -s, 0, c]])  # X' = X cos - Y sin
            self.rot_mtx = rot_mtx
        else:
            self.rot_mtx = None

    def __del__(self):
        # Best-effort cleanup of the temporary wav file.
        if self.rm_flag:
            os.remove(self.snd_fn)

    def get_chunk(self, n=1, force_size=False):
        """Returns the next chunk of up to `n` frames, or None at EOF.

        If force_size is True, a short final chunk is not returned.
        Any remaining pad_start zero-frames are consumed first.
        """
        if self.k >= self.num_frames:
            return None
        frames_left = self.num_frames - self.k
        if force_size and n > frames_left:
            return None
        # Pad zeros to start.
        if self.pad > 0:
            pad_size = min(n, self.pad)
            pad_chunk = np.zeros((pad_size, self.num_channels))
            n -= pad_size
            self.pad -= pad_size
        else:
            pad_chunk = None
        # Read frames and restore the (frames, channels) layout.
        chunk_size = min(n, frames_left)
        chunk = self.fp.read_frames(chunk_size)
        chunk = chunk.reshape((chunk.shape[0], self.num_channels))
        self.k += chunk_size
        if pad_chunk is not None:
            chunk = np.concatenate((pad_chunk.astype(chunk.dtype), chunk), 0)
        if self.rot_mtx is not None:
            # Apply the spatial rotation to every frame.
            chunk = np.dot(chunk, self.rot_mtx.T)
        return chunk

    def loop_chunks(self, n=1, force_size=False):
        """Yields successive chunks of up to `n` frames until exhausted.

        NOTE(review): the force_size argument is not forwarded to
        get_chunk (a literal False is passed) -- confirm intent.
        """
        while True:
            chunk = self.get_chunk(n, force_size=False)
            if chunk is None:
                break
            yield chunk
class AudioReader2:
    """Streams audio stored as one wav file per second of content.

    The folder is expected to contain files named 000000.wav,
    000001.wav, ... each holding one second of audio; see get_chunk.
    """

    def __init__(self, audio_folder, rate=None,
                 seek=0, duration=None, rotation=None):
        self.audio_folder = audio_folder
        fns = os.listdir(audio_folder)
        self.num_files = len(fns)
        # fp = Sndfile(os.path.join(self.audio_folder, fns[0]), 'r')
        # NOTE(review): fp is only used to probe samplerate/channels
        # and is never closed -- handle leak.
        fp = SoundFile(os.path.join(self.audio_folder, fns[0]), 'r')
        data, fps = load_wav(os.path.join(self.audio_folder, fns[0]))
        # NOTE(review): when `rate` is given this still stores the
        # file's native samplerate, not `rate` -- confirm whether
        # float(rate) was intended here.
        self.rate = float(fp.samplerate) if rate is not None else fps
        self.num_channels = fp.channels
        # One file per second => duration in seconds equals file count.
        self.duration = self.num_files
        # NOTE(review): raises TypeError when rate is None (uses the
        # raw argument rather than self.rate).
        self.num_frames = int(self.duration * rate)
        self.cur_frame = int(seek * self.rate)   # read cursor, frames
        self.time = self.cur_frame / self.rate   # read cursor, seconds
        self.max_time = self.duration
        if duration is not None:
            self.max_time = min(seek + duration, self.max_time)
        if rotation is not None:
            # Rotation about the vertical axis: Z untouched, X and Y
            # mixed.  Channel order W, Y, Z, X per the row comments.
            assert self.num_channels > 2  # Spatial audio
            assert -np.pi <= rotation < np.pi
            c = np.cos(rotation)
            s = np.sin(rotation)
            rot_mtx = np.array([[1, 0, 0, 0],    # W' = W
                                [0, c, 0, s],    # Y' = X sin + Y cos
                                [0, 0, 1, 0],    # Z' = Z
                                [0, -s, 0, c]])  # X' = X cos - Y sin
            self.rot_mtx = rot_mtx
        else:
            self.rot_mtx = None

    def get(self, start_time, size):
        """Random-access read of `size` frames starting at `start_time` (s).

        Returns None if any required per-second file is missing; no
        rotation is applied on this path.
        """
        index = range(int(start_time), int(start_time + size / self.rate) + 1)
        fns = [os.path.join(self.audio_folder, '{:06d}.wav'.format(i))
               for i in index]
        chunk = []
        for fn in fns:
            if not os.path.exists(fn):
                return None
            data, _ = load_wav(fn, self.rate)
            chunk.append(data)
        chunk = np.concatenate(chunk, 0) if len(chunk) > 1 else chunk[0]
        # Trim the partial second at the start of the concatenated data.
        ss = int((start_time - int(start_time)) * self.rate)
        chunk = chunk[ss:ss+size, :]
        return chunk

    def get_chunk(self, n=1, force_size=False):
        """Returns the next chunk of up to `n` frames, or None at EOF.

        If force_size is True, a short final chunk is not returned.
        """
        if self.time >= self.max_time:
            return None
        frames_left = int((self.max_time - self.time) * self.rate)
        if force_size and n > frames_left:
            return None
        # Read frames from every one-second file the chunk spans.
        chunk_size = min(n, frames_left)
        start_time = self.cur_frame / self.rate
        end_frame_no = self.cur_frame + chunk_size - 1
        end_time = end_frame_no / self.rate
        index = range(int(start_time), int(end_time) + 1)
        fns = [os.path.join(self.audio_folder, '{:06d}.wav'.format(i))
               for i in index]
        chunk = []
        for fn in fns:
            data, _ = load_wav(fn, self.rate)
            chunk.append(data)
        chunk = np.concatenate(chunk, 0) if len(chunk) > 1 else chunk[0]
        # Trim the partial second at the start of the concatenated data.
        ss = int((self.time - int(self.time)) * self.rate)
        chunk = chunk[ss:ss+chunk_size, :]
        self.cur_frame += chunk.shape[0]
        self.time = self.cur_frame / self.rate
        if self.rot_mtx is not None:
            # Apply the spatial rotation to every frame.
            chunk = np.dot(chunk, self.rot_mtx.T)
        return chunk

    def loop_chunks(self, n=1, force_size=False):
        """Yields successive chunks of up to `n` frames until exhausted.

        NOTE(review): the force_size argument is not forwarded to
        get_chunk (a literal False is passed) -- confirm intent.
        """
        while True:
            chunk = self.get_chunk(n, force_size=False)
            if chunk is None:
                break
            yield chunk
def test_audio_reader():
    """Smoke test: streams 1-second chunks from a sample ambisonics folder.

    Requires the hard-coded dataset path below to exist; prints the
    shape and value range of each chunk.
    """
    reader = AudioReader2('/gpu2_data/morgado/spatialaudiogen/youtube/train/687gkvLi5kI/ambix',
                          rate=10000, seek=0, duration=5.5)
    for s in reader.loop_chunks(10000):
        # Bug fix: the original `print(s.shape), s.max(), s.min()` built
        # a throwaway tuple and never printed the max/min values.
        print(s.shape, s.max(), s.min())
# test_audio_reader()
| 33.885057 | 162 | 0.558005 | import os
import scipy.signal
import numpy as np
from soundfile import SoundFile
from pyutils.iolib.video import getFFprobeMeta
from pyutils.cmd import runSystemCMD
import tempfile
import resampy
def load_wav(fname, rate=None):
fp = SoundFile(fname, 'r')
_signal = fp.buffer_read(dtype="int32")
_signal = np.asarray(_signal).reshape((-1, fp.channels))
_rate = fp.samplerate
if _signal.ndim == 1:
_signal.reshape((-1, 1))
if rate is not None and rate != _rate:
signal = resampy.resample(_signal, _rate, rate, axis=0, filter='kaiser_fast')
else:
signal = _signal
rate = _rate
return signal, rate
def save_wav(fname, signal, rate):
fp = SoundFile(fname, 'w', rate, signal.shape[1])
fp.write(signal)
def convert2wav(inp_fn, out_fn, rate=None):
cmd = ['ffmpeg', '-y',
'-i', inp_fn,
'-map', '0:a',
'-acodec', 'pcm_s16le']
if rate is not None:
cmd += ['-ar', str(rate),]
cmd += [out_fn]
stdout, stderr = runSystemCMD(' '.join(cmd))
if any([l.startswith('Output file is empty,')
for l in stderr.split('\n')]):
raise (ValueError, 'Output file is empty.\n' + stderr)
class AudioReader:
def __init__(self, fn, rate=None, pad_start=0, seek=None, duration=None, rotation=None):
fp = Sndfile(fn, 'r') if fn.endswith('.wav') else None
if fp is None or (rate is not None and fp.samplerate != rate):
if not os.path.isdir('c:/Users/santy/OneDrive/Escritorio/Compartida/spatialaudiogen-/tmp/'):
os.makedirs('c:/Users/santy/OneDrive/Escritorio/Compartida/spatialaudiogen-/tmp/')
snd_file = tempfile.NamedTemporaryFile('w', prefix='c:/Users/santy/OneDrive/Escritorio/Compartida/spatialaudiogen-/tmp/', suffix='.wav', delete=False)
snd_file.close()
convert2wav(fn, snd_file.name, rate)
self.snd_fn = snd_file.name
self.rm_flag = True
else:
self.snd_fn = fn
self.rm_flag = False
self.fp = Sndfile(self.snd_fn, 'r')
self.num_channels = self.fp.channels
self.rate = self.fp.samplerate
self.num_frames = self.fp.nframes
self.duration = self.num_frames / float(self.rate)
self.k = 0
self.pad = pad_start
if seek is not None and seek > 0:
num_frames = int(seek * self.rate)
self.fp.read_frames(num_frames)
else:
seek = 0
if duration is not None:
self.duration = min(duration, self.duration-seek)
self.num_frames = int(self.duration * self.rate)
if rotation is not None:
assert self.num_channels > 2
assert -np.pi <= rotation < np.pi
c = np.cos(rotation)
s = np.sin(rotation)
rot_mtx = np.array([[1, 0, 0, 0],
[0, c, 0, s], # Y' = X sin + Y cos
[0, 0, 1, 0],
[0, -s, 0, c]]) # X' = X cos - Y sin
self.rot_mtx = rot_mtx
else:
self.rot_mtx = None
def __del__(self):
if self.rm_flag:
os.remove(self.snd_fn)
def get_chunk(self, n=1, force_size=False):
if self.k >= self.num_frames:
return None
frames_left = self.num_frames - self.k
if force_size and n > frames_left:
return None
if self.pad > 0:
pad_size = min(n, self.pad)
pad_chunk = np.zeros((pad_size, self.num_channels))
n -= pad_size
self.pad -= pad_size
else:
pad_chunk = None
chunk_size = min(n, frames_left)
chunk = self.fp.read_frames(chunk_size)
chunk = chunk.reshape((chunk.shape[0], self.num_channels))
self.k += chunk_size
if pad_chunk is not None:
chunk = np.concatenate((pad_chunk.astype(chunk.dtype), chunk), 0)
if self.rot_mtx is not None:
chunk = np.dot(chunk, self.rot_mtx.T)
return chunk
def loop_chunks(self, n=1, force_size=False):
while True:
chunk = self.get_chunk(n, force_size=False)
if chunk is None:
break
yield chunk
class AudioReader2:
def __init__(self, audio_folder, rate=None,
seek=0, duration=None, rotation=None):
self.audio_folder = audio_folder
fns = os.listdir(audio_folder)
self.num_files = len(fns)
fp = SoundFile(os.path.join(self.audio_folder, fns[0]), 'r')
data, fps = load_wav(os.path.join(self.audio_folder, fns[0]))
self.rate = float(fp.samplerate) if rate is not None else fps
self.num_channels = fp.channels
self.duration = self.num_files
self.num_frames = int(self.duration * rate)
self.cur_frame = int(seek * self.rate)
self.time = self.cur_frame / self.rate
self.max_time = self.duration
if duration is not None:
self.max_time = min(seek + duration, self.max_time)
if rotation is not None:
assert self.num_channels > 2
assert -np.pi <= rotation < np.pi
c = np.cos(rotation)
s = np.sin(rotation)
rot_mtx = np.array([[1, 0, 0, 0],
[0, c, 0, s], # Y' = X sin + Y cos
[0, 0, 1, 0],
[0, -s, 0, c]]) # X' = X cos - Y sin
self.rot_mtx = rot_mtx
else:
self.rot_mtx = None
def get(self, start_time, size):
index = range(int(start_time), int(start_time + size / self.rate) + 1)
fns = [os.path.join(self.audio_folder, '{:06d}.wav'.format(i))
for i in index]
chunk = []
for fn in fns:
if not os.path.exists(fn):
return None
data, _ = load_wav(fn, self.rate)
chunk.append(data)
chunk = np.concatenate(chunk, 0) if len(chunk) > 1 else chunk[0]
ss = int((start_time - int(start_time)) * self.rate)
chunk = chunk[ss:ss+size, :]
return chunk
def get_chunk(self, n=1, force_size=False):
if self.time >= self.max_time:
return None
frames_left = int((self.max_time - self.time) * self.rate)
if force_size and n > frames_left:
return None
chunk_size = min(n, frames_left)
start_time = self.cur_frame / self.rate
end_frame_no = self.cur_frame + chunk_size - 1
end_time = end_frame_no / self.rate
index = range(int(start_time), int(end_time) + 1)
fns = [os.path.join(self.audio_folder, '{:06d}.wav'.format(i))
for i in index]
chunk = []
for fn in fns:
data, _ = load_wav(fn, self.rate)
chunk.append(data)
chunk = np.concatenate(chunk, 0) if len(chunk) > 1 else chunk[0]
ss = int((self.time - int(self.time)) * self.rate)
chunk = chunk[ss:ss+chunk_size, :]
self.cur_frame += chunk.shape[0]
self.time = self.cur_frame / self.rate
if self.rot_mtx is not None:
chunk = np.dot(chunk, self.rot_mtx.T)
return chunk
def loop_chunks(self, n=1, force_size=False):
while True:
chunk = self.get_chunk(n, force_size=False)
if chunk is None:
break
yield chunk
def test_audio_reader():
reader = AudioReader2('/gpu2_data/morgado/spatialaudiogen/youtube/train/687gkvLi5kI/ambix',
rate=10000, seek=0, duration=5.5)
for s in reader.loop_chunks(10000):
print(s.shape), s.max(), s.min()
| true | true |
f7397d6d8749cf208e8db8144b75bcfd12aa7be3 | 46 | py | Python | payeer_api/__init__.py | MrGreen0/payeer_api | ccac0d91f5c33718a98b0803740e1ee4577d8da2 | [
"MIT"
] | 15 | 2018-05-17T09:52:58.000Z | 2022-03-26T01:25:24.000Z | payeer_api/__init__.py | naprsa/payeer_api | 247fe588dd5552cce93e41cc922f4f85a338f8d2 | [
"MIT"
] | 4 | 2020-03-08T01:56:27.000Z | 2022-03-26T07:23:01.000Z | payeer_api/__init__.py | naprsa/payeer_api | 247fe588dd5552cce93e41cc922f4f85a338f8d2 | [
"MIT"
] | 11 | 2018-04-01T13:42:34.000Z | 2022-03-26T01:24:25.000Z | from .api import PayeerAPI, PayeerAPIException | 46 | 46 | 0.869565 | from .api import PayeerAPI, PayeerAPIException | true | true |
f7397e75fe3089c6eeef45bde9626ad365d0d2ff | 1,289 | py | Python | test/functional/p2p_mempool.py | thehomosapien/AMLBitcoin | f097ca52c2e8039761f1927d83a9fe0b4c355b1c | [
"MIT"
] | null | null | null | test/functional/p2p_mempool.py | thehomosapien/AMLBitcoin | f097ca52c2e8039761f1927d83a9fe0b4c355b1c | [
"MIT"
] | null | null | null | test/functional/p2p_mempool.py | thehomosapien/AMLBitcoin | f097ca52c2e8039761f1927d83a9fe0b4c355b1c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Copyright (c) 2017 The AmlBitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test p2p mempool message.
Test that nodes are disconnected if they send mempool messages when bloom
filters are not enabled.
"""
from test_framework.mininode import *
from test_framework.test_framework import AmlBitcoinTestFramework
from test_framework.util import *
class P2PMempoolTests(AmlBitcoinTestFramework):
    """Checks that a peer sending 'mempool' is disconnected when the
    node has bloom filters disabled."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
        # Disable bloom filter support so 'mempool' requests are not allowed.
        self.extra_args = [["-peerbloomfilters=0"]]

    def run_test(self):
        # Connect a mininode and complete the version handshake.
        aTestNode = NodeConnCB()
        node = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], aTestNode)
        aTestNode.add_connection(node)
        NetworkThread().start()
        aTestNode.wait_for_verack()
        # Request the mempool; with bloom filters off this is a
        # protocol violation.
        aTestNode.send_message(msg_mempool())
        aTestNode.wait_for_disconnect()
        # The mininode must be disconnected at this point.
        assert_equal(len(self.nodes[0].getpeerinfo()), 0)
if __name__ == '__main__':
P2PMempoolTests().main()
| 33.051282 | 75 | 0.711404 |
from test_framework.mininode import *
from test_framework.test_framework import AmlBitcoinTestFramework
from test_framework.util import *
class P2PMempoolTests(AmlBitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-peerbloomfilters=0"]]
def run_test(self):
aTestNode = NodeConnCB()
node = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], aTestNode)
aTestNode.add_connection(node)
NetworkThread().start()
aTestNode.wait_for_verack()
aTestNode.send_message(msg_mempool())
aTestNode.wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 0)
if __name__ == '__main__':
P2PMempoolTests().main()
| true | true |
f7397f5d7807ad22c78ac3a4d71ca0baada27b94 | 235,533 | py | Python | tools/cpplint.py | weizhenwei/OpenRTP | 7254c61064b2ed5d0023c688daa1c9fe768c3cb0 | [
"BSD-3-Clause"
] | 13 | 2016-10-03T12:18:13.000Z | 2021-06-29T06:09:02.000Z | tools/cpplint.py | weizhenwei/OpenRTP | 7254c61064b2ed5d0023c688daa1c9fe768c3cb0 | [
"BSD-3-Clause"
] | null | null | null | tools/cpplint.py | weizhenwei/OpenRTP | 7254c61064b2ed5d0023c688daa1c9fe768c3cb0 | [
"BSD-3-Clause"
] | 6 | 2017-04-10T02:46:25.000Z | 2021-11-30T06:13:57.000Z | #!/usr/bin/python
#
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Does google-lint on c++ files.
The goal of this script is to identify places in the code that *may*
be in non-compliance with google style. It does not attempt to fix
up these problems -- the point is to educate. It does also not
attempt to find all problems, or to ensure that everything it does
find is legitimately a problem.
In particular, we can get very confused by /* and // inside strings!
We do a small hack, which is to ignore //'s with "'s after them on the
same line, but it is far from perfect (in either direction).
"""
import codecs
import copy
import getopt
import math # for log
import os
import re
import sre_compile
import string
import sys
import unicodedata
_USAGE = """
Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
[--counting=total|toplevel|detailed] [--root=subdir]
[--linelength=digits]
<file> [file] ...
The style guidelines this tries to follow are those in
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
Every problem is given a confidence score from 1-5, with 5 meaning we are
certain of the problem, and 1 meaning it could be a legitimate construct.
This will miss some errors, and is not a substitute for a code review.
To suppress false-positive errors of a certain category, add a
'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
suppresses errors of all categories on that line.
The files passed in will be linted; at least one file must be provided.
Default linted extensions are .cc, .cpp, .cu, .cuh and .h. Change the
extensions with the --extensions flag.
Flags:
output=vs7
By default, the output is formatted to ease emacs parsing. Visual Studio
compatible output (vs7) may also be used. Other formats are unsupported.
verbose=#
Specify a number 0-5 to restrict errors to certain verbosity levels.
filter=-x,+y,...
Specify a comma-separated list of category-filters to apply: only
error messages whose category names pass the filters will be printed.
(Category names are printed with the message and look like
"[whitespace/indent]".) Filters are evaluated left to right.
"-FOO" and "FOO" means "do not print categories that start with FOO".
"+FOO" means "do print categories that start with FOO".
Examples: --filter=-whitespace,+whitespace/braces
--filter=whitespace,runtime/printf,+runtime/printf_format
--filter=-,+build/include_what_you_use
To see a list of all the categories used in cpplint, pass no arg:
--filter=
counting=total|toplevel|detailed
The total number of errors found is always printed. If
'toplevel' is provided, then the count of errors in each of
the top-level categories like 'build' and 'whitespace' will
also be printed. If 'detailed' is provided, then a count
is provided for each category like 'build/class'.
root=subdir
The root directory used for deriving header guard CPP variable.
By default, the header guard CPP variable is calculated as the relative
path to the directory that contains .git, .hg, or .svn. When this flag
is specified, the relative path is calculated from the specified
directory. If the specified directory does not exist, this flag is
ignored.
Examples:
Assuming that src/.git exists, the header guard CPP variables for
src/chrome/browser/ui/browser.h are:
No flag => CHROME_BROWSER_UI_BROWSER_H_
--root=chrome => BROWSER_UI_BROWSER_H_
--root=chrome/browser => UI_BROWSER_H_
linelength=digits
This is the allowed line length for the project. The default value is
80 characters.
Examples:
--linelength=120
extensions=extension,extension,...
The allowed file extensions that cpplint will check
Examples:
--extensions=hpp,cpp
cpplint.py supports per-directory configurations specified in CPPLINT.cfg
files. CPPLINT.cfg file can contain a number of key=value pairs.
Currently the following options are supported:
set noparent
filter=+filter1,-filter2,...
exclude_files=regex
linelength=80
"set noparent" option prevents cpplint from traversing directory tree
upwards looking for more .cfg files in parent directories. This option
is usually placed in the top-level project directory.
The "filter" option is similar in function to --filter flag. It specifies
message filters in addition to the |_DEFAULT_FILTERS| and those specified
through --filter command-line flag.
"exclude_files" allows to specify a regular expression to be matched against
a file name. If the expression matches, the file is skipped and not run
through liner.
"linelength" allows to specify the allowed line length for the project.
CPPLINT.cfg has an effect on files in the same directory and all
sub-directories, unless overridden by a nested configuration file.
Example file:
filter=-build/include_order,+build/include_alpha
exclude_files=.*\.cc
The above example disables build/include_order warning and enables
build/include_alpha as well as excludes all .cc from being
processed by linter, in the current directory (where the .cfg
file is located) and all sub-directories.
"""
# We categorize each error message we print. Here are the categories.
# We want an explicit list so we can list them all in cpplint --filter=.
# If you add a new error message with a new category, add it to the list
# here! cpplint_unittest.py should tell you if you forget to do this.
_ERROR_CATEGORIES = [
'build/class',
'build/c++11',
'build/deprecated',
'build/endif_comment',
'build/explicit_make_pair',
'build/forward_decl',
'build/header_guard',
'build/include',
'build/include_alpha',
'build/include_order',
'build/include_what_you_use',
'build/namespaces',
'build/printf_format',
'build/storage_class',
'legal/copyright',
'readability/alt_tokens',
'readability/braces',
'readability/casting',
'readability/check',
'readability/constructors',
'readability/fn_size',
'readability/function',
'readability/inheritance',
'readability/multiline_comment',
'readability/multiline_string',
'readability/namespace',
'readability/nolint',
'readability/nul',
'readability/streams',
'readability/todo',
'readability/utf8',
'runtime/arrays',
'runtime/casting',
'runtime/explicit',
'runtime/int',
'runtime/init',
'runtime/invalid_increment',
'runtime/member_string_references',
'runtime/memset',
'runtime/indentation_namespace',
'runtime/operator',
'runtime/printf',
'runtime/printf_format',
'runtime/references',
'runtime/string',
'runtime/threadsafe_fn',
'runtime/vlog',
'whitespace/blank_line',
'whitespace/braces',
'whitespace/comma',
'whitespace/comments',
'whitespace/empty_conditional_body',
'whitespace/empty_loop_body',
'whitespace/end_of_line',
'whitespace/ending_newline',
'whitespace/forcolon',
'whitespace/indent',
'whitespace/line_length',
'whitespace/newline',
'whitespace/operators',
'whitespace/parens',
'whitespace/semicolon',
'whitespace/tab',
'whitespace/todo'
]
# The default state of the category filter. This is overridden by the --filter=
# flag. By default all errors are on, so only add here categories that should be
# off by default (i.e., categories that must be enabled by the --filter= flags).
# All entries here should start with a '-' or '+', as in the --filter= flag.
_DEFAULT_FILTERS = ['-build/include_alpha']
# We used to check for high-bit characters, but after much discussion we
# decided those were OK, as long as they were in UTF-8 and didn't represent
# hard-coded international strings, which belong in a separate i18n file.
# C++ headers
_CPP_HEADERS = frozenset([
# Legacy
'algobase.h',
'algo.h',
'alloc.h',
'builtinbuf.h',
'bvector.h',
'complex.h',
'defalloc.h',
'deque.h',
'editbuf.h',
'fstream.h',
'function.h',
'hash_map',
'hash_map.h',
'hash_set',
'hash_set.h',
'hashtable.h',
'heap.h',
'indstream.h',
'iomanip.h',
'iostream.h',
'istream.h',
'iterator.h',
'list.h',
'map.h',
'multimap.h',
'multiset.h',
'ostream.h',
'pair.h',
'parsestream.h',
'pfstream.h',
'procbuf.h',
'pthread_alloc',
'pthread_alloc.h',
'rope',
'rope.h',
'ropeimpl.h',
'set.h',
'slist',
'slist.h',
'stack.h',
'stdiostream.h',
'stl_alloc.h',
'stl_relops.h',
'streambuf.h',
'stream.h',
'strfile.h',
'strstream.h',
'tempbuf.h',
'tree.h',
'type_traits.h',
'vector.h',
# 17.6.1.2 C++ library headers
'algorithm',
'array',
'atomic',
'bitset',
'chrono',
'codecvt',
'complex',
'condition_variable',
'deque',
'exception',
'forward_list',
'fstream',
'functional',
'future',
'initializer_list',
'iomanip',
'ios',
'iosfwd',
'iostream',
'istream',
'iterator',
'limits',
'list',
'locale',
'map',
'memory',
'mutex',
'new',
'numeric',
'ostream',
'queue',
'random',
'ratio',
'regex',
'set',
'sstream',
'stack',
'stdexcept',
'streambuf',
'string',
'strstream',
'system_error',
'thread',
'tuple',
'typeindex',
'typeinfo',
'type_traits',
'unordered_map',
'unordered_set',
'utility',
'valarray',
'vector',
# 17.6.1.2 C++ headers for C library facilities
'cassert',
'ccomplex',
'cctype',
'cerrno',
'cfenv',
'cfloat',
'cinttypes',
'ciso646',
'climits',
'clocale',
'cmath',
'csetjmp',
'csignal',
'cstdalign',
'cstdarg',
'cstdbool',
'cstddef',
'cstdint',
'cstdio',
'cstdlib',
'cstring',
'ctgmath',
'ctime',
'cuchar',
'cwchar',
'cwctype',
])
# These headers are excluded from [build/include] and [build/include_order]
# checks:
# - Anything not following google file name conventions (containing an
# uppercase character, such as Python.h or nsStringAPI.h, for example).
# - Lua headers.
_THIRD_PARTY_HEADERS_PATTERN = re.compile(
r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$')
# Assertion macros. These are defined in base/logging.h and
# testing/base/gunit.h. Note that the _M versions need to come first
# for substring matching to work.
_CHECK_MACROS = [
'DCHECK', 'CHECK',
'EXPECT_TRUE_M', 'EXPECT_TRUE',
'ASSERT_TRUE_M', 'ASSERT_TRUE',
'EXPECT_FALSE_M', 'EXPECT_FALSE',
'ASSERT_FALSE_M', 'ASSERT_FALSE',
]
# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
('>=', 'GE'), ('>', 'GT'),
('<=', 'LE'), ('<', 'LT')]:
_CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
_CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
('>=', 'LT'), ('>', 'LE'),
('<=', 'GT'), ('<', 'GE')]:
_CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
_CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
# Alternative tokens and their replacements. For full list, see section 2.5
# Alternative tokens [lex.digraph] in the C++ standard.
#
# Digraphs (such as '%:') are not included here since it's a mess to
# match those on a word boundary.
_ALT_TOKEN_REPLACEMENT = {
'and': '&&',
'bitor': '|',
'or': '||',
'xor': '^',
'compl': '~',
'bitand': '&',
'and_eq': '&=',
'or_eq': '|=',
'xor_eq': '^=',
'not': '!',
'not_eq': '!='
}
# Compile regular expression that matches all the above keywords. The "[ =()]"
# bit is meant to avoid matching these keywords outside of boolean expressions.
#
# False positives include C-style multi-line comments and multi-line strings
# but those have always been troublesome for cpplint.
_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
# These constants define types of headers for use with
# _IncludeState.CheckNextIncludeOrder().
_C_SYS_HEADER = 1
_CPP_SYS_HEADER = 2
_LIKELY_MY_HEADER = 3
_POSSIBLE_MY_HEADER = 4
_OTHER_HEADER = 5
# These constants define the current inline assembly state
_NO_ASM = 0 # Outside of inline assembly block
_INSIDE_ASM = 1 # Inside inline assembly block
_END_ASM = 2 # Last line of inline assembly block
_BLOCK_ASM = 3 # The whole block is an inline assembly block
# Match start of assembly blocks
_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
r'(?:\s+(volatile|__volatile__))?'
r'\s*[{(]')
_regexp_compile_cache = {}
# {str, set(int)}: a map from error categories to sets of linenumbers
# on which those errors are expected and should be suppressed.
_error_suppressions = {}
# The root directory used for deriving header guard CPP variable.
# This is set by --root flag.
_root = None
# The allowed line length of files.
# This is set by --linelength flag.
_line_length = 80
# The allowed extensions for file names
# This is set by --extensions flag.
_valid_extensions = set(['c', 'cc', 'h', 'cpp', 'cu', 'cuh', "m", "mm"])
def ParseNolintSuppressions(filename, raw_line, linenum, error):
  """Updates the global list of error-suppressions.

  Parses any NOLINT comments on the current line, updating the global
  error_suppressions store.  Reports an error if the NOLINT comment
  was malformed.

  Args:
    filename: str, the name of the input file.
    raw_line: str, the line of input text, with comments.
    linenum: int, the number of the current line.
    error: function, an error handler.
  """
  match = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line)
  if not match:
    return

  # NOLINTNEXTLINE suppresses the line after the comment, plain NOLINT
  # suppresses the comment's own line.
  target_line = linenum + 1 if match.group(1) else linenum

  category = match.group(2)
  if category in (None, '(*)'):  # => "suppress all"
    _error_suppressions.setdefault(None, set()).add(target_line)
  elif category.startswith('(') and category.endswith(')'):
    category = category[1:-1]
    if category in _ERROR_CATEGORIES:
      _error_suppressions.setdefault(category, set()).add(target_line)
    else:
      error(filename, linenum, 'readability/nolint', 5,
            'Unknown NOLINT error category: %s' % category)
def ResetNolintSuppressions():
  """Resets the set of NOLINT suppressions to empty."""
  # Called between files so one file's NOLINTs do not leak into the next.
  _error_suppressions.clear()
def IsErrorSuppressedByNolint(category, linenum):
  """Returns true if the specified error category is suppressed on this line.

  Consults the global error_suppressions map populated by
  ParseNolintSuppressions/ResetNolintSuppressions.

  Args:
    category: str, the category of the error.
    linenum: int, the current line number.
  Returns:
    bool, True iff the error should be suppressed due to a NOLINT comment.
  """
  # A None key holds line numbers where *every* category is suppressed.
  for key in (category, None):
    if linenum in _error_suppressions.get(key, set()):
      return True
  return False
def Match(pattern, s):
  """Matches the string with the pattern, caching the compiled regexp.

  Args:
    pattern: regex pattern string.
    s: string to match against (anchored at the start, as with re.match).
  Returns:
    The match object, or None if the pattern does not match at the start.
  """
  # The regexp compilation caching is inlined in both Match and Search for
  # performance reasons; factoring it out into a separate function turns out
  # to be noticeably expensive.
  if pattern not in _regexp_compile_cache:
    # Use the public re.compile API instead of the undocumented internal
    # sre_compile module (deprecated since Python 3.11); behavior is the same.
    _regexp_compile_cache[pattern] = re.compile(pattern)
  return _regexp_compile_cache[pattern].match(s)
def ReplaceAll(pattern, rep, s):
  """Replaces instances of pattern in a string with a replacement.

  The compiled regex is kept in a cache shared by Match and Search.

  Args:
    pattern: regex pattern
    rep: replacement text
    s: search string

  Returns:
    string with replacements made (or original string if no replacements)
  """
  if pattern not in _regexp_compile_cache:
    # Use the public re.compile API instead of the undocumented internal
    # sre_compile module (deprecated since Python 3.11); behavior is the same.
    _regexp_compile_cache[pattern] = re.compile(pattern)
  return _regexp_compile_cache[pattern].sub(rep, s)
def Search(pattern, s):
  """Searches the string for the pattern, caching the compiled regexp.

  Args:
    pattern: regex pattern string.
    s: string to search (unanchored, as with re.search).
  Returns:
    The match object, or None if the pattern is not found.
  """
  if pattern not in _regexp_compile_cache:
    # Use the public re.compile API instead of the undocumented internal
    # sre_compile module (deprecated since Python 3.11); behavior is the same.
    _regexp_compile_cache[pattern] = re.compile(pattern)
  return _regexp_compile_cache[pattern].search(s)
class _IncludeState(object):
  """Tracks line numbers for includes, and the order in which includes appear.

  include_list contains list of lists of (header, line number) pairs.
  It's a lists of lists rather than just one flat list to make it
  easier to update across preprocessor boundaries.

  Call CheckNextIncludeOrder() once for each header in the file, passing
  in the type constants defined above. Calls in an illegal order will
  raise an _IncludeError with an appropriate error message.
  """
  # self._section will move monotonically through this set. If it ever
  # needs to move backwards, CheckNextIncludeOrder will raise an error.
  _INITIAL_SECTION = 0
  _MY_H_SECTION = 1
  _C_SECTION = 2
  _CPP_SECTION = 3
  _OTHER_H_SECTION = 4

  # Human-readable names for the header-type constants, used in messages.
  _TYPE_NAMES = {
      _C_SYS_HEADER: 'C system header',
      _CPP_SYS_HEADER: 'C++ system header',
      _LIKELY_MY_HEADER: 'header this file implements',
      _POSSIBLE_MY_HEADER: 'header this file may implement',
      _OTHER_HEADER: 'other header',
      }

  # Human-readable names for the section constants, used in messages.
  _SECTION_NAMES = {
      _INITIAL_SECTION: "... nothing. (This can't be an error.)",
      _MY_H_SECTION: 'a header this file implements',
      _C_SECTION: 'C system header',
      _CPP_SECTION: 'C++ system header',
      _OTHER_H_SECTION: 'other header',
      }

  def __init__(self):
    self.include_list = [[]]
    self.ResetSection('')

  def FindHeader(self, header):
    """Check if a header has already been included.

    Args:
      header: header to check.
    Returns:
      Line number of previous occurrence, or -1 if the header has not
      been seen before.
    """
    for section_list in self.include_list:
      for f in section_list:
        if f[0] == header:
          return f[1]
    return -1

  def ResetSection(self, directive):
    """Reset section checking for preprocessor directive.

    Args:
      directive: preprocessor directive (e.g. "if", "else").
    """
    # The name of the current section.
    self._section = self._INITIAL_SECTION
    # The path of last found header.
    self._last_header = ''

    # Update list of includes.  Note that we never pop from the
    # include list.
    if directive in ('if', 'ifdef', 'ifndef'):
      # Entering a conditional block: headers inside get their own sub-list.
      self.include_list.append([])
    elif directive in ('else', 'elif'):
      # An alternate branch replaces the headers of the branch it supersedes.
      self.include_list[-1] = []

  def SetLastHeader(self, header_path):
    self._last_header = header_path

  def CanonicalizeAlphabeticalOrder(self, header_path):
    """Returns a path canonicalized for alphabetical comparison.

    - replaces "-" with "_" so they both cmp the same.
    - removes '-inl' since we don't require them to be after the main header.
    - lowercase everything, just in case.

    Args:
      header_path: Path to be canonicalized.
    Returns:
      Canonicalized path.
    """
    return header_path.replace('-inl.h', '.h').replace('-', '_').lower()

  def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
    """Check if a header is in alphabetical order with the previous header.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      header_path: Canonicalized header to be checked.
    Returns:
      Returns true if the header is in alphabetical order.
    """
    # If previous section is different from current section, _last_header will
    # be reset to empty string, so it's always less than current header.
    #
    # If previous line was a blank line, assume that the headers are
    # intentionally sorted the way they are.
    if (self._last_header > header_path and
        not Match(r'^\s*$', clean_lines.elided[linenum - 1])):
      return False
    return True

  def CheckNextIncludeOrder(self, header_type):
    """Returns a non-empty error message if the next header is out of order.

    This function also updates the internal state to be ready to check
    the next include.

    Args:
      header_type: One of the _XXX_HEADER constants defined above.
    Returns:
      The empty string if the header is in the right order, or an
      error message describing what's wrong.
    """
    error_message = ('Found %s after %s' %
                     (self._TYPE_NAMES[header_type],
                      self._SECTION_NAMES[self._section]))

    last_section = self._section

    # Sections must be entered in monotonically non-decreasing order; an
    # attempt to move backwards produces the error message built above.
    if header_type == _C_SYS_HEADER:
      if self._section <= self._C_SECTION:
        self._section = self._C_SECTION
      else:
        self._last_header = ''
        return error_message
    elif header_type == _CPP_SYS_HEADER:
      if self._section <= self._CPP_SECTION:
        self._section = self._CPP_SECTION
      else:
        self._last_header = ''
        return error_message
    elif header_type == _LIKELY_MY_HEADER:
      if self._section <= self._MY_H_SECTION:
        self._section = self._MY_H_SECTION
      else:
        self._section = self._OTHER_H_SECTION
    elif header_type == _POSSIBLE_MY_HEADER:
      if self._section <= self._MY_H_SECTION:
        self._section = self._MY_H_SECTION
      else:
        # This will always be the fallback because we're not sure
        # enough that the header is associated with this file.
        self._section = self._OTHER_H_SECTION
    else:
      assert header_type == _OTHER_HEADER
      self._section = self._OTHER_H_SECTION

    # Crossing a section boundary resets alphabetical-order tracking.
    if last_section != self._section:
      self._last_header = ''

    return ''
class _CppLintState(object):
"""Maintains module-wide state.."""
def __init__(self):
self.verbose_level = 1 # global setting.
self.error_count = 0 # global count of reported errors
# filters to apply when emitting error messages
self.filters = _DEFAULT_FILTERS[:]
# backup of filter list. Used to restore the state after each file.
self._filters_backup = self.filters[:]
self.counting = 'total' # In what way are we counting errors?
self.errors_by_category = {} # string to int dict storing error counts
# output format:
# "emacs" - format that emacs can parse (default)
# "vs7" - format that Microsoft Visual Studio 7 can parse
self.output_format = 'emacs'
def SetOutputFormat(self, output_format):
"""Sets the output format for errors."""
self.output_format = output_format
def SetVerboseLevel(self, level):
"""Sets the module's verbosity, and returns the previous setting."""
last_verbose_level = self.verbose_level
self.verbose_level = level
return last_verbose_level
def SetCountingStyle(self, counting_style):
"""Sets the module's counting options."""
self.counting = counting_style
def SetFilters(self, filters):
"""Sets the error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "+whitespace/indent").
Each filter should start with + or -; else we die.
Raises:
ValueError: The comma-separated filters did not all start with '+' or '-'.
E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
"""
# Default filters always have less priority than the flag ones.
self.filters = _DEFAULT_FILTERS[:]
self.AddFilters(filters)
def AddFilters(self, filters):
""" Adds more filters to the existing list of error-message filters. """
for filt in filters.split(','):
clean_filt = filt.strip()
if clean_filt:
self.filters.append(clean_filt)
for filt in self.filters:
if not (filt.startswith('+') or filt.startswith('-')):
raise ValueError('Every filter in --filters must start with + or -'
' (%s does not)' % filt)
def BackupFilters(self):
""" Saves the current filter list to backup storage."""
self._filters_backup = self.filters[:]
def RestoreFilters(self):
""" Restores filters previously backed up."""
self.filters = self._filters_backup[:]
def ResetErrorCounts(self):
"""Sets the module's error statistic back to zero."""
self.error_count = 0
self.errors_by_category = {}
def IncrementErrorCount(self, category):
"""Bumps the module's error statistic."""
self.error_count += 1
if self.counting in ('toplevel', 'detailed'):
if self.counting != 'detailed':
category = category.split('/')[0]
if category not in self.errors_by_category:
self.errors_by_category[category] = 0
self.errors_by_category[category] += 1
def PrintErrorCounts(self):
"""Print a summary of errors by category, and the total."""
for category, count in self.errors_by_category.iteritems():
sys.stderr.write('Category \'%s\' errors found: %d\n' %
(category, count))
sys.stderr.write('Total errors found: %d\n' % self.error_count)
# The single shared lint-state instance; the module-level functions below
# are thin convenience wrappers around it.
_cpplint_state = _CppLintState()


def _OutputFormat():
  """Gets the module's output format."""
  return _cpplint_state.output_format


def _SetOutputFormat(output_format):
  """Sets the module's output format."""
  _cpplint_state.SetOutputFormat(output_format)


def _VerboseLevel():
  """Returns the module's verbosity setting."""
  return _cpplint_state.verbose_level


def _SetVerboseLevel(level):
  """Sets the module's verbosity, and returns the previous setting."""
  return _cpplint_state.SetVerboseLevel(level)


def _SetCountingStyle(level):
  """Sets the module's counting options."""
  _cpplint_state.SetCountingStyle(level)


def _Filters():
  """Returns the module's list of output filters, as a list."""
  return _cpplint_state.filters


def _SetFilters(filters):
  """Sets the module's error-message filters.

  These filters are applied when deciding whether to emit a given
  error message.

  Args:
    filters: A string of comma-separated filters (eg "whitespace/indent").
             Each filter should start with + or -; else we die.
  """
  _cpplint_state.SetFilters(filters)


def _AddFilters(filters):
  """Adds more filter overrides.

  Unlike _SetFilters, this function does not reset the current list of filters
  available.

  Args:
    filters: A string of comma-separated filters (eg "whitespace/indent").
             Each filter should start with + or -; else we die.
  """
  _cpplint_state.AddFilters(filters)


def _BackupFilters():
  """ Saves the current filter list to backup storage."""
  _cpplint_state.BackupFilters()


def _RestoreFilters():
  """ Restores filters previously backed up."""
  _cpplint_state.RestoreFilters()
class _FunctionState(object):
  """Tracks current function name and the number of lines in its body."""

  _NORMAL_TRIGGER = 250  # for --v=0, 500 for --v=1, etc.
  _TEST_TRIGGER = 400    # about 50% more than _NORMAL_TRIGGER.

  def __init__(self):
    # True while we are between a function's opening and closing braces.
    self.in_a_function = False
    # Non-comment lines counted so far for the current function.
    self.lines_in_function = 0
    # Name of the function currently being analyzed ('' when outside one).
    self.current_function = ''

  def Begin(self, function_name):
    """Start analyzing function body.

    Args:
      function_name: The name of the function being tracked.
    """
    self.in_a_function = True
    self.lines_in_function = 0
    self.current_function = function_name

  def Count(self):
    """Count line in current function body."""
    if self.in_a_function:
      self.lines_in_function += 1

  def Check(self, error, filename, linenum):
    """Report if too many lines in function body.

    Args:
      error: The function to call with any errors found.
      filename: The name of the current file.
      linenum: The number of the line to check.
    """
    # Tests get a higher line budget than ordinary functions.
    if Match(r'T(EST|est)', self.current_function):
      base_trigger = self._TEST_TRIGGER
    else:
      base_trigger = self._NORMAL_TRIGGER
    # Each additional verbosity level doubles the allowed length.
    trigger = base_trigger * 2**_VerboseLevel()

    if self.lines_in_function > trigger:
      error_level = int(math.log(self.lines_in_function / base_trigger, 2))
      # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ...
      if error_level > 5:
        error_level = 5
      error(filename, linenum, 'readability/fn_size', error_level,
            'Small and focused functions are preferred:'
            ' %s has %d non-comment lines'
            ' (error triggered by exceeding %d lines).' % (
                self.current_function, self.lines_in_function, trigger))

  def End(self):
    """Stop analyzing function body."""
    self.in_a_function = False
class _IncludeError(Exception):
  """Indicates a problem with the include order in a file."""
  pass
class FileInfo(object):
  """Provides utility functions for filenames.

  FileInfo provides easy access to the components of a file's path
  relative to the project root.
  """

  def __init__(self, filename):
    self._filename = filename

  def FullName(self):
    """Make Windows paths like Unix."""
    return os.path.abspath(self._filename).replace('\\', '/')

  def RepositoryName(self):
    """FullName after removing the local path to the repository.

    If we have a real absolute path name here we can try to do something smart:
    detecting the root of the checkout and truncating /path/to/checkout from
    the name so that we get header guards that don't include things like
    "C:\Documents and Settings\..." or "/home/username/..." in them and thus
    people on different computers who have checked the source out to different
    locations won't see bogus errors.
    """
    fullname = self.FullName()

    if os.path.exists(fullname):
      project_dir = os.path.dirname(fullname)

      if os.path.exists(os.path.join(project_dir, ".svn")):
        # If there's a .svn file in the current directory, we recursively look
        # up the directory tree for the top of the SVN checkout
        root_dir = project_dir
        one_up_dir = os.path.dirname(root_dir)
        while os.path.exists(os.path.join(one_up_dir, ".svn")):
          root_dir = os.path.dirname(root_dir)
          one_up_dir = os.path.dirname(one_up_dir)

        prefix = os.path.commonprefix([root_dir, project_dir])
        return fullname[len(prefix) + 1:]

      # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
      # searching up from the current path.
      root_dir = os.path.dirname(fullname)
      # Stop when we hit the filesystem root (dirname of a root is itself).
      while (root_dir != os.path.dirname(root_dir) and
             not os.path.exists(os.path.join(root_dir, ".git")) and
             not os.path.exists(os.path.join(root_dir, ".hg")) and
             not os.path.exists(os.path.join(root_dir, ".svn"))):
        root_dir = os.path.dirname(root_dir)

      if (os.path.exists(os.path.join(root_dir, ".git")) or
          os.path.exists(os.path.join(root_dir, ".hg")) or
          os.path.exists(os.path.join(root_dir, ".svn"))):
        prefix = os.path.commonprefix([root_dir, project_dir])
        return fullname[len(prefix) + 1:]

    # Don't know what to do; header guard warnings may be wrong...
    return fullname

  def Split(self):
    """Splits the file into the directory, basename, and extension.

    For 'chrome/browser/browser.cc', Split() would
    return ('chrome/browser', 'browser', '.cc')

    Returns:
      A tuple of (directory, basename, extension).
    """
    googlename = self.RepositoryName()
    project, rest = os.path.split(googlename)
    return (project,) + os.path.splitext(rest)

  def BaseName(self):
    """File base name - text after the final slash, before the final period."""
    return self.Split()[1]

  def Extension(self):
    """File extension - text following the final period."""
    return self.Split()[2]

  def NoExtension(self):
    """File has no source file extension."""
    return '/'.join(self.Split()[0:2])

  def IsSource(self):
    """File has a source file extension."""
    return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
def _ShouldPrintError(category, confidence, linenum):
  """If confidence >= verbose, category passes filter and is not suppressed."""

  # There are three ways we might decide not to print an error message:
  # a "NOLINT(category)" comment appears in the source,
  # the verbosity level isn't high enough, or the filters filter it out.
  if IsErrorSuppressedByNolint(category, linenum):
    return False

  if confidence < _cpplint_state.verbose_level:
    return False

  # Filters are applied in order, so a later filter overrides an earlier one.
  filtered = False
  for one_filter in _Filters():
    sign, prefix = one_filter[0], one_filter[1:]
    if sign == '-':
      if category.startswith(prefix):
        filtered = True
    elif sign == '+':
      if category.startswith(prefix):
        filtered = False
    else:
      assert False  # should have been checked for in SetFilter.

  return not filtered
def Error(filename, linenum, category, confidence, message):
  """Logs the fact we've found a lint error.

  We log where the error was found, and also our confidence in the error,
  that is, how certain we are this is a legitimate style regression, and
  not a misidentification or a use that's sometimes justified.

  False positives can be suppressed by the use of
  "cpplint(category)" comments on the offending line.  These are
  parsed into _error_suppressions.

  Args:
    filename: The name of the file containing the error.
    linenum: The number of the line containing the error.
    category: A string used to describe the "category" this bug
      falls under: "whitespace", say, or "runtime".  Categories
      may have a hierarchy separated by slashes: "whitespace/indent".
    confidence: A number from 1-5 representing a confidence score for
      the error, with 5 meaning that we are certain of the problem,
      and 1 meaning that it could be a legitimate construct.
    message: The error message.
  """
  if not _ShouldPrintError(category, confidence, linenum):
    return
  _cpplint_state.IncrementErrorCount(category)
  # Choose the line layout expected by the configured consumer; anything
  # unrecognized falls back to the plain emacs-style format.
  templates = {
      'vs7': '%s(%s): %s  [%s] [%d]\n',
      'eclipse': '%s:%s: warning: %s  [%s] [%d]\n',
  }
  template = templates.get(_cpplint_state.output_format,
                           '%s:%s:  %s  [%s] [%d]\n')
  sys.stderr.write(template % (filename, linenum, message,
                               category, confidence))
# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
    r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Match a single C style comment on the same line.
_RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/'
# Matches multi-line C style comments.
# This RE is a little bit more complicated than one might expect, because we
# have to take care of space removals tools so we can handle comments inside
# statements better.
# The current rule is: We only clear spaces from both sides when we're at the
# end of the line. Otherwise, we try to remove spaces from the right side,
# if this doesn't work we try on left side but only if there's a non-character
# on the right.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
    r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' +
    _RE_PATTERN_C_COMMENTS + r'\s+|' +
    r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' +
    _RE_PATTERN_C_COMMENTS + r')')
def IsCppString(line):
  """Does line terminate so, that the next symbol is in string constant.

  This function does not consider single-line nor multi-line comments.

  Args:
    line: is a partial line of code starting from the 0..n.

  Returns:
    True, if next character appended to 'line' is inside a
    string constant.
  """
  # Collapse escaped backslashes first so that \\" is not mistaken for \".
  line = line.replace(r'\\', 'XX')
  # Count double quotes that actually open/close a string: drop escaped
  # quotes and quotes that form the character literal '"'.
  unescaped_quotes = (line.count('"') - line.count(r'\"') -
                      line.count("'\"'"))
  # An odd count means a string is still open at the end of the line.
  return unescaped_quotes % 2 == 1
def CleanseRawStrings(raw_lines):
  """Removes C++11 raw strings from lines.

    Before:
      static const char kData[] = R"(
          multi-line string
          )";

    After:
      static const char kData[] = ""
          (replaced by blank line)
          "";

  Args:
    raw_lines: list of raw lines.

  Returns:
    list of lines with C++11 raw strings replaced by empty strings.
  """

  # 'delimiter' is the closing token of the raw string we are currently
  # inside (e.g. ')foo"'), or None when outside any raw string.
  delimiter = None
  lines_without_raw_strings = []
  for line in raw_lines:
    if delimiter:
      # Inside a raw string, look for the end
      end = line.find(delimiter)
      if end >= 0:
        # Found the end of the string, match leading space for this
        # line and resume copying the original lines, and also insert
        # a "" on the last line.
        leading_space = Match(r'^(\s*)\S', line)
        line = leading_space.group(1) + '""' + line[end + len(delimiter):]
        delimiter = None
      else:
        # Haven't found the end yet, append a blank line.
        line = '""'

    # Look for beginning of a raw string, and replace them with
    # empty strings.  This is done in a loop to handle multiple raw
    # strings on the same line.
    while delimiter is None:
      # Look for beginning of a raw string.
      # See 2.14.15 [lex.string] for syntax.
      matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
      if matched:
        delimiter = ')' + matched.group(2) + '"'

        end = matched.group(3).find(delimiter)
        if end >= 0:
          # Raw string ended on same line
          line = (matched.group(1) + '""' +
                  matched.group(3)[end + len(delimiter):])
          delimiter = None
        else:
          # Start of a multi-line raw string
          line = matched.group(1) + '""'
      else:
        break

    lines_without_raw_strings.append(line)

  # TODO(unknown): if delimiter is not None here, we might want to
  # emit a warning for unterminated string.
  return lines_without_raw_strings
def FindNextMultiLineCommentStart(lines, lineix):
  """Find the beginning marker for a multiline comment.

  Returns the index of the first line at or after lineix that opens a
  /* ... */ comment which is not closed on the same line, or len(lines)
  if there is none.
  """
  for index in range(lineix, len(lines)):
    stripped = lines[index].strip()
    # Only count this as a multi-line opener if no '*/' closes it here.
    if stripped.startswith('/*') and stripped.find('*/', 2) < 0:
      return index
  return len(lines)
def FindNextMultiLineCommentEnd(lines, lineix):
  """We are inside a comment, find the end marker.

  Returns the index of the first line at or after lineix that ends with
  '*/', or len(lines) if the comment is never closed.
  """
  index = lineix
  while index < len(lines) and not lines[index].strip().endswith('*/'):
    index += 1
  return index
def RemoveMultiLineCommentsFromRange(lines, begin, end):
  """Clears a range of lines for multi-line comments."""
  # Having // dummy comments makes the lines non-empty, so we will not get
  # unnecessary blank line warnings later in the code.
  lines[begin:end] = ['// dummy'] * (end - begin)
def RemoveMultiLineComments(filename, lines, error):
  """Removes multiline (c-style) comments from lines.

  Mutates 'lines' in place, replacing every line inside a /* ... */ span
  with '// dummy'.  Reports via 'error' if a comment is never closed.
  """
  cursor = 0
  while True:
    begin = FindNextMultiLineCommentStart(lines, cursor)
    if begin >= len(lines):
      break
    end = FindNextMultiLineCommentEnd(lines, begin)
    if end >= len(lines):
      error(filename, begin + 1, 'readability/multiline_comment', 5,
            'Could not find end of multi-line comment')
      break
    RemoveMultiLineCommentsFromRange(lines, begin, end + 1)
    cursor = end + 1
def CleanseComments(line):
  """Removes //-comments and single-line C-style /* */ comments.

  Args:
    line: A line of C++ source.

  Returns:
    The line with single-line comments removed.
  """
  comment_start = line.find('//')
  # A '//' inside a string literal is not a comment; IsCppString tells us
  # whether the text before it leaves a string open.
  if comment_start >= 0 and not IsCppString(line[:comment_start]):
    line = line[:comment_start].rstrip()
  # get rid of /* ... */
  return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
class CleansedLines(object):
  """Holds 3 copies of all lines with different preprocessing applied to them.

  1) elided member contains lines without strings and comments,
  2) lines member contains lines without comments, and
  3) raw_lines member contains all the lines without processing.
  All these three members are of <type 'list'>, and of the same length.
  """

  def __init__(self, lines):
    self.elided = []
    self.lines = []
    self.raw_lines = lines
    self.num_lines = len(lines)
    self.lines_without_raw_strings = CleanseRawStrings(lines)
    for linenum in range(len(self.lines_without_raw_strings)):
      self.lines.append(CleanseComments(
          self.lines_without_raw_strings[linenum]))
      elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
      self.elided.append(CleanseComments(elided))

  def NumLines(self):
    """Returns the number of lines represented."""
    return self.num_lines

  @staticmethod
  def _CollapseStrings(elided):
    """Collapses strings and chars on a line to simple "" or '' blocks.

    We nix strings first so we're not fooled by text like '"http://"'

    Args:
      elided: The line being processed.

    Returns:
      The line with collapsed strings.
    """
    # #include lines keep their quoted path intact.
    if _RE_PATTERN_INCLUDE.match(elided):
      return elided

    # Remove escaped characters first to make quote/single quote collapsing
    # basic.  Things that look like escaped characters shouldn't occur
    # outside of strings and chars.
    elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)

    # Replace quoted strings and digit separators.  Both single quotes
    # and double quotes are processed in the same loop, otherwise
    # nested quotes wouldn't work.
    collapsed = ''
    while True:
      # Find the first quote character
      match = Match(r'^([^\'"]*)([\'"])(.*)$', elided)
      if not match:
        collapsed += elided
        break
      head, quote, tail = match.groups()

      if quote == '"':
        # Collapse double quoted strings
        second_quote = tail.find('"')
        if second_quote >= 0:
          collapsed += head + '""'
          elided = tail[second_quote + 1:]
        else:
          # Unmatched double quote, don't bother processing the rest
          # of the line since this is probably a multiline string.
          collapsed += elided
          break
      else:
        # Found single quote, check nearby text to eliminate digit separators.
        #
        # There is no special handling for floating point here, because
        # the integer/fractional/exponent parts would all be parsed
        # correctly as long as there are digits on both sides of the
        # separator.  So we are fine as long as we don't see something
        # like "0.'3" (gcc 4.9.0 will not allow this literal).
        if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head):
          match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail)
          collapsed += head + match_literal.group(1).replace("'", '')
          elided = match_literal.group(2)
        else:
          second_quote = tail.find('\'')
          if second_quote >= 0:
            collapsed += head + "''"
            elided = tail[second_quote + 1:]
          else:
            # Unmatched single quote
            collapsed += elided
            break

    return collapsed
def FindEndOfExpressionInLine(line, startpos, stack):
  """Find the position just after the end of current parenthesized expression.

  Args:
    line: a CleansedLines line.
    startpos: start searching at this position.
    stack: nesting stack at startpos.

  Returns:
    On finding matching end: (index just after matching end, None)
    On finding an unclosed expression: (-1, None)
    Otherwise: (-1, new stack at end of this line)
  """
  # xrange is Python-2-only; range matches the rest of this file (which
  # already uses range elsewhere) and works on both Python 2 and 3.
  for i in range(startpos, len(line)):
    char = line[i]
    if char in '([{':
      # Found start of parenthesized expression, push to expression stack
      stack.append(char)
    elif char == '<':
      # Found potential start of template argument list
      if i > 0 and line[i - 1] == '<':
        # Left shift operator
        if stack and stack[-1] == '<':
          stack.pop()
          if not stack:
            return (-1, None)
      elif i > 0 and Search(r'\boperator\s*$', line[0:i]):
        # operator<, don't add to stack
        continue
      else:
        # Tentative start of template argument list
        stack.append('<')
    elif char in ')]}':
      # Found end of parenthesized expression.
      #
      # If we are currently expecting a matching '>', the pending '<'
      # must have been an operator.  Remove them from expression stack.
      while stack and stack[-1] == '<':
        stack.pop()
        if not stack:
          return (-1, None)
      # NOTE(review): assumes the stack is non-empty here, i.e. callers
      # start scanning at an opening punctuator.
      if ((stack[-1] == '(' and char == ')') or
          (stack[-1] == '[' and char == ']') or
          (stack[-1] == '{' and char == '}')):
        stack.pop()
        if not stack:
          return (i + 1, None)
      else:
        # Mismatched parentheses
        return (-1, None)
    elif char == '>':
      # Found potential end of template argument list.

      # Ignore "->" and operator functions
      if (i > 0 and
          (line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))):
        continue

      # Pop the stack if there is a matching '<'.  Otherwise, ignore
      # this '>' since it must be an operator.
      if stack:
        if stack[-1] == '<':
          stack.pop()
          if not stack:
            return (i + 1, None)
    elif char == ';':
      # Found something that look like end of statements.  If we are currently
      # expecting a '>', the matching '<' must have been an operator, since
      # template argument list should not contain statements.
      while stack and stack[-1] == '<':
        stack.pop()
        if not stack:
          return (-1, None)

  # Did not find end of expression or unbalanced parentheses on this line
  return (-1, stack)
def CloseExpression(clean_lines, linenum, pos):
  """If input points to ( or { or [ or <, finds the position that closes it.

  If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
  linenum/pos that correspond to the closing of the expression.

  TODO(unknown): cpplint spends a fair bit of time matching parentheses.
  Ideally we would want to index all opening and closing parentheses once
  and have CloseExpression be just a simple lookup, but due to preprocessor
  tricks, this is not so easy.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    pos: A position on the line.

  Returns:
    A tuple (line, linenum, pos) pointer *past* the closing brace, or
    (line, len(lines), -1) if we never find a close.  Note we ignore
    strings and comments when matching; and the line we return is the
    'cleansed' line at linenum.
  """
  line = clean_lines.elided[linenum]
  # Bail out unless we are sitting on an opening punctuator; '<<' and '<='
  # are operators, not openers.
  if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]):
    return (line, clean_lines.NumLines(), -1)

  # Try to close the expression on the starting line first.
  close_pos, pending = FindEndOfExpressionInLine(line, pos, [])
  if close_pos > -1:
    return (line, linenum, close_pos)

  # Scan forward one line at a time until the nesting stack unwinds.
  while pending and linenum < clean_lines.NumLines() - 1:
    linenum += 1
    line = clean_lines.elided[linenum]
    close_pos, pending = FindEndOfExpressionInLine(line, 0, pending)
    if close_pos > -1:
      return (line, linenum, close_pos)

  # Ran off the end of the file without finding the matching close.
  return (line, clean_lines.NumLines(), -1)
def FindStartOfExpressionInLine(line, endpos, stack):
  """Find position at the matching start of current expression.

  This is almost the reverse of FindEndOfExpressionInLine, but note
  that the input position and returned position differs by 1.

  Args:
    line: a CleansedLines line.
    endpos: start searching at this position.
    stack: nesting stack at endpos.

  Returns:
    On finding matching start: (index at matching start, None)
    On finding an unclosed expression: (-1, None)
    Otherwise: (-1, new stack at beginning of this line)
  """
  i = endpos
  while i >= 0:
    char = line[i]
    if char in ')]}':
      # Found end of expression, push to expression stack
      stack.append(char)
    elif char == '>':
      # Found potential end of template argument list.
      #
      # Ignore it if it's a "->" or ">=" or "operator>"
      if (i > 0 and
          (line[i - 1] == '-' or
           Match(r'\s>=\s', line[i - 1:]) or
           Search(r'\boperator\s*$', line[0:i]))):
        i -= 1
      else:
        stack.append('>')
    elif char == '<':
      # Found potential start of template argument list
      if i > 0 and line[i - 1] == '<':
        # Left shift operator
        i -= 1
      else:
        # If there is a matching '>', we can pop the expression stack.
        # Otherwise, ignore this '<' since it must be an operator.
        if stack and stack[-1] == '>':
          stack.pop()
          if not stack:
            return (i, None)
    elif char in '([{':
      # Found start of expression.
      #
      # If there are any unmatched '>' on the stack, they must be
      # operators.  Remove those.
      while stack and stack[-1] == '>':
        stack.pop()
        if not stack:
          return (-1, None)
      # NOTE(review): assumes the stack is non-empty here, i.e. callers
      # start scanning at a closing punctuator.
      if ((char == '(' and stack[-1] == ')') or
          (char == '[' and stack[-1] == ']') or
          (char == '{' and stack[-1] == '}')):
        stack.pop()
        if not stack:
          return (i, None)
      else:
        # Mismatched parentheses
        return (-1, None)
    elif char == ';':
      # Found something that look like end of statements.  If we are currently
      # expecting a '<', the matching '>' must have been an operator, since
      # template argument list should not contain statements.
      while stack and stack[-1] == '>':
        stack.pop()
        if not stack:
          return (-1, None)

    i -= 1

  return (-1, stack)
def ReverseCloseExpression(clean_lines, linenum, pos):
  """If input points to ) or } or ] or >, finds the position that opens it.

  If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
  linenum/pos that correspond to the opening of the expression.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    pos: A position on the line.

  Returns:
    A tuple (line, linenum, pos) pointer *at* the opening brace, or
    (line, 0, -1) if we never find the matching opening brace.  Note
    we ignore strings and comments when matching; and the line we
    return is the 'cleansed' line at linenum.
  """
  cur_line = clean_lines.elided[linenum]
  if cur_line[pos] not in ')}]>':
    # Not sitting on a closing bracket; nothing to match.
    return (cur_line, 0, -1)

  # Try to resolve the expression within the current line first.
  (open_pos, nesting) = FindStartOfExpressionInLine(cur_line, pos, [])
  if open_pos > -1:
    return (cur_line, linenum, open_pos)

  # Still unresolved: keep scanning earlier lines, threading the nesting
  # stack through each call, until the opener is found or we hit the top.
  while nesting and linenum > 0:
    linenum -= 1
    cur_line = clean_lines.elided[linenum]
    (open_pos, nesting) = FindStartOfExpressionInLine(
        cur_line, len(cur_line) - 1, nesting)
    if open_pos > -1:
      return (cur_line, linenum, open_pos)

  # Reached the beginning of the file without a match.
  return (cur_line, 0, -1)
def CheckForCopyright(filename, lines, error):
  """Logs an error if no Copyright message appears at the top of the file.

  Args:
    filename: The name of the current file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """

  # We'll say it should occur by line 10. Don't forget there's a
  # dummy line at the front.
  #
  # Use range() instead of xrange(): xrange() does not exist on Python 3
  # (NameError), and the range here is at most 10 elements, so there is
  # no meaningful cost on Python 2 either.
  for line in range(1, min(len(lines), 11)):
    if re.search(r'Copyright', lines[line], re.I): break
  else:                       # means no copyright line was found
    error(filename, 0, 'legal/copyright', 5,
          'No copyright message found. '
          'You should have a line: "Copyright [year] <Copyright Owner>"')
def GetIndentLevel(line):
  """Return the number of leading spaces in line.

  Args:
    line: A string to check.

  Returns:
    An integer count of leading spaces, possibly zero.
  """
  # Lines that are empty or contain only spaces count as indent level 0.
  leading = Match(r'^( *)\S', line)
  return len(leading.group(1)) if leading else 0
def GetHeaderGuardCPPVariable(filename):
  """Returns the CPP variable that should be used as a header guard.

  Args:
    filename: The name of a C++ header file.

  Returns:
    The CPP variable that should be used as a header guard in the
    named file.
  """

  # Restores original filename in case that cpplint is invoked from Emacs's
  # flymake.
  filename = re.sub(r'_flymake\.h$', '.h', filename)
  filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)

  fileinfo = FileInfo(filename)
  file_path_from_root = fileinfo.RepositoryName()
  if _root:
    # Escape the prefix before embedding it in a regex: _root may contain
    # regex metacharacters, and on Windows os.sep is '\\', which would
    # otherwise be read as a (broken) escape sequence instead of a literal
    # separator.
    file_path_from_root = re.sub('^' + re.escape(_root + os.sep), '',
                                 file_path_from_root)
  # return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_'
  # replaced by weizhenwei, 2015.06.26; Remove the annoying '_';
  return re.sub(r'[-./\s]', '_', file_path_from_root).upper()
def CheckForHeaderGuard(filename, lines, error):
  """Checks that the file contains a header guard.

  Logs an error if no #ifndef header guard is present.  For other
  headers, checks that the full pathname is used.

  Args:
    filename: The name of the C++ header file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """

  # Don't check for header guards if there are error suppression
  # comments somewhere in this file.
  #
  # Because this is silencing a warning for a nonexistent line, we
  # only support the very specific NOLINT(build/header_guard) syntax,
  # and not the general NOLINT or NOLINT(*) syntax.
  for i in lines:
    if Search(r'//\s*NOLINT\(build/header_guard\)', i):
      return

  cppvar = GetHeaderGuardCPPVariable(filename)

  # Track the guard's three components: the #ifndef symbol (and its line),
  # the #define symbol, and the final #endif line (and its line number).
  ifndef = None
  ifndef_linenum = 0
  define = None
  endif = None
  endif_linenum = 0
  for linenum, line in enumerate(lines):
    linesplit = line.split()
    if len(linesplit) >= 2:
      # find the first occurrence of #ifndef and #define, save arg
      if not ifndef and linesplit[0] == '#ifndef':
        # set ifndef to the header guard presented on the #ifndef line.
        ifndef = linesplit[1]
        ifndef_linenum = linenum
      if not define and linesplit[0] == '#define':
        define = linesplit[1]
    # find the last occurrence of #endif, save entire line
    if line.startswith('#endif'):
      endif = line
      endif_linenum = linenum

  if not ifndef:
    error(filename, 0, 'build/header_guard', 5,
          'No #ifndef header guard found, suggested CPP variable is: %s' %
          cppvar)
    return

  if not define:
    error(filename, 0, 'build/header_guard', 5,
          'No #define header guard found, suggested CPP variable is: %s' %
          cppvar)
    return

  # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
  # for backward compatibility.
  if ifndef != cppvar:
    # removed by weizhenwei, 2015.06.26;
    # error_level = 0
    # if ifndef != cppvar + '_':
    #   error_level = 5
    # Local fork always reports at full confidence (no trailing-underscore
    # leniency).
    error_level = 5
    ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum,
                            error)
    error(filename, ifndef_linenum, 'build/header_guard', error_level,
          '#ifndef header guard has wrong style, please use: %s' % cppvar)

  if define != ifndef:
    error(filename, 0, 'build/header_guard', 5,
          '#ifndef and #define don\'t match, suggested CPP variable is: %s' %
          cppvar)
    return

  if endif != ('#endif // %s' % cppvar):
    # removed by weizhenwei, 2015.06.26;
    # error_level = 0
    # if endif != ('#endif // %s' % (cppvar + '_')):
    #   error_level = 5
    error_level = 5
    ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum,
                            error)
    error(filename, endif_linenum, 'build/header_guard', error_level,
          '#endif line should be "#endif // %s"' % cppvar)
def CheckForBadCharacters(filename, lines, error):
  """Logs an error for each line containing bad characters.

  Two kinds of bad characters:

  1. Unicode replacement characters: These indicate that either the file
  contained invalid UTF-8 (likely) or Unicode replacement characters (which
  it shouldn't).  Note that it's possible for this to throw off line
  numbering if the invalid UTF-8 occurred adjacent to a newline.

  2. NUL bytes.  These are problematic for some tools.

  Args:
    filename: The name of the current file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """
  for linenum, line in enumerate(lines):
    # U+FFFD is what a lossy decode leaves behind; its presence means the
    # original bytes were not valid UTF-8 (or the file really contained it).
    if line.find(u'\ufffd') != -1:
      error(filename, linenum, 'readability/utf8', 5,
            'Line contains invalid UTF-8 (or Unicode replacement character).')
    if line.find('\0') != -1:
      error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
def CheckForNewlineAtEOF(filename, lines, error):
  """Logs an error if there is no newline char at the end of the file.

  Args:
    filename: The name of the current file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """

  # The array lines() was created by adding two newlines to the
  # original file (go figure), then splitting on \n.
  # To verify that the file ends in \n, we just have to make sure the
  # last-but-two element of lines() exists and is empty.
  ends_with_newline = len(lines) >= 3 and not lines[-2]
  if not ends_with_newline:
    error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
          'Could not find a newline character at the end of the file.')
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
  """Logs an error if we see /* ... */ or "..." that extend past one line.

  /* ... */ comments are legit inside macros, for one line.
  Otherwise, we prefer // comments, so it's ok to warn about the
  other.  Likewise, it's ok for strings to extend across multiple
  lines, as long as a line continuation character (backslash)
  terminates each line. Although not currently prohibited by the C++
  style guide, it's ugly and unnecessary. We don't do well with either
  in this lint program, so we warn about both.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Remove all \\ (escaped backslashes) from the line. They are OK, and the
  # second (escaped) slash may trigger later \" detection erroneously.
  line = clean_lines.elided[linenum].replace('\\\\', '')

  # More comment openers than closers means a /* ... */ spills past this line.
  if line.count('/*') > line.count('*/'):
    error(filename, linenum, 'readability/multiline_comment', 5,
          'Complex multi-line /*...*/-style comment found. '
          'Lint may give bogus warnings. '
          'Consider replacing these with //-style comments, '
          'with #if 0...#endif, '
          'or with more clearly structured multi-line comments.')

  # An odd number of unescaped quotes means an unterminated string literal.
  unescaped_quotes = line.count('"') - line.count('\\"')
  if unescaped_quotes % 2:
    error(filename, linenum, 'readability/multiline_string', 5,
          'Multi-line string ("...") found. This lint script doesn\'t '
          'do well with such strings, and may give bogus warnings. '
          'Use C++11 raw strings or concatenation instead.')
# (non-threadsafe name, thread-safe alternative, validation pattern)
#
# The validation pattern is used to eliminate false positives such as:
#  _rand();               // false positive due to substring match.
#  ->rand();              // some member function rand().
#  ACMRandom rand(seed);  // some variable named rand.
#  ISAACRandom rand();    // another variable named rand.
#
# Basically we require the return value of these functions to be used
# in some expression context on the same line by matching on some
# operator before the function name.  This eliminates constructors and
# member function calls.
#
# Consumed by CheckPosixThreading below.
_UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)'
_THREADING_LIST = (
    ('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'),
    ('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'),
    ('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'),
    ('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'),
    ('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'),
    ('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'),
    ('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'),
    ('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'),
    ('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'),
    ('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'),
    ('strtok(', 'strtok_r(',
     _UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'),
    ('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'),
    )
def CheckPosixThreading(filename, clean_lines, linenum, error):
  """Checks for calls to thread-unsafe functions.

  Much code has been originally written without consideration of
  multi-threading. Also, engineers are relying on their old experience;
  they have learned posix before threading extensions were added. These
  tests guide the engineers to use thread-safe functions (when using
  posix directly).

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  for unsafe_func, safe_func, validation_pattern in _THREADING_LIST:
    # The validation pattern requires an expression context around the call,
    # filtering out constructors and member functions that merely share the
    # unsafe function's name.
    if Search(validation_pattern, line):
      error(filename, linenum, 'runtime/threadsafe_fn', 2,
            'Consider using ' + safe_func +
            '...) instead of ' + unsafe_func +
            '...) for improved thread safety.')
def CheckVlogArguments(filename, clean_lines, linenum, error):
  """Checks that VLOG() is only used for defining a logging level.

  For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
  VLOG(FATAL) are not.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # VLOG takes a numeric verbosity; a severity name inside VLOG() means the
  # author confused it with LOG().
  elided_line = clean_lines.elided[linenum]
  if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', elided_line):
    error(filename, linenum, 'runtime/vlog', 5,
          'VLOG() should be used with numeric verbosity level. '
          'Use LOG() if you want symbolic severity levels.')
# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.
# Used by CheckInvalidIncrement below; e.g. matches "*count++;" but not
# "(*count)++;".
_RE_PATTERN_INVALID_INCREMENT = re.compile(
    r'^\s*\*\w+(\+\+|--);')
def CheckInvalidIncrement(filename, clean_lines, linenum, error):
  """Checks for invalid increment *count++.

  For example following function:
  void increment_counter(int* count) {
    *count++;
  }
  is invalid, because it effectively does count++, moving pointer, and should
  be replaced with ++*count, (*count)++ or *count += 1.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # The precompiled pattern recognizes statements of the form "*name++;".
  if _RE_PATTERN_INVALID_INCREMENT.match(clean_lines.elided[linenum]):
    error(filename, linenum, 'runtime/invalid_increment', 5,
          'Changing pointer instead of value (or unused value of operator*).')
def IsMacroDefinition(clean_lines, linenum):
  """Returns True if this line starts or continues a #define.

  A line continues a macro when the previous line ends with a backslash.
  """
  if Search(r'^#define', clean_lines[linenum]):
    return True
  return linenum > 0 and bool(Search(r'\\$', clean_lines[linenum - 1]))
def IsForwardClassDeclaration(clean_lines, linenum):
  """Matches forward declarations like 'class Foo;' (optionally templated)."""
  forward_decl_pattern = r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$'
  return Match(forward_decl_pattern, clean_lines[linenum])
class _BlockInfo(object):
  """Stores information about a generic block of code."""

  def __init__(self, seen_open_brace):
    # Whether the opening "{" of this block has been consumed yet.
    self.seen_open_brace = seen_open_brace
    # Count of currently unmatched "(" inside this block.
    self.open_parentheses = 0
    # Inline assembly state for this block (starts outside any asm).
    self.inline_asm = _NO_ASM
    # Whether namespace-indentation checks apply inside this block.
    self.check_namespace_indentation = False

  def CheckBegin(self, filename, clean_lines, linenum, error):
    """Run checks that applies to text up to the opening brace.

    This is mostly for checking the text after the class identifier
    and the "{", usually where the base class is specified.  For other
    blocks, there isn't much to check, so we always pass.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    pass

  def CheckEnd(self, filename, clean_lines, linenum, error):
    """Run checks that applies to text after the closing brace.

    This is mostly used for checking end of namespace comments.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    pass

  def IsBlockInfo(self):
    """Returns true if this block is a _BlockInfo.

    This is convenient for verifying that an object is an instance of
    a _BlockInfo, but not an instance of any of the derived classes.

    Returns:
      True for this class, False for derived classes.
    """
    return self.__class__ is _BlockInfo
class _ExternCInfo(_BlockInfo):
  """Stores information about an 'extern "C"' block."""

  def __init__(self):
    # An extern "C" block is treated as an ordinary block whose opening
    # brace has already been seen.
    super(_ExternCInfo, self).__init__(True)
class _ClassInfo(_BlockInfo):
  """Stores information about a class."""

  def __init__(self, name, class_or_struct, clean_lines, linenum):
    """Initializes class state from its declaration line.

    Args:
      name: The class/struct name.
      class_or_struct: Either 'class' or 'struct'.
      clean_lines: A CleansedLines instance containing the file.
      linenum: Line number of the declaration.
    """
    _BlockInfo.__init__(self, False)
    self.name = name
    self.starting_linenum = linenum
    self.is_derived = False
    self.check_namespace_indentation = True
    # Default member access differs: public for structs, private for classes.
    if class_or_struct == 'struct':
      self.access = 'public'
      self.is_struct = True
    else:
      self.access = 'private'
      self.is_struct = False

    # Remember initial indentation level for this class.  Using raw_lines here
    # instead of elided to account for leading comments.
    self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum])

    # Try to find the end of the class.  This will be confused by things like:
    #   class A {
    #   } *x = { ...
    #
    # But it's still good enough for CheckSectionSpacing.
    self.last_line = 0
    depth = 0
    for i in range(linenum, clean_lines.NumLines()):
      line = clean_lines.elided[i]
      depth += line.count('{') - line.count('}')
      if not depth:
        self.last_line = i
        break

  def CheckBegin(self, filename, clean_lines, linenum, error):
    """Records whether this class derives from a base class."""
    # Look for a bare ':'
    if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
      self.is_derived = True

  def CheckEnd(self, filename, clean_lines, linenum, error):
    """Checks that the closing brace aligns with the class declaration."""
    # Check that closing brace is aligned with beginning of the class.
    # Only do this if the closing brace is indented by only whitespaces.
    # This means we will not check single-line class definitions.
    indent = Match(r'^( *)\}', clean_lines.elided[linenum])
    if indent and len(indent.group(1)) != self.class_indent:
      if self.is_struct:
        parent = 'struct ' + self.name
      else:
        parent = 'class ' + self.name
      error(filename, linenum, 'whitespace/indent', 3,
            'Closing brace should be aligned with beginning of %s' % parent)
class _NamespaceInfo(_BlockInfo):
  """Stores information about a namespace."""

  def __init__(self, name, linenum):
    """Initializes namespace state.

    Args:
      name: Namespace name, or falsy for an anonymous namespace.
      linenum: Line number of the namespace declaration.
    """
    _BlockInfo.__init__(self, False)
    # Anonymous namespaces are stored with an empty name.
    self.name = name or ''
    self.starting_linenum = linenum
    self.check_namespace_indentation = True

  def CheckEnd(self, filename, clean_lines, linenum, error):
    """Check end of namespace comments."""
    line = clean_lines.raw_lines[linenum]

    # Check how many lines is enclosed in this namespace.  Don't issue
    # warning for missing namespace comments if there aren't enough
    # lines.  However, do apply checks if there is already an end of
    # namespace comment and it's incorrect.
    #
    # TODO(unknown): We always want to check end of namespace comments
    # if a namespace is large, but sometimes we also want to apply the
    # check if a short namespace contained nontrivial things (something
    # other than forward declarations).  There is currently no logic on
    # deciding what these nontrivial things are, so this check is
    # triggered by namespace size only, which works most of the time.
    if (linenum - self.starting_linenum < 10
        and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)):
      return

    # Look for matching comment at end of namespace.
    #
    # Note that we accept C style "/* */" comments for terminating
    # namespaces, so that code that terminate namespaces inside
    # preprocessor macros can be cpplint clean.
    #
    # We also accept stuff like "// end of namespace <name>." with the
    # period at the end.
    #
    # Besides these, we don't accept anything else, otherwise we might
    # get false negatives when existing comment is a substring of the
    # expected namespace.
    if self.name:
      # Named namespace
      if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) +
                    r'[\*/\.\\\s]*$'),
                   line):
        error(filename, linenum, 'readability/namespace', 5,
              'Namespace should be terminated with "// namespace %s"' %
              self.name)
    else:
      # Anonymous namespace
      if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
        # If "// namespace anonymous" or "// anonymous namespace (more text)",
        # mention "// anonymous namespace" as an acceptable form
        if Match(r'}.*\b(namespace anonymous|anonymous namespace)\b', line):
          error(filename, linenum, 'readability/namespace', 5,
                'Anonymous namespace should be terminated with "// namespace"'
                ' or "// anonymous namespace"')
        else:
          error(filename, linenum, 'readability/namespace', 5,
                'Anonymous namespace should be terminated with "// namespace"')
class _PreprocessorInfo(object):
"""Stores checkpoints of nesting stacks when #if/#else is seen."""
def __init__(self, stack_before_if):
# The entire nesting stack before #if
self.stack_before_if = stack_before_if
# The entire nesting stack up to #else
self.stack_before_else = []
# Whether we have already seen #else or #elif
self.seen_else = False
class NestingState(object):
  """Holds states related to parsing braces."""

  def __init__(self):
    # Stack for tracking all braces.  An object is pushed whenever we
    # see a "{", and popped when we see a "}".  Only 3 types of
    # objects are possible:
    # - _ClassInfo: a class or struct.
    # - _NamespaceInfo: a namespace.
    # - _BlockInfo: some other type of block.
    self.stack = []

    # Top of the previous stack before each Update().
    #
    # Because the nesting_stack is updated at the end of each line, we
    # had to do some convoluted checks to find out what is the current
    # scope at the beginning of the line.  This check is simplified by
    # saving the previous top of nesting stack.
    #
    # We could save the full stack, but we only need the top.  Copying
    # the full nesting stack would slow down cpplint by ~10%.
    self.previous_stack_top = []

    # Stack of _PreprocessorInfo objects.
    self.pp_stack = []

  def SeenOpenBrace(self):
    """Check if we have seen the opening brace for the innermost block.

    Returns:
      True if we have seen the opening brace, False if the innermost
      block is still expecting an opening brace.
    """
    return (not self.stack) or self.stack[-1].seen_open_brace

  def InNamespaceBody(self):
    """Check if we are currently one level inside a namespace body.

    Returns:
      True if top of the stack is a namespace block, False otherwise.
    """
    return self.stack and isinstance(self.stack[-1], _NamespaceInfo)

  def InExternC(self):
    """Check if we are currently one level inside an 'extern "C"' block.

    Returns:
      True if top of the stack is an extern block, False otherwise.
    """
    return self.stack and isinstance(self.stack[-1], _ExternCInfo)

  def InClassDeclaration(self):
    """Check if we are currently one level inside a class or struct declaration.

    Returns:
      True if top of the stack is a class/struct, False otherwise.
    """
    return self.stack and isinstance(self.stack[-1], _ClassInfo)

  def InAsmBlock(self):
    """Check if we are currently one level inside an inline ASM block.

    Returns:
      True if the top of the stack is a block containing inline ASM.
    """
    return self.stack and self.stack[-1].inline_asm != _NO_ASM

  def InTemplateArgumentList(self, clean_lines, linenum, pos):
    """Check if current position is inside template argument list.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      pos: position just after the suspected template argument.

    Returns:
      True if (linenum, pos) is inside template arguments.
    """
    while linenum < clean_lines.NumLines():
      # Find the earliest character that might indicate a template argument
      line = clean_lines.elided[linenum]
      match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:])
      if not match:
        linenum += 1
        pos = 0
        continue
      token = match.group(1)
      pos += len(match.group(0))
      # These things do not look like template argument list:
      #   class Suspect {
      #   class Suspect x; }
      if token in ('{', '}', ';'): return False
      # These things look like template argument list:
      #   template <class Suspect>
      #   template <class Suspect = default_value>
      #   template <class Suspect[]>
      #   template <class Suspect...>
      if token in ('>', '=', '[', ']', '.'): return True
      # Check if token is an unmatched '<'.
      # If not, move on to the next character.
      if token != '<':
        pos += 1
        if pos >= len(line):
          linenum += 1
          pos = 0
        continue
      # We can't be sure if we just find a single '<', and need to
      # find the matching '>'.
      (_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1)
      if end_pos < 0:
        # Not sure if template argument list or syntax error in file
        return False
      linenum = end_line
      pos = end_pos
    return False

  def UpdatePreprocessor(self, line):
    """Update preprocessor stack.

    We need to handle preprocessors due to classes like this:
      #ifdef SWIG
      struct ResultDetailsPageElementExtensionPoint {
      #else
      struct ResultDetailsPageElementExtensionPoint : public Extension {
      #endif

    We make the following assumptions (good enough for most files):
    - Preprocessor condition evaluates to true from #if up to first
      #else/#elif/#endif.
    - Preprocessor condition evaluates to false from #else/#elif up
      to #endif.  We still perform lint checks on these lines, but
      these do not affect nesting stack.

    Args:
      line: current line to check.
    """
    if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
      # Beginning of #if block, save the nesting stack here.  The saved
      # stack will allow us to restore the parsing state in the #else case.
      self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
    elif Match(r'^\s*#\s*(else|elif)\b', line):
      # Beginning of #else block
      if self.pp_stack:
        if not self.pp_stack[-1].seen_else:
          # This is the first #else or #elif block.  Remember the
          # whole nesting stack up to this point.  This is what we
          # keep after the #endif.
          self.pp_stack[-1].seen_else = True
          self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)

        # Restore the stack to how it was before the #if
        self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
      else:
        # TODO(unknown): unexpected #else, issue warning?
        pass
    elif Match(r'^\s*#\s*endif\b', line):
      # End of #if or #else blocks.
      if self.pp_stack:
        # If we saw an #else, we will need to restore the nesting
        # stack to its former state before the #else, otherwise we
        # will just continue from where we left off.
        if self.pp_stack[-1].seen_else:
          # Here we can just use a shallow copy since we are the last
          # reference to it.
          self.stack = self.pp_stack[-1].stack_before_else
        # Drop the corresponding #if
        self.pp_stack.pop()
      else:
        # TODO(unknown): unexpected #endif, issue warning?
        pass

  # TODO(unknown): Update() is too long, but we will refactor later.
  def Update(self, filename, clean_lines, linenum, error):
    """Update nesting state with current line.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    line = clean_lines.elided[linenum]

    # Remember top of the previous nesting stack.
    #
    # The stack is always pushed/popped and not modified in place, so
    # we can just do a shallow copy instead of copy.deepcopy.  Using
    # deepcopy would slow down cpplint by ~28%.
    if self.stack:
      self.previous_stack_top = self.stack[-1]
    else:
      self.previous_stack_top = None

    # Update pp_stack
    self.UpdatePreprocessor(line)

    # Count parentheses.  This is to avoid adding struct arguments to
    # the nesting stack.
    if self.stack:
      inner_block = self.stack[-1]
      depth_change = line.count('(') - line.count(')')
      inner_block.open_parentheses += depth_change

      # Also check if we are starting or ending an inline assembly block.
      if inner_block.inline_asm in (_NO_ASM, _END_ASM):
        if (depth_change != 0 and
            inner_block.open_parentheses == 1 and
            _MATCH_ASM.match(line)):
          # Enter assembly block
          inner_block.inline_asm = _INSIDE_ASM
        else:
          # Not entering assembly block.  If previous line was _END_ASM,
          # we will now shift to _NO_ASM state.
          inner_block.inline_asm = _NO_ASM
      elif (inner_block.inline_asm == _INSIDE_ASM and
            inner_block.open_parentheses == 0):
        # Exit assembly block
        inner_block.inline_asm = _END_ASM

    # Consume namespace declaration at the beginning of the line.  Do
    # this in a loop so that we catch same line declarations like this:
    #   namespace proto2 { namespace bridge { class MessageSet; } }
    while True:
      # Match start of namespace.  The "\b\s*" below catches namespace
      # declarations even if it weren't followed by a whitespace, this
      # is so that we don't confuse our namespace checker.  The
      # missing spaces will be flagged by CheckSpacing.
      namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
      if not namespace_decl_match:
        break

      new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
      self.stack.append(new_namespace)

      line = namespace_decl_match.group(2)
      if line.find('{') != -1:
        new_namespace.seen_open_brace = True
        line = line[line.find('{') + 1:]

    # Look for a class declaration in whatever is left of the line
    # after parsing namespaces.  The regexp accounts for decorated classes
    # such as in:
    #   class LOCKABLE API Object {
    #   };
    class_decl_match = Match(
        r'^(\s*(?:template\s*<[\w\s<>,:]*>\s*)?'
        r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))'
        r'(.*)$', line)
    if (class_decl_match and
        (not self.stack or self.stack[-1].open_parentheses == 0)):
      # We do not want to accept classes that are actually template arguments:
      #   template <class Ignore1,
      #             class Ignore2 = Default<Args>,
      #             template <Args> class Ignore3>
      #   void Function() {};
      #
      # To avoid template argument cases, we scan forward and look for
      # an unmatched '>'.  If we see one, assume we are inside a
      # template argument list.
      end_declaration = len(class_decl_match.group(1))
      if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration):
        self.stack.append(_ClassInfo(
            class_decl_match.group(3), class_decl_match.group(2),
            clean_lines, linenum))
        line = class_decl_match.group(4)

    # If we have not yet seen the opening brace for the innermost block,
    # run checks here.
    if not self.SeenOpenBrace():
      self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)

    # Update access control if we are inside a class/struct
    if self.stack and isinstance(self.stack[-1], _ClassInfo):
      classinfo = self.stack[-1]
      access_match = Match(
          r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
          r':(?:[^:]|$)',
          line)
      if access_match:
        classinfo.access = access_match.group(2)
        # Check that access keywords are indented +1 space.  Skip this
        # check if the keywords are not preceded by whitespaces.
        # removed by weizhenwei, 2015.06.29;
        # indent = access_match.group(1)
        # if (len(indent) != classinfo.class_indent + 1 and
        #     Match(r'^\s*$', indent)):
        #   if classinfo.is_struct:
        #     parent = 'struct ' + classinfo.name
        #   else:
        #     parent = 'class ' + classinfo.name
        #   slots = ''
        #   if access_match.group(3):
        #     slots = access_match.group(3)
        #   error(filename, linenum, 'whitespace/indent', 3,
        #         '%s%s: should be indented +1 space inside %s' % (
        #             access_match.group(2), slots, parent))

    # Consume braces or semicolons from what's left of the line
    while True:
      # Match first brace, semicolon, or closed parenthesis.
      matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
      if not matched:
        break

      token = matched.group(1)
      if token == '{':
        # If namespace or class hasn't seen a opening brace yet, mark
        # namespace/class head as complete.  Push a new block onto the
        # stack otherwise.
        if not self.SeenOpenBrace():
          self.stack[-1].seen_open_brace = True
        elif Match(r'^extern\s*"[^"]*"\s*\{', line):
          self.stack.append(_ExternCInfo())
        else:
          self.stack.append(_BlockInfo(True))
          if _MATCH_ASM.match(line):
            self.stack[-1].inline_asm = _BLOCK_ASM

      elif token == ';' or token == ')':
        # If we haven't seen an opening brace yet, but we already saw
        # a semicolon, this is probably a forward declaration.  Pop
        # the stack for these.
        #
        # Similarly, if we haven't seen an opening brace yet, but we
        # already saw a closing parenthesis, then these are probably
        # function arguments with extra "class" or "struct" keywords.
        # Also pop these stack for these.
        if not self.SeenOpenBrace():
          self.stack.pop()
      else:  # token == '}'
        # Perform end of block checks and pop the stack.
        if self.stack:
          self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
          self.stack.pop()
      line = matched.group(2)

  def InnermostClass(self):
    """Get class info on the top of the stack.

    Returns:
      A _ClassInfo object if we are inside a class, or None otherwise.
    """
    # Walk the stack from the top down, looking for the nearest class.
    for i in range(len(self.stack), 0, -1):
      classinfo = self.stack[i - 1]
      if isinstance(classinfo, _ClassInfo):
        return classinfo
    return None

  def CheckCompletedBlocks(self, filename, error):
    """Checks that all classes and namespaces have been completely parsed.

    Call this when all lines in a file have been processed.
    Args:
      filename: The name of the current file.
      error: The function to call with any errors found.
    """
    # Note: This test can result in false positives if #ifdef constructs
    # get in the way of brace matching. See the testBuildClass test in
    # cpplint_unittest.py for an example of this.
    for obj in self.stack:
      if isinstance(obj, _ClassInfo):
        error(filename, obj.starting_linenum, 'build/class', 5,
              'Failed to find complete declaration of class %s' %
              obj.name)
      elif isinstance(obj, _NamespaceInfo):
        error(filename, obj.starting_linenum, 'build/namespaces', 5,
              'Failed to find complete declaration of namespace %s' %
              obj.name)
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
                                  nesting_state, error):
  r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.

  Complain about several constructs which gcc-2 accepts, but which are
  not standard C++. Warning about these in lint is one way to ease the
  transition to new compilers.
  - put storage class first (e.g. "static const" instead of "const static").
  - "%lld" instead of %qd" in printf-type functions.
  - "%1$d" is non-standard in printf-type functions.
  - "\%" is an undefined character escape sequence.
  - text after #endif is not allowed.
  - invalid inner-style forward declaration.
  - >? and <? operators, and their >?= and <?= cousins.

  Additionally, check for constructor/destructor style violations and reference
  members, as it is very convenient to do so while checking for
  gcc-2 compliance.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
      the current stack of nested blocks being parsed.
    error: A callable to which errors are reported, which takes 4 arguments:
      filename, line number, error level, and message
  """

  # Remove comments from the line, but leave in strings for now.
  # The printf checks below need to see inside string literals.
  line = clean_lines.lines[linenum]

  if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
    error(filename, linenum, 'runtime/printf_format', 3,
          '%q in format strings is deprecated. Use %ll instead.')

  if Search(r'printf\s*\(.*".*%\d+\$', line):
    error(filename, linenum, 'runtime/printf_format', 2,
          '%N$ formats are unconventional. Try rewriting to avoid them.')

  # Remove escaped backslashes before looking for undefined escapes.
  line = line.replace('\\\\', '')

  if Search(r'("|\').*\\(%|\[|\(|{)', line):
    error(filename, linenum, 'build/printf_format', 3,
          '%, [, (, and { are undefined character escapes. Unescape them.')

  # For the rest, work with both comments and strings removed.
  line = clean_lines.elided[linenum]

  if Search(r'\b(const|volatile|void|char|short|int|long'
            r'|float|double|signed|unsigned'
            r'|schar|u?int8|u?int16|u?int32|u?int64)'
            r'\s+(register|static|extern|typedef)\b',
            line):
    error(filename, linenum, 'build/storage_class', 5,
          'Storage class (static, extern, typedef, etc) should be first.')

  if Match(r'\s*#\s*endif\s*[^/\s]+', line):
    error(filename, linenum, 'build/endif_comment', 5,
          'Uncommented text after #endif is non-standard. Use a comment.')

  if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
    error(filename, linenum, 'build/forward_decl', 5,
          'Inner-style forward declarations are invalid. Remove this line.')

  if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
            line):
    error(filename, linenum, 'build/deprecated', 3,
          '>? and <? (max and min) operators are non-standard and deprecated.')

  if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
    # TODO(unknown): Could it be expanded safely to arbitrary references,
    # without triggering too many false positives? The first
    # attempt triggered 5 warnings for mostly benign code in the regtest, hence
    # the restriction.
    # Here's the original regexp, for the reference:
    # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
    # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
    error(filename, linenum, 'runtime/member_string_references', 2,
          'const string& members are dangerous. It is much better to use '
          'alternatives, such as pointers or simple constants.')

  # Everything else in this function operates on class declarations.
  # Return early if the top of the nesting stack is not a class, or if
  # the class head is not completed yet.
  classinfo = nesting_state.InnermostClass()
  if not classinfo or not classinfo.seen_open_brace:
    return

  # The class may have been declared with namespace or classname qualifiers.
  # The constructor and destructor will not have those qualifiers.
  base_classname = classinfo.name.split('::')[-1]

  # Look for single-argument constructors that aren't marked explicit.
  # Technically a valid construct, but against style. Also look for
  # non-single-argument constructors which are also technically valid, but
  # strongly suggest something is wrong.
  # Group 1 captures an optional "explicit" keyword; group 2 captures the
  # whole argument list (allowing one level of nested parentheses).
  explicit_constructor_match = Match(
      r'\s+(?:inline\s+)?(explicit\s+)?(?:inline\s+)?%s\s*'
      r'\(((?:[^()]|\([^()]*\))*)\)'
      % re.escape(base_classname),
      line)

  if explicit_constructor_match:
    is_marked_explicit = explicit_constructor_match.group(1)

    if not explicit_constructor_match.group(2):
      constructor_args = []
    else:
      constructor_args = explicit_constructor_match.group(2).split(',')

    # collapse arguments so that commas in template parameter lists and function
    # argument parameter lists don't split arguments in two
    i = 0
    while i < len(constructor_args):
      constructor_arg = constructor_args[i]
      # Rejoin a fragment whose '<' or '(' counts are unbalanced with the
      # following fragment until the brackets balance again.
      while (constructor_arg.count('<') > constructor_arg.count('>') or
             constructor_arg.count('(') > constructor_arg.count(')')):
        constructor_arg += ',' + constructor_args[i + 1]
        del constructor_args[i + 1]
      constructor_args[i] = constructor_arg
      i += 1

    # Defaulted arguments allow a multi-parameter constructor to be
    # invoked with a single argument, so they count toward "one arg".
    defaulted_args = [arg for arg in constructor_args if '=' in arg]
    noarg_constructor = (not constructor_args or  # empty arg list
                         # 'void' arg specifier
                         (len(constructor_args) == 1 and
                          constructor_args[0].strip() == 'void'))
    onearg_constructor = ((len(constructor_args) == 1 and  # exactly one arg
                           not noarg_constructor) or
                          # all but at most one arg defaulted
                          (len(constructor_args) >= 1 and
                           not noarg_constructor and
                           len(defaulted_args) >= len(constructor_args) - 1))
    initializer_list_constructor = bool(
        onearg_constructor and
        Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
    copy_constructor = bool(
        onearg_constructor and
        Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
              % re.escape(base_classname), constructor_args[0].strip()))

    if (not is_marked_explicit and
        onearg_constructor and
        not initializer_list_constructor and
        not copy_constructor):
      if defaulted_args:
        error(filename, linenum, 'runtime/explicit', 5,
              'Constructors callable with one argument '
              'should be marked explicit.')
      else:
        error(filename, linenum, 'runtime/explicit', 5,
              'Single-parameter constructors should be marked explicit.')
    elif is_marked_explicit and not onearg_constructor:
      if noarg_constructor:
        error(filename, linenum, 'runtime/explicit', 5,
              'Zero-parameter constructors should not be marked explicit.')
      else:
        error(filename, linenum, 'runtime/explicit', 0,
              'Constructors that require multiple arguments '
              'should not be marked explicit.')
def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
  """Checks for the correctness of various spacing around function calls.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Since function calls often occur inside if/for/while/switch
  # expressions - which have their own, more liberal conventions - we
  # first see if we should be looking inside such an expression for a
  # function call, to which we can apply more strict standards.
  # All later checks operate on fncall, the narrowed text.
  fncall = line    # if there's no control flow construct, look at whole line
  for pattern in (r'\bif\s*\((.*)\)\s*{',
                  r'\bfor\s*\((.*)\)\s*{',
                  r'\bwhile\s*\((.*)\)\s*[{;]',
                  r'\bswitch\s*\((.*)\)\s*{'):
    match = Search(pattern, line)
    if match:
      fncall = match.group(1)    # look inside the parens for function calls
      break

  # Except in if/for/while/switch, there should never be space
  # immediately inside parens (eg "f( 3, 4 )"). We make an exception
  # for nested parens ( (a+b) + c ). Likewise, there should never be
  # a space before a ( when it's a function argument. I assume it's a
  # function argument when the char before the whitespace is legal in
  # a function name (alnum + _) and we're not starting a macro. Also ignore
  # pointers and references to arrays and functions coz they're too tricky:
  # we use a very simple way to recognize these:
  # " (something)(maybe-something)" or
  # " (something)(maybe-something," or
  # " (something)[something]"
  # Note that we assume the contents of [] to be short enough that
  # they'll never need to wrap.
  if (  # Ignore control structures.
      not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
                 fncall) and
      # Ignore pointers/references to functions.
      not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
      # Ignore pointers/references to arrays.
      not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
    # Negative lookaheads exempt a line-continuation backslash after the "(".
    if Search(r'\w\s*\(\s(?!\s*\\$)', fncall):      # a ( used for a fn call
      error(filename, linenum, 'whitespace/parens', 4,
            'Extra space after ( in function call')
    elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
      error(filename, linenum, 'whitespace/parens', 2,
            'Extra space after (')
    if (Search(r'\w\s+\(', fncall) and
        not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and
        not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall)):
      # TODO(unknown): Space after an operator function seem to be a common
      # error, silence those for now by restricting them to highest verbosity.
      # Note: by this point operator names have been masked to "operator___"
      # (see CheckOperatorSpacing), hence the underscore match below.
      if Search(r'\boperator_*\b', line):
        error(filename, linenum, 'whitespace/parens', 0,
              'Extra space before ( in function call')
      else:
        error(filename, linenum, 'whitespace/parens', 4,
              'Extra space before ( in function call')
    # If the ) is followed only by a newline or a { + newline, assume it's
    # part of a control statement (if/while/etc), and don't complain
    if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
      # If the closing parenthesis is preceded by only whitespaces,
      # try to give a more descriptive error message.
      if Search(r'^\s+\)', fncall):
        error(filename, linenum, 'whitespace/parens', 2,
              'Closing ) should be moved to the previous line')
      else:
        error(filename, linenum, 'whitespace/parens', 2,
              'Extra space before )')
def IsBlankLine(line):
  """Returns true if the given line is blank.

  We consider a line to be blank if the line is empty or consists of
  only white spaces.

  Args:
    line: A line of a string.

  Returns:
    True, if the given line is blank.
  """
  # An empty string is trivially blank; otherwise defer to str.isspace().
  return True if not line else line.isspace()
def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
                                 error):
  """Flags items that are improperly indented directly inside a namespace.

  Args:
    filename: The name of the current file.
    nesting_state: A NestingState instance which maintains information about
        the current stack of nested blocks being parsed.
    clean_lines: A CleansedLines instance containing the file.
    line: The number of the line to check.
    error: The function to call with any errors found.
  """
  stack = nesting_state.stack
  prev_top = nesting_state.previous_stack_top
  # True when the innermost block sits directly inside a namespace whose
  # indentation should be checked.
  is_namespace_indent_item = (
      len(stack) > 1 and
      stack[-1].check_namespace_indentation and
      isinstance(prev_top, _NamespaceInfo) and
      prev_top == stack[-2])

  if ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
                                     clean_lines.elided, line):
    CheckItemIndentationInNamespace(filename, clean_lines.elided,
                                    line, error)
def CheckForFunctionLengths(filename, clean_lines, linenum,
                            function_state, error):
  """Reports for long function bodies.

  For an overview why this is done, see:
  http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions

  Uses a simplistic algorithm assuming other style guidelines
  (especially spacing) are followed.
  Only checks unindented functions, so class members are unchecked.
  Trivial bodies are unchecked, so constructors with huge initializer lists
  may be missed.
  Blank/comment lines are not counted so as to avoid encouraging the removal
  of vertical space and comments just to get through a lint check.
  NOLINT *on the last line of a function* disables this check.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    function_state: Current function name and lines in body so far.
    error: The function to call with any errors found.
  """
  lines = clean_lines.lines
  line = lines[linenum]
  # joined_line accumulates the header across wrapped lines so the TEST
  # macro parameter list can be extracted even when it spans lines.
  joined_line = ''

  starting_func = False
  regexp = r'(\w(\w|::|\*|\&|\s)*)\('  # decls * & space::name( ...
  match_result = Match(regexp, line)
  if match_result:
    # If the name is all caps and underscores, figure it's a macro and
    # ignore it, unless it's TEST or TEST_F.
    function_name = match_result.group(1).split()[-1]
    if function_name == 'TEST' or function_name == 'TEST_F' or (
        not Match(r'[A-Z_]+$', function_name)):
      starting_func = True

  if starting_func:
    body_found = False
    # Scan forward from the declaration until we find either the opening
    # brace (a real body) or a ';'/'}' (declaration or trivial function).
    for start_linenum in xrange(linenum, clean_lines.NumLines()):
      start_line = lines[start_linenum]
      joined_line += ' ' + start_line.lstrip()
      if Search(r'(;|})', start_line):  # Declarations and trivial functions
        body_found = True
        break                              # ... ignore
      elif Search(r'{', start_line):
        body_found = True
        function = Search(r'((\w|:)*)\(', line).group(1)
        if Match(r'TEST', function):    # Handle TEST... macros
          parameter_regexp = Search(r'(\(.*\))', joined_line)
          if parameter_regexp:             # Ignore bad syntax
            function += parameter_regexp.group(1)
        else:
          function += '()'
        function_state.Begin(function)
        break
    if not body_found:
      # No body for the function (or evidence of a non-function) was found.
      error(filename, linenum, 'readability/fn_size', 5,
            'Lint failed to find start of function body.')
  elif Match(r'^\}\s*$', line):  # function end
    function_state.Check(error, filename, linenum)
    function_state.End()
  elif not Match(r'^\s*$', line):
    function_state.Count()  # Count non-blank/non-comment lines.
# Matches the start of a "// TODO" comment.  Capture groups:
#   1: the whitespace between "//" and "TODO" (exactly one space expected)
#   2: the "(username)" annotation, if present
#   3: the character following the optional colon (space or end-of-line
#      expected); may be None when something else follows.
_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
def CheckComment(line, filename, linenum, next_line_start, error):
  """Checks for common mistakes in comments.

  Args:
    line: The line in question.
    filename: The name of the current file.
    linenum: The number of the line to check.
    next_line_start: The first non-whitespace column of the next line.
    error: The function to call with any errors found.
  """
  commentpos = line.find('//')
  if commentpos == -1:
    return

  # Count unescaped double quotes before the "//"; an odd count means the
  # "//" is inside a string literal and is not a comment at all.
  quote_count = (line.count('"', 0, commentpos) -
                 line.count('\\"', 0, commentpos))
  if quote_count % 2 != 0:
    return

  # Allow one space for new scopes, two spaces otherwise:
  opens_new_scope = (Match(r'^.*{ *//', line) and
                     next_line_start == commentpos)
  too_close_to_code = (
      (commentpos >= 1 and
       line[commentpos-1] not in string.whitespace) or
      (commentpos >= 2 and
       line[commentpos-2] not in string.whitespace))
  if not opens_new_scope and too_close_to_code:
    error(filename, linenum, 'whitespace/comments', 2,
          'At least two spaces is best between code and comments')

  # Checks for common mistakes in TODO comments.
  comment = line[commentpos:]
  todo = _RE_PATTERN_TODO.match(comment)
  if todo:
    # One whitespace is correct; zero whitespace is handled elsewhere.
    if len(todo.group(1)) > 1:
      error(filename, linenum, 'whitespace/todo', 2,
            'Too many spaces before TODO')

    if not todo.group(2):
      error(filename, linenum, 'readability/todo', 2,
            'Missing username in TODO; it should look like '
            '"// TODO(my_username): Stuff."')

    middle_whitespace = todo.group(3)
    # Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
    if middle_whitespace != ' ' and middle_whitespace != '':
      error(filename, linenum, 'whitespace/todo', 2,
            'TODO(my_username) should be followed by a space')

  # If the comment contains an alphanumeric character, there
  # should be a space somewhere between it and the //.
  if Match(r'//[^ ]*\w', comment):
    error(filename, linenum, 'whitespace/comments', 4,
          'Should have a space between // and comment')
def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
  """Checks for improper use of DISALLOW* macros.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
        the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]  # get rid of comments and strings

  disallow = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|'
                    r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line)
  if not disallow:
    return

  innermost = nesting_state.stack[-1] if nesting_state.stack else None
  if not isinstance(innermost, _ClassInfo):
    # Found DISALLOW* macro outside a class declaration, or perhaps it
    # was used inside a function when it should have been part of the
    # class declaration.  We could issue a warning here, but it
    # probably resulted in a compiler error already.
    return

  if innermost.access != 'private':
    error(filename, linenum, 'readability/constructors', 3,
          '%s must be in the private: section' % disallow.group(1))
def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
  """Checks for the correctness of various spacing issues in the code.

  Things we check for: spaces around operators, spaces after
  if/for/while/switch, no spaces around parens in function calls, two
  spaces between code and comment, don't start a block with a blank
  line, don't end a function with a blank line, don't add a blank line
  after public/protected/private, don't have too many blank lines in a row.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
      the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """

  # Don't use "elided" lines here, otherwise we can't check commented lines.
  # Don't want to use "raw" either, because we don't want to check inside C++11
  # raw strings,
  raw = clean_lines.lines_without_raw_strings
  line = raw[linenum]

  # Before nixing comments, check if the line is blank for no good
  # reason. This includes the first line after a block is opened, and
  # blank lines at the end of a function (ie, right before a line like '}'
  #
  # Skip all the blank line checks if we are immediately inside a
  # namespace body. In other words, don't issue blank line warnings
  # for this block:
  #   namespace {
  #
  #   }
  #
  # A warning about missing end of namespace comments will be issued instead.
  #
  # Also skip blank line checks for 'extern "C"' blocks, which are formatted
  # like namespaces.
  if (IsBlankLine(line) and
      not nesting_state.InNamespaceBody() and
      not nesting_state.InExternC()):
    elided = clean_lines.elided
    prev_line = elided[linenum - 1]
    prevbrace = prev_line.rfind('{')
    # TODO(unknown): Don't complain if line before blank line, and line after,
    #                both start with alnums and are indented the same amount.
    #                This ignores whitespace at the start of a namespace block
    #                because those are not usually indented.
    if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
      # OK, we have a blank line at the start of a code block. Before we
      # complain, we check if it is an exception to the rule: The previous
      # non-empty line has the parameters of a function header that are indented
      # 4 spaces (because they did not fit in a 80 column line when placed on
      # the same line as the function name). We also check for the case where
      # the previous line is indented 6 spaces, which may happen when the
      # initializers of a constructor do not fit into a 80 column line.
      exception = False
      if Match(r' {6}\w', prev_line):  # Initializer list?
        # We are looking for the opening column of initializer list, which
        # should be indented 4 spaces to cause 6 space indentation afterwards.
        search_position = linenum-2
        while (search_position >= 0
               and Match(r' {6}\w', elided[search_position])):
          search_position -= 1
        exception = (search_position >= 0
                     and elided[search_position][:5] == '    :')
      else:
        # Search for the function arguments or an initializer list. We use a
        # simple heuristic here: If the line is indented 4 spaces; and we have a
        # closing paren, without the opening paren, followed by an opening brace
        # or colon (for initializer lists) we assume that it is the last line of
        # a function header. If we have a colon indented 4 spaces, it is an
        # initializer list.
        exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
                           prev_line)
                     or Match(r' {4}:', prev_line))

      if not exception:
        error(filename, linenum, 'whitespace/blank_line', 2,
              'Redundant blank line at the start of a code block '
              'should be deleted.')
    # Ignore blank lines at the end of a block in a long if-else
    # chain, like this:
    #   if (condition1) {
    #     // Something followed by a blank line
    #
    #   } else if (condition2) {
    #     // Something else
    #   }
    if linenum + 1 < clean_lines.NumLines():
      next_line = raw[linenum + 1]
      if (next_line
          and Match(r'\s*}', next_line)
          and next_line.find('} else ') == -1):
        error(filename, linenum, 'whitespace/blank_line', 3,
              'Redundant blank line at the end of a code block '
              'should be deleted.')

    matched = Match(r'\s*(public|protected|private):', prev_line)
    if matched:
      error(filename, linenum, 'whitespace/blank_line', 3,
            'Do not leave a blank line after "%s:"' % matched.group(1))

  # Next, check comments
  # next_line_start is the column of the first non-whitespace character on
  # the following line; CheckComment uses it to allow a single space after
  # a comment that opens a new scope.
  next_line_start = 0
  if linenum + 1 < clean_lines.NumLines():
    next_line = raw[linenum + 1]
    next_line_start = len(next_line) - len(next_line.lstrip())
  CheckComment(line, filename, linenum, next_line_start, error)

  # get rid of comments and strings
  line = clean_lines.elided[linenum]

  # You shouldn't have spaces before your brackets, except maybe after
  # 'delete []' or 'return []() {};'
  if Search(r'\w\s+\[', line) and not Search(r'(?:delete|return)\s+\[', line):
    error(filename, linenum, 'whitespace/braces', 5,
          'Extra space before [')

  # In range-based for, we wanted spaces before and after the colon, but
  # not around "::" tokens that might appear.
  if (Search(r'for *\(.*[^:]:[^: ]', line) or
      Search(r'for *\(.*[^: ]:[^:]', line)):
    error(filename, linenum, 'whitespace/forcolon', 2,
          'Missing space around colon in range-based for loop')
def CheckOperatorSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing around operators.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Don't try to do spacing checks for operator methods. Do this by
  # replacing the troublesome characters with something else,
  # preserving column position for all other characters.
  #
  # The replacement is done repeatedly to avoid false positives from
  # operators that call operators.
  # E.g. "operator<<" becomes "operator__" so the "<<" check below does
  # not fire on the operator's own name.
  while True:
    match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
    if match:
      line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)
    else:
      break

  # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
  # Otherwise not. Note we only check for non-spaces on *both* sides;
  # sometimes people put non-spaces on one side when aligning ='s among
  # many lines (not that this is behavior that I approve of...)
  if Search(r'[\w.]=[\w.]', line) and not Search(r'\b(if|while) ', line):
    error(filename, linenum, 'whitespace/operators', 4,
          'Missing spaces around =')

  # It's ok not to have spaces around binary operators like + - * /, but if
  # there's too little whitespace, we get concerned. It's hard to tell,
  # though, so we punt on this one for now. TODO.

  # You should always have whitespace around binary operators.
  #
  # Check <= and >= first to avoid false positives with < and >, then
  # check non-include lines for spacing around < and >.
  #
  # If the operator is followed by a comma, assume it's be used in a
  # macro context and don't do any checks. This avoids false
  # positives.
  #
  # Note that && is not included here. Those are checked separately
  # in CheckRValueReference
  match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around %s' % match.group(1))
  elif not Match(r'#.*include', line):
    # Look for < that is not surrounded by spaces. This is only
    # triggered if both sides are missing spaces, even though
    # technically should should flag if at least one side is missing a
    # space. This is done to avoid some false positives with shifts.
    match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
    if match:
      # If the "<" closes into a matching ">", it is a template bracket,
      # not a comparison, so only warn when no close is found.
      (_, _, end_pos) = CloseExpression(
          clean_lines, linenum, len(match.group(1)))
      if end_pos <= -1:
        error(filename, linenum, 'whitespace/operators', 3,
              'Missing spaces around <')

    # Look for > that is not surrounded by spaces. Similar to the
    # above, we only trigger if both sides are missing spaces to avoid
    # false positives with shifts.
    match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
    if match:
      (_, _, start_pos) = ReverseCloseExpression(
          clean_lines, linenum, len(match.group(1)))
      if start_pos <= -1:
        error(filename, linenum, 'whitespace/operators', 3,
              'Missing spaces around >')

  # We allow no-spaces around << when used like this: 10<<20, but
  # not otherwise (particularly, not when used as streams)
  #
  # We also allow operators following an opening parenthesis, since
  # those tend to be macros that deal with operators.
  match = Search(r'(operator|\S)(?:L|UL|ULL|l|ul|ull)?<<([^\s,=])', line)
  if (match and match.group(1) != '(' and
      not (match.group(1).isdigit() and match.group(2).isdigit()) and
      not (match.group(1) == 'operator' and match.group(2) == ';')):
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around <<')

  # We allow no-spaces around >> for almost anything. This is because
  # C++11 allows ">>" to close nested templates, which accounts for
  # most cases when ">>" is not followed by a space.
  #
  # We still warn on ">>" followed by alpha character, because that is
  # likely due to ">>" being used for right shifts, e.g.:
  #   value >> alpha
  #
  # When ">>" is used to close templates, the alphanumeric letter that
  # follows would be part of an identifier, and there should still be
  # a space separating the template type and the identifier.
  #   type<type<type>> alpha
  match = Search(r'>>[a-zA-Z_]', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around >>')

  # There shouldn't be space around unary operators
  match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 4,
          'Extra space for operator %s' % match.group(1))
def CheckParenthesisSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing around parentheses.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # A control-flow keyword must be separated from its paren by a space.
  keyword_paren = Search(r' (if\(|for\(|while\(|switch\()', line)
  if keyword_paren:
    error(filename, linenum, 'whitespace/parens', 5,
          'Missing space before ( in %s' % keyword_paren.group(1))

  # For if/for/while/switch, the left and right parens should be
  # consistent about how many spaces are inside the parens, and
  # there should either be zero or one spaces inside the parens.
  # We don't want: "if ( foo)" or "if ( foo )".
  # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
  padding = Search(r'\b(if|for|while|switch)\s*'
                   r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
                   line)
  if not padding:
    return
  left_pad = padding.group(2)
  first_char = padding.group(3)
  right_pad = padding.group(4)
  if len(left_pad) != len(right_pad):
    # A "for" with an empty first or last clause legitimately has
    # asymmetric padding.
    for_empty_clause = (
        first_char == ';' and
        len(left_pad) == 1 + len(right_pad) or
        not left_pad and Search(r'\bfor\s*\(.*; \)', line))
    if not for_empty_clause:
      error(filename, linenum, 'whitespace/parens', 5,
            'Mismatching spaces inside () in %s' % padding.group(1))
  if len(left_pad) not in (0, 1):
    error(filename, linenum, 'whitespace/parens', 5,
          'Should have zero or one spaces inside ( and ) in %s' %
          padding.group(1))
def CheckCommaSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing near commas and semicolons.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  raw = clean_lines.lines_without_raw_strings
  line = clean_lines.elided[linenum]

  # A comma should be followed by a space unless the next character is
  # another comma (the only time that happens is for empty macro
  # arguments).  "operator," definitions are masked out first.
  #
  # Two passes are needed: the elided line flags a candidate, and the raw
  # line confirms the missing whitespace is not merely an elided comment.
  elided_hit = Search(r',[^,\s]',
                      ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line))
  if elided_hit and Search(r',[^,\s]', raw[linenum]):
    error(filename, linenum, 'whitespace/comma', 3,
          'Missing space after ,')

  # You should always have a space after a semicolon
  # except for few corner cases
  # TODO(unknown): clarify if 'if (1) { return 1;}' is requires one more
  # space after ;
  if Search(r';[^\s};\\)/]', line):
    error(filename, linenum, 'whitespace/semicolon', 3,
          'Missing space after ;')
def CheckBracesSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing near braces and trailing semicolons.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Except after an opening paren, or after another opening brace (in case of
  # an initializer list, for instance), you should have spaces before your
  # braces. And since you should never have braces at the beginning of a line,
  # this is an easy test.
  match = Match(r'^(.*[^ ({]){', line)
  if match:
    # Try a bit harder to check for brace initialization. This
    # happens in one of the following forms:
    #   Constructor() : initializer_list_{} { ... }
    #   Constructor{}.MemberFunction()
    #   Type variable{};
    #   FunctionCall(type{}, ...);
    #   LastArgument(..., type{});
    #   LOG(INFO) << type{} << " ...";
    #   map_of_type[{...}] = ...;
    #   ternary = expr ? new type{} : nullptr;
    #   OuterTemplate<InnerTemplateConstructor<Type>{}>
    #
    # We check for the character following the closing brace, and
    # silence the warning if it's one of those listed above, i.e.
    # "{.;,)<>]:".
    #
    # To account for nested initializer list, we allow any number of
    # closing braces up to "{;,)<". We can't simply silence the
    # warning on first sight of closing brace, because that would
    # cause false negatives for things that are not initializer lists.
    #   Silence this:         But not this:
    #     Outer{                if (...) {
    #       Inner{...}            if (...){  // Missing space before {
    #     };                    }
    #
    # There is a false negative with this approach if people inserted
    # spurious semicolons, e.g. "if (cond){};", but we will catch the
    # spurious semicolon with a separate check.
    (endline, endlinenum, endpos) = CloseExpression(
        clean_lines, linenum, len(match.group(1)))
    trailing_text = ''
    if endpos > -1:
      trailing_text = endline[endpos:]
    # Collect up to two more lines after the closing brace so the
    # trailing-character test can see past a line break.
    for offset in xrange(endlinenum + 1,
                         min(endlinenum + 3, clean_lines.NumLines() - 1)):
      trailing_text += clean_lines.elided[offset]
    if not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text):
      error(filename, linenum, 'whitespace/braces', 5,
            'Missing space before {')

  # Make sure '} else {' has spaces.
  if Search(r'}else', line):
    error(filename, linenum, 'whitespace/braces', 5,
          'Missing space before else')

  # You shouldn't have a space before a semicolon at the end of the line.
  # There's a special case for "for" since the style guide allows space before
  # the semicolon there.
  if Search(r':\s*;\s*$', line):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Semicolon defining empty statement. Use {} instead.')
  elif Search(r'^\s*;\s*$', line):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Line contains only semicolon. If this should be an empty statement, '
          'use {} instead.')
  elif (Search(r'\s+;\s*$', line) and
        not Search(r'\bfor\b', line)):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Extra space before last semicolon. If this should be an empty '
          'statement, use {} instead.')
def IsDecltype(clean_lines, linenum, column):
  """Check if the token ending on (linenum, column) is decltype().

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: the number of the line to check.
    column: end column of the token to check.
  Returns:
    True if this token is decltype() expression, False otherwise.
  """
  (text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column)
  # No matching opening parenthesis was found.
  if start_col < 0:
    return False
  # The parenthesized expression is a decltype() exactly when the text in
  # front of the opening "(" ends with the decltype keyword.
  return bool(Search(r'\bdecltype\s*$', text[0:start_col]))
def IsTemplateParameterList(clean_lines, linenum, column):
  """Determine whether the token ending at (linenum, column) closes template<>.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: the number of the line to check.
    column: end column of the token to check.
  Returns:
    True if this token is end of a template parameter list, False otherwise.
  """
  # Walk back to the matching '<'; if there is none, this is not a
  # template parameter list.
  (_, open_line, open_col) = ReverseCloseExpression(clean_lines, linenum, column)
  if open_col < 0:
    return False
  # A template parameter list is preceded by the 'template' keyword.
  preceding = clean_lines.elided[open_line][0:open_col]
  return bool(Search(r'\btemplate\s*$', preceding))
def IsRValueType(clean_lines, nesting_state, linenum, column):
  """Check if the token ending on (linenum, column) is a type.

  Assumes that text to the right of the column is "&&" or a function
  name.  This function is heuristic and recursive; a False return means
  "not sure", not "definitely an expression".

  Args:
    clean_lines: A CleansedLines instance containing the file.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    linenum: the number of the line to check.
    column: end column of the token to check.
  Returns:
    True if this token is a type, False if we are not sure.
  """
  prefix = clean_lines.elided[linenum][0:column]
  # Get one word to the left.  If we failed to do so, this is most
  # likely not a type, since it's unlikely that the type name and "&&"
  # would be split across multiple lines.
  match = Match(r'^(.*)(\b\w+|[>*)&])\s*$', prefix)
  if not match:
    return False
  # Check text following the token.  If it's "&&>" or "&&," or "&&...", it's
  # most likely a rvalue reference used inside a template.
  suffix = clean_lines.elided[linenum][column:]
  if Match(r'&&\s*(?:[>,]|\.\.\.)', suffix):
    return True
  # Check for simple type and end of templates:
  #   int&& variable
  #   vector<int>&& variable
  #
  # Because this function is called recursively, we also need to
  # recognize pointer and reference types:
  #   int* Function()
  #   int& Function()
  if match.group(2) in ['char', 'char16_t', 'char32_t', 'wchar_t', 'bool',
                        'short', 'int', 'long', 'signed', 'unsigned',
                        'float', 'double', 'void', 'auto', '>', '*', '&']:
    return True
  # If we see a close parenthesis, look for decltype on the other side.
  # decltype would unambiguously identify a type, anything else is
  # probably a parenthesized expression and not a type.
  if match.group(2) == ')':
    return IsDecltype(
        clean_lines, linenum, len(match.group(1)) + len(match.group(2)) - 1)
  # Check for casts and cv-qualifiers.
  #   match.group(1)  remainder
  #   --------------  ---------
  #   const_cast<     type&&
  #   const           type&&
  #   type            const&&
  if Search(r'\b(?:const_cast\s*<|static_cast\s*<|dynamic_cast\s*<|'
            r'reinterpret_cast\s*<|\w+\s)\s*$',
            match.group(1)):
    return True
  # Look for a preceding symbol that might help differentiate the context.
  # These are the cases that would be ambiguous:
  #   match.group(1)  remainder
  #   --------------  ---------
  #   Call         (  expression &&
  #   Declaration  (  type&&
  #   sizeof       (  type&&
  #   if           (  expression &&
  #   while        (  expression &&
  #   for          (  type&&
  #   for(         ;  expression &&
  #   statement    ;  type&&
  #   block        {  type&&
  #   constructor  {  expression &&
  start = linenum
  line = match.group(1)
  match_symbol = None
  while start >= 0:
    # We want to skip over identifiers and commas to get to a symbol.
    # Commas are skipped so that we can find the opening parenthesis
    # for function parameter lists.
    match_symbol = Match(r'^(.*)([^\w\s,])[\w\s,]*$', line)
    if match_symbol:
      break
    start -= 1
    line = clean_lines.elided[start]
  if not match_symbol:
    # Probably the first statement in the file is an rvalue reference
    return True
  if match_symbol.group(2) == '}':
    # Found closing brace, probably an indicate of this:
    #   block{} type&&
    return True
  if match_symbol.group(2) == ';':
    # Found semicolon, probably one of these:
    #   for(; expression &&
    #   statement; type&&
    # Look for the previous 'for(' in the previous lines.
    before_text = match_symbol.group(1)
    for i in xrange(start - 1, max(start - 6, 0), -1):
      before_text = clean_lines.elided[i] + before_text
    if Search(r'for\s*\([^{};]*$', before_text):
      # This is the condition inside a for-loop
      return False
    # Did not find a for-init-statement before this semicolon, so this
    # is probably a new statement and not a condition.
    return True
  if match_symbol.group(2) == '{':
    # Found opening brace, probably one of these:
    #   block{ type&& = ... ; }
    #   constructor{ expression && expression }
    # Look for a closing brace or a semicolon.  If we see a semicolon
    # first, this is probably a rvalue reference.
    line = clean_lines.elided[start][0:len(match_symbol.group(1)) + 1]
    end = start
    depth = 1
    while True:
      for ch in line:
        if ch == ';':
          return True
        elif ch == '{':
          depth += 1
        elif ch == '}':
          depth -= 1
          if depth == 0:
            return False
      end += 1
      if end >= clean_lines.NumLines():
        break
      line = clean_lines.elided[end]
    # Incomplete program?
    return False
  if match_symbol.group(2) == '(':
    # Opening parenthesis.  Need to check what's to the left of the
    # parenthesis.
    #
    # NOTE(review): an earlier revision also prepended the previous line
    # here "for additional context", but the concatenated value was
    # immediately overwritten by this assignment, so the look-back was
    # dead code.  It has been removed; behavior is unchanged.
    before_text = match_symbol.group(1)
    # Patterns that are likely to be types:
    #   [](type&&
    #   for (type&&
    #   sizeof(type&&
    #   operator=(type&&
    #
    if Search(r'(?:\]|\bfor|\bsizeof|\boperator\s*\S+\s*)\s*$', before_text):
      return True
    # Patterns that are likely to be expressions:
    #   if (expression &&
    #   while (expression &&
    #   : initializer(expression &&
    #   , initializer(expression &&
    #   ( FunctionCall(expression &&
    #   + FunctionCall(expression &&
    #   + (expression &&
    #
    # The last '+' represents operators such as '+' and '-'.
    if Search(r'(?:\bif|\bwhile|[-+=%^(<!?:,&*]\s*)$', before_text):
      return False
    # Something else.  Check that tokens to the left look like
    #   return_type function_name
    match_func = Match(r'^(.*)\s+\w(?:\w|::)*(?:<[^<>]*>)?\s*$',
                       match_symbol.group(1))
    if match_func:
      # Check for constructors, which don't have return types.
      if Search(r'\b(?:explicit|inline)$', match_func.group(1)):
        return True
      implicit_constructor = Match(r'\s*(\w+)\((?:const\s+)?(\w+)', prefix)
      if (implicit_constructor and
          implicit_constructor.group(1) == implicit_constructor.group(2)):
        return True
      # Recurse on what precedes the function name; if it is itself a
      # type, then "type&& name(" is a declaration.
      return IsRValueType(clean_lines, nesting_state, linenum,
                          len(match_func.group(1)))
    # Nothing before the function name.  If this is inside a block scope,
    # this is probably a function call.
    return not (nesting_state.previous_stack_top and
                nesting_state.previous_stack_top.IsBlockInfo())
  if match_symbol.group(2) == '>':
    # Possibly a closing bracket, check that what's on the other side
    # looks like the start of a template.
    return IsTemplateParameterList(
        clean_lines, start, len(match_symbol.group(1)))
  # Some other symbol, usually something like "a=b&&c".  This is most
  # likely not a type.
  return False
def IsDeletedOrDefault(clean_lines, linenum):
  """Check if current constructor or operator is deleted or default.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
  Returns:
    True if this is a deleted or default constructor.
  """
  # Locate the parameter list, then look for "= delete" or "= default"
  # following the matching close parenthesis.
  paren_col = clean_lines.elided[linenum].find('(')
  if paren_col < 0:
    return False
  (line_after, _, close_col) = CloseExpression(
      clean_lines, linenum, paren_col)
  if close_col < 0:
    return False
  return Match(r'\s*=\s*(?:delete|default)\b', line_after[close_col:])
def IsRValueAllowed(clean_lines, linenum):
  """Check if RValue reference is allowed on a particular line.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
  Returns:
    True if line is within the region where RValue references are allowed.
  """
  # Regions explicitly bracketed by GOOGLE_ALLOW_RVALUE_REFERENCES_PUSH/POP
  # macros.  Scan backwards for the nearest marker; if it is a PUSH, scan
  # forwards to confirm a matching POP follows this line.
  for back_idx in xrange(linenum, 0, -1):
    marker = clean_lines.elided[back_idx]
    if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', marker):
      if not marker.endswith('PUSH'):
        return False
      for fwd_idx in xrange(linenum, clean_lines.NumLines(), 1):
        marker = clean_lines.elided[fwd_idx]
        if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', marker):
          return marker.endswith('POP')
  # Deleted/defaulted assignment operators are allowed.
  current = clean_lines.elided[linenum]
  if Search(r'\boperator\s*=\s*\(', current):
    return IsDeletedOrDefault(clean_lines, linenum)
  # Out-of-line constructor definitions of the form Foo::Foo(.
  ctor = Match(r'\s*([\w<>]+)\s*::\s*([\w<>]+)\s*\(', current)
  if ctor and ctor.group(1) == ctor.group(2):
    return IsDeletedOrDefault(clean_lines, linenum)
  # Constructors marked explicit or inline.
  if Search(r'\b(?:explicit|inline)\s+[\w<>]+\s*\(', current):
    return IsDeletedOrDefault(clean_lines, linenum)
  # Bare "Name(" at the start of a statement: likely a constructor if the
  # previous line ends a statement/block (i.e. there is no return type).
  if Match(r'\s*[\w<>]+\s*\(', current):
    previous_line = 'ReturnType'
    if linenum > 0:
      previous_line = clean_lines.elided[linenum - 1]
    if Match(r'^\s*$', previous_line) or Search(r'[{}:;]\s*$', previous_line):
      return IsDeletedOrDefault(clean_lines, linenum)
  return False
def CheckRValueReference(filename, clean_lines, linenum, nesting_state, error):
  """Check for rvalue references.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # Find "&&" that is missing a space on at least one side.
  # TODO(unknown): "&&" surrounded by spaces on both sides is not checked,
  # to avoid false positives with boolean expressions.
  line = clean_lines.elided[linenum]
  match = Match(r'^(.*\S)&&', line)
  if match is None:
    match = Match(r'(.*)&&\S', line)
  if match is None or '(&&)' in line:
    return
  if Search(r'\boperator\s*$', match.group(1)):
    # Declaration of an overloaded operator&&, not an rvalue reference.
    return
  # Either poorly formed && or an rvalue reference.  Decide which by
  # examining what is to the left of the "&&": a type means rvalue
  # reference, anything else means a mis-spaced operator.
  and_pos = len(match.group(1))
  if not IsRValueType(clean_lines, nesting_state, linenum, and_pos):
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around &&')
  elif not IsRValueAllowed(clean_lines, linenum):
    error(filename, linenum, 'build/c++11', 3,
          'RValue references are an unapproved C++ feature.')
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
  """Checks for additional blank line issues related to sections.

  Currently the only thing checked here is blank line before protected/private.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    class_info: A _ClassInfo objects.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Skip small classes (25 lines or less -- roughly one terminal screen);
  # also skip the class's own first line.  An unfound class end leaves
  # last_line at zero, which the first condition also filters out.
  if (class_info.last_line - class_info.starting_linenum <= 24 or
      linenum <= class_info.starting_linenum):
    return

  matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
  if not matched:
    return

  # No warning when the preceding line is blank, mentions class/struct
  # (start of class, or a forward-declared inner class), or ends with a
  # backslash (class defined inside a C macro).
  prev_line = clean_lines.lines[linenum - 1]
  if (IsBlankLine(prev_line) or
      Search(r'\b(class|struct)\b', prev_line) or
      Search(r'\\$', prev_line)):
    return

  # Try a bit harder to find the beginning of the class, to account for
  # multi-line base-specifier lists, e.g.:
  #   class Derived
  #       : public Base {
  end_class_head = class_info.starting_linenum
  for i in range(class_info.starting_linenum, linenum):
    if Search(r'\{\s*$', clean_lines.lines[i]):
      end_class_head = i
      break
  if end_class_head < linenum - 1:
    error(filename, linenum, 'whitespace/blank_line', 3,
          '"%s:" should be preceded by a blank line' % matched.group(1))
def GetPreviousNonBlankLine(clean_lines, linenum):
  """Return the most recent non-blank line and its line number.

  Args:
    clean_lines: A CleansedLines instance containing the file contents.
    linenum: The number of the line to check.
  Returns:
    A tuple (contents, linenum) for the closest preceding non-blank line,
    or ('', -1) when no earlier non-blank line exists.
  """
  candidate = linenum - 1
  while candidate >= 0:
    contents = clean_lines.elided[candidate]
    if not IsBlankLine(contents):
      return (contents, candidate)
    candidate -= 1
  return ('', -1)
def CheckBraces(filename, clean_lines, linenum, error):
  """Looks for misplaced braces (e.g. at the end of line).

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """

  line = clean_lines.elided[linenum]  # get rid of comments and strings

  if Match(r'\s*{\s*$', line):
    # We allow an open brace to start a line in the case where someone is using
    # braces in a block to explicitly create a new scope, which is commonly used
    # to control the lifetime of stack-allocated variables.  Braces are also
    # used for brace initializers inside function calls.  We don't detect this
    # perfectly: we just don't complain if the last non-whitespace character on
    # the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
    # previous line starts a preprocessor block.
    prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
    if (not Search(r'[,;:}{(]\s*$', prevline) and
        not Match(r'\s*#', prevline)):
      error(filename, linenum, 'whitespace/braces', 4,
            '{ should almost always be at the end of the previous line')

  # An else clause should be on the same line as the preceding closing brace.
  if Match(r'\s*else\b\s*(?:if\b|\{|$)', line):
    prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
    if Match(r'\s*}\s*$', prevline):
      error(filename, linenum, 'whitespace/newline', 4,
            'An else should appear on the same line as the preceding }')

  # If braces come on one side of an else, they should be on both.
  # However, we have to worry about "else if" that spans multiple lines!
  if Search(r'else if\s*\(', line):       # could be multi-line if
    brace_on_left = bool(Search(r'}\s*else if\s*\(', line))
    # find the ( after the if
    pos = line.find('else if')
    pos = line.find('(', pos)
    # pos > 0 always holds when '(' is found here, since 'else if' precedes it.
    if pos > 0:
      (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
      # Look for a '{' anywhere after the closing ')' of the condition.
      brace_on_right = endline[endpos:].find('{') != -1
      if brace_on_left != brace_on_right:    # must be brace after if
        error(filename, linenum, 'readability/braces', 5,
              'If an else has a brace on one side, it should have it on both')
  elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
    error(filename, linenum, 'readability/braces', 5,
          'If an else has a brace on one side, it should have it on both')

  # Likewise, an else should never have the else clause on the same line
  if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
    error(filename, linenum, 'whitespace/newline', 4,
          'Else clause should never be on same line as else (use 2 lines)')

  # In the same way, a do/while should never be on one line
  if Match(r'\s*do [^\s{]', line):
    error(filename, linenum, 'whitespace/newline', 4,
          'do/while clauses should not be on a single line')

  # Check single-line if/else bodies. The style guide says 'curly braces are not
  # required for single-line statements'. We additionally allow multi-line,
  # single statements, but we reject anything with more than one semicolon in
  # it. This means that the first semicolon after the if should be at the end of
  # its line, and the line after that should have an indent level equal to or
  # lower than the if. We also check for ambiguous if/else nesting without
  # braces.
  if_else_match = Search(r'\b(if\s*\(|else\b)', line)
  if if_else_match and not Match(r'\s*#', line):
    if_indent = GetIndentLevel(line)
    endline, endlinenum, endpos = line, linenum, if_else_match.end()
    if_match = Search(r'\bif\s*\(', line)
    if if_match:
      # This could be a multiline if condition, so find the end first.
      pos = if_match.end() - 1
      (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos)
    # Check for an opening brace, either directly after the if or on the next
    # line. If found, this isn't a single-statement conditional.
    if (not Match(r'\s*{', endline[endpos:])
        and not (Match(r'\s*$', endline[endpos:])
                 and endlinenum < (len(clean_lines.elided) - 1)
                 and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))):
      # Scan forward to the first line containing a semicolon after the
      # condition; that ends the (possibly multi-line) single statement.
      while (endlinenum < len(clean_lines.elided)
             and ';' not in clean_lines.elided[endlinenum][endpos:]):
        endlinenum += 1
        endpos = 0
      if endlinenum < len(clean_lines.elided):
        endline = clean_lines.elided[endlinenum]
        # We allow a mix of whitespace and closing braces (e.g. for one-liner
        # methods) and a single \ after the semicolon (for macros)
        endpos = endline.find(';')
        if not Match(r';[\s}]*(\\?)$', endline[endpos:]):
          # Semicolon isn't the last character, there's something trailing.
          # Output a warning if the semicolon is not contained inside
          # a lambda expression.
          if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$',
                       endline):
            error(filename, linenum, 'readability/braces', 4,
                  'If/else bodies with multiple statements require braces')
        elif endlinenum < len(clean_lines.elided) - 1:
          # Make sure the next line is dedented
          next_line = clean_lines.elided[endlinenum + 1]
          next_indent = GetIndentLevel(next_line)
          # With ambiguous nested if statements, this will error out on the
          # if that *doesn't* match the else, regardless of whether it's the
          # inner one or outer one.
          if (if_match and Match(r'\s*else\b', next_line)
              and next_indent != if_indent):
            error(filename, linenum, 'readability/braces', 4,
                  'Else clause should be indented at the same level as if. '
                  'Ambiguous nested if/else chains require braces.')
          elif next_indent > if_indent:
            error(filename, linenum, 'readability/braces', 4,
                  'If/else bodies with multiple statements require braces')
def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
  """Looks for redundant trailing semicolon.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """

  line = clean_lines.elided[linenum]

  # Block bodies should not be followed by a semicolon.  Due to C++11
  # brace initialization, there are more places where semicolons are
  # required than not, so we use a whitelist approach to check these
  # rather than a blacklist.  These are the places where "};" should
  # be replaced by just "}":
  # 1. Some flavor of block following closing parenthesis:
  #    for (;;) {};
  #    while (...) {};
  #    switch (...) {};
  #    Function(...) {};
  #    if (...) {};
  #    if (...) else if (...) {};
  #
  # 2. else block:
  #    if (...) else {};
  #
  # 3. const member function:
  #    Function(...) const {};
  #
  # 4. Block following some statement:
  #    x = 42;
  #    {};
  #
  # 5. Block at the beginning of a function:
  #    Function(...) {
  #      {};
  #    }
  #
  #    Note that naively checking for the preceding "{" will also match
  #    braces inside multi-dimensional arrays, but this is fine since
  #    that expression will not contain semicolons.
  #
  # 6. Block following another block:
  #    while (true) {}
  #    {};
  #
  # 7. End of namespaces:
  #    namespace {};
  #
  #    These semicolons seems far more common than other kinds of
  #    redundant semicolons, possibly due to people converting classes
  #    to namespaces.  For now we do not warn for this case.
  #
  # Try matching case 1 first.  A non-None 'match' after this if/else
  # chain means the '{' on this line is eligible for the semicolon check;
  # any code path that decides the line is safe resets 'match' to None.
  match = Match(r'^(.*\)\s*)\{', line)
  if match:
    # Matched closing parenthesis (case 1).  Check the token before the
    # matching opening parenthesis, and don't warn if it looks like a
    # macro.  This avoids these false positives:
    #  - macro that defines a base class
    #  - multi-line macro that defines a base class
    #  - macro that defines the whole class-head
    #
    # But we still issue warnings for macros that we know are safe to
    # warn, specifically:
    #  - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
    #  - TYPED_TEST
    #  - INTERFACE_DEF
    #  - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
    #
    # We implement a whitelist of safe macros instead of a blacklist of
    # unsafe macros, even though the latter appears less frequently in
    # google code and would have been easier to implement.  This is because
    # the downside for getting the whitelist wrong means some extra
    # semicolons, while the downside for getting the blacklist wrong
    # would result in compile errors.
    #
    # In addition to macros, we also don't want to warn on compound
    # literals and lambdas.
    closing_brace_pos = match.group(1).rfind(')')
    opening_parenthesis = ReverseCloseExpression(
        clean_lines, linenum, closing_brace_pos)
    if opening_parenthesis[2] > -1:
      line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
      macro = Search(r'\b([A-Z_]+)\s*$', line_prefix)
      func = Match(r'^(.*\])\s*$', line_prefix)
      # Suppress the check for non-whitelisted macros, operator[]
      # definitions, and assignments (compound literals / lambdas).
      if ((macro and
           macro.group(1) not in (
               'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
               'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
               'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
          (func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or
          Search(r'\s+=\s*$', line_prefix)):
        match = None
    # A ']' at the end of the line before the '(' means a lambda capture
    # list split across lines, i.e. a multi-line lambda-expression.
    if (match and
        opening_parenthesis[1] > 1 and
        Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):
      # Multi-line lambda-expression
      match = None

  else:
    # Try matching cases 2-3.
    match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
    if not match:
      # Try matching cases 4-6.  These are always matched on separate lines.
      #
      # Note that we can't simply concatenate the previous line to the
      # current line and do a single match, otherwise we may output
      # duplicate warnings for the blank line case:
      #   if (cond) {
      #     // blank line
      #   }
      prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
      if prevline and Search(r'[;{}]\s*$', prevline):
        match = Match(r'^(\s*)\{', line)

  # Check matching closing brace
  if match:
    (endline, endlinenum, endpos) = CloseExpression(
        clean_lines, linenum, len(match.group(1)))
    if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
      # Current {} pair is eligible for semicolon check, and we have found
      # the redundant semicolon, output warning here.
      #
      # Note: because we are scanning forward for opening braces, and
      # outputting warnings for the matching closing brace, if there are
      # nested blocks with trailing semicolons, we will get the error
      # messages in reversed order.
      error(filename, endlinenum, 'readability/braces', 4,
            "You don't need a ; after a }")
def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
  """Look for empty loop/conditional body with only a single semicolon.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Only loop/conditional keywords at the start of the line are considered.
  # Because only whitespace may precede the keyword, this also skips most
  # do-while loops, whose lines start with a closing brace.  "if" blocks
  # are included since an empty conditional body is likely an error.
  line = clean_lines.elided[linenum]
  matched = Match(r'\s*(for|while|if)\s*\(', line)
  if not matched:
    return

  # Find the end of the (possibly multi-line) condition expression.
  (end_line, end_linenum, end_pos) = CloseExpression(
      clean_lines, linenum, line.find('('))

  # Warn only if a semicolon directly follows the condition.  Whitespace
  # or a newline after the ')' is covered by a separate semicolon check.
  if end_pos < 0 or not Match(r';', end_line[end_pos:]):
    return
  if matched.group(1) == 'if':
    error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
          'Empty conditional bodies should use {}')
  else:
    error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
          'Empty loop bodies should use {} or continue')
def FindCheckMacro(line):
  """Find a replaceable CHECK-like macro.

  Args:
    line: line to search on.
  Returns:
    (macro name, start position), or (None, -1) if no replaceable
    macro is found.
  """
  for macro in _CHECK_MACROS:
    pos = line.find(macro)
    if pos < 0:
      continue
    # Confirm with a regular expression that this really is the expected
    # CHECK macro followed by '(', and not some other identifier that
    # merely contains the CHECK substring.
    matched = Match(r'^(.*\b' + macro + r'\s*)\(', line)
    if matched:
      return (macro, len(matched.group(1)))
  return (None, -1)
def CheckCheck(filename, clean_lines, linenum, error):
  """Checks the use of CHECK and EXPECT macros.

  Suggests e.g. CHECK_EQ(a, b) in place of CHECK(a == b) when the macro's
  argument is a single relational comparison against a literal constant.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """

  # Decide the set of replacement macros that should be suggested
  lines = clean_lines.elided
  (check_macro, start_pos) = FindCheckMacro(lines[linenum])
  if not check_macro:
    return

  # Find end of the boolean expression by matching parentheses.
  # Note: despite the names, last_line is the text of the final line and
  # end_line is its line number.
  (last_line, end_line, end_pos) = CloseExpression(
      clean_lines, linenum, start_pos)
  if end_pos < 0:
    return

  # If the check macro is followed by something other than a
  # semicolon, assume users will log their own custom error messages
  # and don't suggest any replacements.
  if not Match(r'\s*;', last_line[end_pos:]):
    return

  if linenum == end_line:
    expression = lines[linenum][start_pos + 1:end_pos - 1]
  else:
    # Condition spans multiple lines: concatenate all pieces between the
    # macro's parentheses.
    expression = lines[linenum][start_pos + 1:]
    for i in xrange(linenum + 1, end_line):
      expression += lines[i]
    expression += last_line[0:end_pos - 1]

  # Parse expression so that we can take parentheses into account.
  # This avoids false positives for inputs like "CHECK((a < 4) == b)",
  # which is not replaceable by CHECK_LE.
  lhs = ''
  rhs = ''
  operator = None
  while expression:
    matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
                    r'==|!=|>=|>|<=|<|\()(.*)$', expression)
    if matched:
      token = matched.group(1)
      if token == '(':
        # Parenthesized operand
        expression = matched.group(2)
        (end, _) = FindEndOfExpressionInLine(expression, 0, ['('])
        if end < 0:
          return  # Unmatched parenthesis
        lhs += '(' + expression[0:end]
        expression = expression[end:]
      elif token in ('&&', '||'):
        # Logical and/or operators.  This means the expression
        # contains more than one term, for example:
        #   CHECK(42 < a && a < b);
        #
        # These are not replaceable with CHECK_LE, so bail out early.
        return
      elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
        # Non-relational operator
        lhs += token
        expression = matched.group(2)
      else:
        # Relational operator
        operator = token
        rhs = matched.group(2)
        break
    else:
      # Unparenthesized operand.  Instead of appending to lhs one character
      # at a time, we do another regular expression match to consume several
      # characters at once if possible.  Trivial benchmark shows that this
      # is more efficient when the operands are longer than a single
      # character, which is generally the case.
      matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
      if not matched:
        matched = Match(r'^(\s*\S)(.*)$', expression)
        if not matched:
          break
      lhs += matched.group(1)
      expression = matched.group(2)

  # Only apply checks if we got all parts of the boolean expression
  if not (lhs and operator and rhs):
    return

  # Check that rhs do not contain logical operators.  We already know
  # that lhs is fine since the loop above parses out && and ||.
  if rhs.find('&&') > -1 or rhs.find('||') > -1:
    return

  # At least one of the operands must be a constant literal.  This is
  # to avoid suggesting replacements for unprintable things like
  # CHECK(variable != iterator)
  #
  # The following pattern matches decimal, hex integers, strings, and
  # characters (in that order).
  lhs = lhs.strip()
  rhs = rhs.strip()
  match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
  if Match(match_constant, lhs) or Match(match_constant, rhs):
    # Note: since we know both lhs and rhs, we can provide a more
    # descriptive error message like:
    #   Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
    # Instead of:
    #   Consider using CHECK_EQ instead of CHECK(a == b)
    #
    # We are still keeping the less descriptive message because if lhs
    # or rhs gets long, the error message might become unreadable.
    error(filename, linenum, 'readability/check', 2,
          'Consider using %s instead of %s(a %s b)' % (
              _CHECK_REPLACEMENT[check_macro][operator],
              check_macro, operator))
def CheckAltTokens(filename, clean_lines, linenum, error):
  """Check alternative keywords being used in boolean expressions.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Preprocessor directives are exempt from this check.
  if Match(r'^\s*#', line):
    return

  # Last ditch effort to avoid multi-line comments.  This will not help
  # if the comment started before the current line or ended after the
  # current line, but it catches most of the false positives.  At least,
  # it provides a way to workaround this warning for people who use
  # multi-line comments in preprocessor macros.
  #
  # TODO(unknown): remove this once cpplint has better support for
  # multi-line comments.
  if '/*' in line or '*/' in line:
    return

  for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
    error(filename, linenum, 'readability/alt_tokens', 2,
          'Use operator %s instead of %s' % (
              _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
def GetLineWidth(line):
  """Determines the width of the line in column positions.

  Args:
    line: A string, which may be a Unicode string.
  Returns:
    The width of the line in column positions, accounting for Unicode
    combining characters and wide characters.
  """
  # 'unicode' exists only on Python 2; fall back to 'str' on Python 3,
  # where every string is already a Unicode string.  The previous bare
  # reference to 'unicode' raised NameError under Python 3.
  try:
    text_type = unicode
  except NameError:  # Python 3
    text_type = str
  if isinstance(line, text_type):
    width = 0
    for uc in unicodedata.normalize('NFC', line):
      if unicodedata.east_asian_width(uc) in ('W', 'F'):
        # Wide and fullwidth characters occupy two columns.
        width += 2
      elif not unicodedata.combining(uc):
        # Combining marks occupy no columns of their own.
        width += 1
    return width
  else:
    # Byte strings (Python 2 str / Python 3 bytes): one column per byte.
    return len(line)
def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
               error):
  """Checks rules from the 'C++ style rules' section of cppguide.html.

  Most of these rules are hard to test (naming, comment style), but we
  do what we can.  In particular we check for 2-space indents, line lengths,
  tab usage, spaces inside code, etc.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    file_extension: The extension (without the dot) of the filename.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """

  # Don't use "elided" lines here, otherwise we can't check commented lines.
  # Don't want to use "raw" either, because we don't want to check inside C++11
  # raw strings.
  raw_lines = clean_lines.lines_without_raw_strings
  line = raw_lines[linenum]

  if line.find('\t') != -1:
    error(filename, linenum, 'whitespace/tab', 1,
          'Tab found; better to use spaces')

  # One or three blank spaces at the beginning of the line is weird; it's
  # hard to reconcile that with 2-space indents.
  # NOTE: here are the conditions rob pike used for his tests.  Mine aren't
  # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
  # if(RLENGTH > 20) complain = 0;
  # if(match($0, " +(error|private|public|protected):")) complain = 0;
  # if(match(prev, "&& *$")) complain = 0;
  # if(match(prev, "\\|\\| *$")) complain = 0;
  # if(match(prev, "[\",=><] *$")) complain = 0;
  # if(match($0, " <<")) complain = 0;
  # if(match(prev, " +for \\(")) complain = 0;
  # if(prevodd && match(prevprev, " +for \\(")) complain = 0;
  scope_or_label_pattern = r'\s*\w+\s*:\s*\\?$'
  classinfo = nesting_state.InnermostClass()
  initial_spaces = 0
  cleansed_line = clean_lines.elided[linenum]
  while initial_spaces < len(line) and line[initial_spaces] == ' ':
    initial_spaces += 1
  if line and line[-1].isspace():
    error(filename, linenum, 'whitespace/end_of_line', 4,
          'Line ends in whitespace. Consider deleting these extra spaces.')
  # There are certain situations we allow one space, notably for
  # section labels, and also lines containing multi-line raw strings.
  elif ((initial_spaces == 1 or initial_spaces == 3) and
        not Match(scope_or_label_pattern, cleansed_line) and
        not (clean_lines.raw_lines[linenum] != line and
             Match(r'^\s*""', line))):
    error(filename, linenum, 'whitespace/indent', 3,
          'Weird number of spaces at line-start. '
          'Are you using a 2-space indent?')

  # Check if the line is a header guard.
  is_header_guard = False
  if file_extension == 'h':
    cppvar = GetHeaderGuardCPPVariable(filename)
    if (line.startswith('#ifndef %s' % cppvar) or
        line.startswith('#define %s' % cppvar) or
        line.startswith('#endif // %s' % cppvar)):
      is_header_guard = True
  # #include lines and header guards can be long, since there's no clean way to
  # split them.
  #
  # URLs can be long too.  It's possible to split these, but it makes them
  # harder to cut&paste.
  #
  # The "$Id:...$" comment may also get very long without it being the
  # developers fault.
  if (not line.startswith('#include') and not is_header_guard and
      not Match(r'^\s*//.*http(s?)://\S*$', line) and
      not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
    line_width = GetLineWidth(line)
    # Lines beyond 125% of the limit draw a higher-confidence complaint.
    extended_length = int((_line_length * 1.25))
    if line_width > extended_length:
      error(filename, linenum, 'whitespace/line_length', 4,
            'Lines should very rarely be longer than %i characters' %
            extended_length)
    elif line_width > _line_length:
      error(filename, linenum, 'whitespace/line_length', 2,
            'Lines should be <= %i characters long' % _line_length)

  if (cleansed_line.count(';') > 1 and
      # for loops are allowed two ;'s (and may run over two lines).
      cleansed_line.find('for') == -1 and
      (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
       GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
      # It's ok to have many commands in a switch case that fits in 1 line
      not ((cleansed_line.find('case ') != -1 or
            cleansed_line.find('default:') != -1) and
           cleansed_line.find('break;') != -1)):
    error(filename, linenum, 'whitespace/newline', 0,
          'More than one command on the same line')

  # Some more style checks
  CheckBraces(filename, clean_lines, linenum, error)
  CheckTrailingSemicolon(filename, clean_lines, linenum, error)
  CheckEmptyBlockBody(filename, clean_lines, linenum, error)
  CheckAccess(filename, clean_lines, linenum, nesting_state, error)
  CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
  CheckOperatorSpacing(filename, clean_lines, linenum, error)
  CheckParenthesisSpacing(filename, clean_lines, linenum, error)
  CheckCommaSpacing(filename, clean_lines, linenum, error)
  CheckBracesSpacing(filename, clean_lines, linenum, error)
  CheckSpacingForFunctionCall(filename, clean_lines, linenum, error)
  CheckRValueReference(filename, clean_lines, linenum, nesting_state, error)
  CheckCheck(filename, clean_lines, linenum, error)
  CheckAltTokens(filename, clean_lines, linenum, error)
  classinfo = nesting_state.InnermostClass()
  if classinfo:
    CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
# Matches an #include directive; group 1 is the opening delimiter
# ('<' or '"') and group 2 is the included path.
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')

# Matches the first component of a filename delimited by -s and _s. That is:
#   _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
#   _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
#   _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
#   _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
def _DropCommonSuffixes(filename):
"""Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
"""
for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
'inl.h', 'impl.h', 'internal.h'):
if (filename.endswith(suffix) and len(filename) > len(suffix) and
filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0]
def _IsTestFilename(filename):
"""Determines if the given filename has a suffix that identifies it as a test.
Args:
filename: The input filename.
Returns:
True if 'filename' looks like a test, False otherwise.
"""
if (filename.endswith('_test.cc') or
filename.endswith('_unittest.cc') or
filename.endswith('_regtest.cc')):
return True
else:
return False
def _ClassifyInclude(fileinfo, include, is_system):
  """Figures out what kind of header 'include' is.

  Args:
    fileinfo: The current file cpplint is running over. A FileInfo instance.
    include: The path to a #included file.
    is_system: True if the #include used <> rather than "".

  Returns:
    One of the _XXX_HEADER constants.

  For example:
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
    _C_SYS_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
    _CPP_SYS_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
    _LIKELY_MY_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
    ...                  'bar/foo_other_ext.h', False)
    _POSSIBLE_MY_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
    _OTHER_HEADER
  """
  # System headers split into C++ vs. C depending on whether the name is
  # one of the standard C++ header files (everything else is assumed C).
  if is_system:
    if include in _CPP_HEADERS:
      return _CPP_SYS_HEADER
    return _C_SYS_HEADER

  # If the target file and the include we're checking share a basename
  # once common suffixes are dropped, and the include lives in the same
  # directory (or the sibling '../public' directory), it's likely to be
  # owned by the target file.
  target_dir, target_base = (
      os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
  include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
  if target_base == include_base:
    if (include_dir == target_dir or
        include_dir == os.path.normpath(target_dir + '/../public')):
      return _LIKELY_MY_HEADER

  # If the target and include share some initial basename component
  # (text before the first '-', '_' or '.'), it's possible the target is
  # implementing the include, so it's allowed to be first, but we'll
  # never complain if it's not there.
  target_first = _RE_FIRST_COMPONENT.match(target_base)
  include_first = _RE_FIRST_COMPONENT.match(include_base)
  if (target_first and include_first and
      target_first.group(0) == include_first.group(0)):
    return _POSSIBLE_MY_HEADER

  return _OTHER_HEADER
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
  """Check rules that are applicable to #include lines.

  Strings on #include lines are NOT removed from elided line, to make
  certain tasks easier. However, to prevent false positives, checks
  applicable to #include lines in CheckLanguage must be put here.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    include_state: An _IncludeState instance in which the headers are inserted.
    error: The function to call with any errors found.
  """
  fileinfo = FileInfo(filename)
  line = clean_lines.lines[linenum]

  # "include" should use the new style "foo/bar.h" instead of just "bar.h"
  # Only do this check if the included header follows google naming
  # conventions. If not, assume that it's a 3rd party API that
  # requires special include conventions.
  #
  # We also make an exception for Lua headers, which follow google
  # naming convention but not the include convention.
  # removed by weizhenwei, 2015.06.27;
  # match = Match(r'#include\s*"([^/]+\.h)"', line)
  # if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)):
  # error(filename, linenum, 'build/include', 4,
  # 'Include the directory when naming .h files')

  # we shouldn't include a file more than once. actually, there are a
  # handful of instances where doing so is okay, but in general it's
  # not.
  match = _RE_PATTERN_INCLUDE.search(line)
  if match:
    include = match.group(2)
    is_system = (match.group(1) == '<')
    duplicate_line = include_state.FindHeader(include)
    if duplicate_line >= 0:
      # The same header was already seen earlier in this file.
      error(filename, linenum, 'build/include', 4,
            '"%s" already included at %s:%s' %
            (include, filename, duplicate_line))
    elif not _THIRD_PARTY_HEADERS_PATTERN.match(include):
      include_state.include_list[-1].append((include, linenum))

      # We want to ensure that headers appear in the right order:
      # 1) for foo.cc, foo.h  (preferred location)
      # 2) c system files
      # 3) cpp system files
      # 4) for foo.cc, foo.h  (deprecated location)
      # 5) other google headers
      #
      # We classify each include statement as one of those 5 types
      # using a number of techniques. The include_state object keeps
      # track of the highest type seen, and complains if we see a
      # lower type after that.
      error_message = include_state.CheckNextIncludeOrder(
          _ClassifyInclude(fileinfo, include, is_system))
      if error_message:
        error(filename, linenum, 'build/include_order', 4,
              '%s. Should be: %s.h, c system, c++ system, other.' %
              (error_message, fileinfo.BaseName()))
      canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
      if not include_state.IsInAlphabeticalOrder(
          clean_lines, linenum, canonical_include):
        error(filename, linenum, 'build/include_alpha', 4,
              'Include "%s" not in alphabetical order' % include)
      include_state.SetLastHeader(canonical_include)

  # Look for any of the stream classes that are part of standard C++.
  match = _RE_PATTERN_INCLUDE.match(line)
  if match:
    include = match.group(2)
    if Match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
      # Many unit tests use cout, so we exempt them.
      if not _IsTestFilename(filename):
        # Suggest a different header for ostream
        if include == 'ostream':
          error(filename, linenum, 'readability/streams', 3,
                'For logging, include "base/logging.h" instead of <ostream>.')
        else:
          error(filename, linenum, 'readability/streams', 3,
                'Streams are highly discouraged.')
def _GetTextInside(text, start_pattern):
r"""Retrieves all the text between matching open and close parentheses.
Given a string of lines and a regular expression string, retrieve all the text
following the expression and between opening punctuation symbols like
(, [, or {, and the matching close-punctuation symbol. This properly nested
occurrences of the punctuations, so for the text like
printf(a(), b(c()));
a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
start_pattern must match string having an open punctuation symbol at the end.
Args:
text: The lines to extract text. Its comments and strings must be elided.
It can be single line and can span multiple lines.
start_pattern: The regexp string indicating where to start extracting
the text.
Returns:
The extracted text.
None if either the opening string or ending punctuation could not be found.
"""
# TODO(unknown): Audit cpplint.py to see what places could be profitably
# rewritten to use _GetTextInside (and use inferior regexp matching today).
# Give opening punctuations to get the matching close-punctuations.
matching_punctuation = {'(': ')', '{': '}', '[': ']'}
closing_punctuation = set(matching_punctuation.itervalues())
# Find the position to start extracting text.
match = re.search(start_pattern, text, re.M)
if not match: # start_pattern not found in text.
return None
start_position = match.end(0)
assert start_position > 0, (
'start_pattern must ends with an opening punctuation.')
assert text[start_position - 1] in matching_punctuation, (
'start_pattern must ends with an opening punctuation.')
# Stack of closing punctuations we expect to have in text after position.
punctuation_stack = [matching_punctuation[text[start_position - 1]]]
position = start_position
while punctuation_stack and position < len(text):
if text[position] == punctuation_stack[-1]:
punctuation_stack.pop()
elif text[position] in closing_punctuation:
# A closing punctuation without matching opening punctuations.
return None
elif text[position] in matching_punctuation:
punctuation_stack.append(matching_punctuation[text[position]])
position += 1
if punctuation_stack:
# Opening punctuations left without matching close-punctuations.
return None
# punctuations match.
return text[start_position:position - 1]
# Patterns for matching call-by-reference parameters.
#
# Supports nested templates up to 2 levels deep using this messy pattern:
#   < (?: < (?: < [^<>]*
#               >
#           | [^<>] )*
#         >
#     | [^<>] )*
#   >
_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*'  # =~ [[:alpha:]][[:alnum:]]*
# A (possibly const/qualified, possibly templated) C++ type name.
_RE_PATTERN_TYPE = (
    r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
    r'(?:\w|'
    r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
    r'::)+')
# A call-by-reference parameter ends with '& identifier'.
_RE_PATTERN_REF_PARAM = re.compile(
    r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
    r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
# A call-by-const-reference parameter either ends with 'const& identifier'
# or looks like 'const type& identifier' when 'type' is atomic.
_RE_PATTERN_CONST_REF_PARAM = (
    r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
    r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
def CheckLanguage(filename, clean_lines, linenum, file_extension,
                  include_state, nesting_state, error):
  """Checks rules from the 'C++ language rules' section of cppguide.html.

  Some of these rules are hard to test (function overloading, using
  uint32 inappropriately), but we do the best we can.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    file_extension: The extension (without the dot) of the filename.
    include_state: An _IncludeState instance in which the headers are inserted.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # If the line is empty or consists of entirely a comment, no need to
  # check it.
  line = clean_lines.elided[linenum]
  if not line:
    return

  match = _RE_PATTERN_INCLUDE.search(line)
  if match:
    # #include lines get their own dedicated set of checks.
    CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
    return

  # Reset include state across preprocessor directives. This is meant
  # to silence warnings for conditional includes.
  match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line)
  if match:
    include_state.ResetSection(match.group(1))

  # Make Windows paths like Unix.
  fullname = os.path.abspath(filename).replace('\\', '/')

  # Perform other checks now that we are sure that this is not an include line
  CheckCasts(filename, clean_lines, linenum, error)
  CheckGlobalStatic(filename, clean_lines, linenum, error)
  CheckPrintf(filename, clean_lines, linenum, error)

  if file_extension == 'h':
    # TODO(unknown): check that 1-arg constructors are explicit.
    # How to tell it's a constructor?
    # (handled in CheckForNonStandardConstructs for now)
    # TODO(unknown): check that classes declare or disable copy/assign
    # (level 1 error)
    pass

  # Check if people are using the verboten C basic types. The only exception
  # we regularly allow is "unsigned short port" for port.
  if Search(r'\bshort port\b', line):
    if not Search(r'\bunsigned short port\b', line):
      error(filename, linenum, 'runtime/int', 4,
            'Use "unsigned short" for ports, not "short"')
  else:
    match = Search(r'\b(short|long(?! +double)|long long)\b', line)
    if match:
      error(filename, linenum, 'runtime/int', 4,
            'Use int16/int64/etc, rather than the C type %s' % match.group(1))

  # Check if some verboten operator overloading is going on
  # TODO(unknown): catch out-of-line unary operator&:
  #   class X {};
  #   int operator&(const X& x) { return 42; }  // unary operator&
  # The trick is it's hard to tell apart from binary operator&:
  #   class Y { int operator&(const Y& x) { return 23; } };  // binary operator&
  if Search(r'\boperator\s*&\s*\(\s*\)', line):
    error(filename, linenum, 'runtime/operator', 4,
          'Unary operator& is dangerous. Do not use it.')

  # Check for suspicious usage of "if" like
  # } if (a == b) {
  if Search(r'\}\s*if\s*\(', line):
    error(filename, linenum, 'readability/braces', 4,
          'Did you mean "else if"? If not, start a new line for "if".')

  # Check for potential format string bugs like printf(foo).
  # We constrain the pattern not to pick things like DocidForPrintf(foo).
  # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
  # TODO(unknown): Catch the following case. Need to change the calling
  # convention of the whole function to process multiple line to handle it.
  #   printf(
  #       boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
  printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
  if printf_args:
    match = Match(r'([\w.\->()]+)$', printf_args)
    if match and match.group(1) != '__VA_ARGS__':
      function_name = re.search(r'\b((?:string)?printf)\s*\(',
                                line, re.I).group(1)
      error(filename, linenum, 'runtime/printf', 4,
            'Potential format string bug. Do %s("%%s", %s) instead.'
            % (function_name, match.group(1)))

  # Check for potential memset bugs like memset(buf, sizeof(buf), 0).
  match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
  if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
    error(filename, linenum, 'runtime/memset', 4,
          'Did you mean "memset(%s, 0, %s)"?'
          % (match.group(1), match.group(2)))

  # removed by weizhenwei, 2015.06.24;
  # if Search(r'\busing namespace\b', line):
  # error(filename, linenum, 'build/namespaces', 5,
  # 'Do not use namespace using-directives. '
  # 'Use using-declarations instead.')

  # Detect variable-length arrays.
  match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
  if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
      match.group(3).find(']') == -1):
    # Split the size using space and arithmetic operators as delimiters.
    # If any of the resulting tokens are not compile time constants then
    # report the error.
    tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3))
    is_const = True
    skip_next = False
    for tok in tokens:
      if skip_next:
        skip_next = False
        continue

      if Search(r'sizeof\(.+\)', tok): continue
      if Search(r'arraysize\(\w+\)', tok): continue

      tok = tok.lstrip('(')
      tok = tok.rstrip(')')
      if not tok: continue
      if Match(r'\d+', tok): continue
      if Match(r'0[xX][0-9a-fA-F]+', tok): continue
      if Match(r'k[A-Z0-9]\w*', tok): continue
      if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
      if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
      # A catch all for tricky sizeof cases, including 'sizeof expression',
      # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
      # requires skipping the next token because we split on ' ' and '*'.
      if tok.startswith('sizeof'):
        skip_next = True
        continue
      is_const = False
      break
    if not is_const:
      error(filename, linenum, 'runtime/arrays', 1,
            'Do not use variable-length arrays. Use an appropriately named '
            "('k' followed by CamelCase) compile-time constant for the size.")

  # If DISALLOW_COPY_AND_ASSIGN DISALLOW_IMPLICIT_CONSTRUCTORS is present,
  # then it should be the last thing in the class declaration.
  match = Match(
      (r'\s*'
       r'(DISALLOW_(COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))'
       r'\(.*\);$'),
      line)
  if match and linenum + 1 < clean_lines.NumLines():
    next_line = clean_lines.elided[linenum + 1]
    # We allow some, but not all, declarations of variables to be present
    # in the statement that defines the class. The [\w\*,\s]* fragment of
    # the regular expression below allows users to declare instances of
    # the class or pointers to instances, but not less common types such
    # as function pointers or arrays. It's a tradeoff between allowing
    # reasonable code and avoiding trying to parse more C++ using regexps.
    if not Search(r'^\s*}[\w\*,\s]*;', next_line):
      error(filename, linenum, 'readability/constructors', 3,
            match.group(1) + ' should be the last thing in the class')

  # Check for use of unnamed namespaces in header files. Registration
  # macros are typically OK, so we allow use of "namespace {" on lines
  # that end with backslashes.
  if (file_extension == 'h'
      and Search(r'\bnamespace\s*{', line)
      and line[-1] != '\\'):
    error(filename, linenum, 'build/namespaces', 4,
          'Do not use unnamed namespaces in header files. See '
          'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
          ' for more information.')
def CheckGlobalStatic(filename, clean_lines, linenum, error):
  """Check for unsafe global or static objects.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Match two lines at a time to support multiline declarations
  if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line):
    line += clean_lines.elided[linenum + 1].strip()

  # Check for people declaring static/global STL strings at the top level.
  # This is dangerous because the C++ language does not guarantee that
  # globals with constructors are initialized before the first access.
  match = Match(
      r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
      line)

  # Remove false positives:
  # - String pointers (as opposed to values).
  #    string *pointer
  #    const string *pointer
  #    string const *pointer
  #    string *const pointer
  #
  # - Functions and template specializations.
  #    string Function<Type>(...
  #    string Class<Type>::Method(...
  #
  # - Operators. These are matched separately because operator names
  #   cross non-word boundaries, and trying to match both operators
  #   and functions at the same time would decrease accuracy of
  #   matching identifiers.
  #    string Class::operator*()
  if (match and
      not Search(r'\bstring\b(\s+const)?\s*\*\s*(const\s+)?\w', line) and
      not Search(r'\boperator\W', line) and
      not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(3))):
    error(filename, linenum, 'runtime/string', 4,
          'For a static/global string constant, use a C style string instead: '
          '"%schar %s[]".' %
          (match.group(1), match.group(2)))

  # A member initialized with its own name, e.g. "foo_(foo_)", is almost
  # certainly a typo for initialization from a constructor parameter.
  if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
    error(filename, linenum, 'runtime/init', 4,
          'You seem to be initializing a member variable with itself.')
def CheckPrintf(filename, clean_lines, linenum, error):
  """Check for printf related issues.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # When snprintf is used, the second argument shouldn't be a literal.
  snprintf_match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
  if snprintf_match and snprintf_match.group(2) != '0':
    # If 2nd arg is zero, snprintf is used to calculate size.
    error(filename, linenum, 'runtime/printf', 3,
          'If you can, use sizeof(%s) instead of %s as the 2nd arg '
          'to snprintf.' % (snprintf_match.group(1), snprintf_match.group(2)))

  # Check if some verboten C functions are being used.
  if Search(r'\bsprintf\s*\(', line):
    error(filename, linenum, 'runtime/printf', 5,
          'Never use sprintf. Use snprintf instead.')
  unsafe_match = Search(r'\b(strcpy|strcat)\s*\(', line)
  if unsafe_match:
    error(filename, linenum, 'runtime/printf', 4,
          'Almost always, snprintf is better than %s' % unsafe_match.group(1))
def IsDerivedFunction(clean_lines, linenum):
  """Check if current line contains an inherited function.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.

  Returns:
    True if current line contains a function with "override"
    virt-specifier.
  """
  # Scan back a few lines (at most 10) for the start of the current function.
  lowest_line = max(-1, linenum - 10)
  for candidate in xrange(linenum, lowest_line, -1):
    decl_match = Match(r'^([^()]*\w+)\(', clean_lines.elided[candidate])
    if not decl_match:
      continue
    # Look for "override" after the matching closing parenthesis.
    line, _, closing_paren = CloseExpression(
        clean_lines, candidate, len(decl_match.group(1)))
    return (closing_paren >= 0 and
            Search(r'\boverride\b', line[closing_paren:]))
  return False
def IsInitializerList(clean_lines, linenum):
  """Check if current line is inside constructor initializer list.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.

  Returns:
    True if current line appears to be inside constructor initializer
    list, False otherwise.
  """
  # Walk backwards from the current line looking for evidence either way.
  for i in xrange(linenum, 1, -1):
    line = clean_lines.elided[i]
    if i == linenum:
      # Strip a trailing '{' from the current line so an opening function
      # body does not look like the end of a previous scope below.
      stripped = Match(r'^(.*)\{\s*$', line)
      if stripped:
        line = stripped.group(1)

    # A lone colon followed by 'name(' or 'name{' tends to indicate the
    # start of a constructor initializer list (it could also be a ternary
    # operator, but those likewise tend to appear in initializer lists
    # rather than parameter lists).  A closing brace followed by a comma
    # is probably the end of a brace-initialized member in an initializer
    # list.  Either way, we are inside one.
    if Search(r'\s:\s*\w+[({]', line) or Search(r'\}\s*,\s*$', line):
      return True

    if Search(r'[{};]\s*$', line):
      # Found a brace or semicolon closing the previous scope before any
      # starting colon was seen, so this is probably not an initializer
      # list: it is the end of the previous function, or the opening of
      # the current class or namespace.
      return False

  # Reached the beginning of the file without seeing the start of a
  # constructor initializer list.
  return False
def CheckForNonConstReference(filename, clean_lines, linenum,
                              nesting_state, error):
  """Check for non-const references.

  Separate from CheckLanguage since it scans backwards from current
  line, instead of scanning forward.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # Do nothing if there is no '&' on current line.
  line = clean_lines.elided[linenum]
  if '&' not in line:
    return

  # If a function is inherited, current function doesn't have much of
  # a choice, so any non-const references should not be blamed on
  # derived function.
  if IsDerivedFunction(clean_lines, linenum):
    return

  # Long type names may be broken across multiple lines, usually in one
  # of these forms:
  #   LongType
  #       ::LongTypeContinued &identifier
  #   LongType::
  #       LongTypeContinued &identifier
  #   LongType<
  #       ...>::LongTypeContinued &identifier
  #
  # If we detected a type split across two lines, join the previous
  # line to current line so that we can match const references
  # accordingly.
  #
  # Note that this only scans back one line, since scanning back
  # arbitrary number of lines would be expensive. If you have a type
  # that spans more than 2 lines, please use a typedef.
  if linenum > 1:
    previous = None
    if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
      # previous_line\n + ::current_line
      previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
                        clean_lines.elided[linenum - 1])
    elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
      # previous_line::\n + current_line
      previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
                        clean_lines.elided[linenum - 1])
    if previous:
      line = previous.group(1) + line.lstrip()
    else:
      # Check for templated parameter that is split across multiple lines
      endpos = line.rfind('>')
      if endpos > -1:
        (_, startline, startpos) = ReverseCloseExpression(
            clean_lines, linenum, endpos)
        if startpos > -1 and startline < linenum:
          # Found the matching < on an earlier line, collect all
          # pieces up to current line.
          line = ''
          for i in xrange(startline, linenum + 1):
            line += clean_lines.elided[i].strip()

  # Check for non-const references in function parameters. A single '&' may
  # found in the following places:
  #   inside expression: binary & for bitwise AND
  #   inside expression: unary & for taking the address of something
  #   inside declarators: reference parameter
  # We will exclude the first two cases by checking that we are not inside a
  # function body, including one that was just introduced by a trailing '{'.
  # TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
  if (nesting_state.previous_stack_top and
      not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or
           isinstance(nesting_state.previous_stack_top, _NamespaceInfo))):
    # Not at toplevel, not within a class, and not within a namespace
    return

  # Avoid initializer lists. We only need to scan back from the
  # current line for something that starts with ':'.
  #
  # We don't need to check the current line, since the '&' would
  # appear inside the second set of parentheses on the current line as
  # opposed to the first set.
  if linenum > 0:
    for i in xrange(linenum - 1, max(0, linenum - 10), -1):
      previous_line = clean_lines.elided[i]
      if not Search(r'[),]\s*$', previous_line):
        break
      if Match(r'^\s*:\s+\S', previous_line):
        return

  # Avoid preprocessors
  if Search(r'\\\s*$', line):
    return

  # Avoid constructor initializer lists
  if IsInitializerList(clean_lines, linenum):
    return

  # We allow non-const references in a few standard places, like functions
  # called "swap()" or iostream operators like "<<" or ">>". Do not check
  # those function parameters.
  #
  # We also accept & in static_assert, which looks like a function but
  # it's actually a declaration expression.
  whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
                           r'operator\s*[<>][<>]|'
                           r'static_assert|COMPILE_ASSERT'
                           r')\s*\(')
  if Search(whitelisted_functions, line):
    return
  elif not Search(r'\S+\([^)]*$', line):
    # Don't see a whitelisted function on this line. Actually we
    # didn't see any function name on this line, so this is likely a
    # multi-line parameter list. Try a bit harder to catch this case.
    for i in xrange(2):
      if (linenum > i and
          Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])):
        return

  decls = ReplaceAll(r'{[^}]*}', ' ', line)  # exclude function body
  for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
    if not Match(_RE_PATTERN_CONST_REF_PARAM, parameter):
      error(filename, linenum, 'runtime/references', 2,
            'Is this a non-const reference? '
            'If so, make const or use a pointer: ' +
            ReplaceAll(' *<', '<', parameter))
def CheckCasts(filename, clean_lines, linenum, error):
  """Various cast related checks.

  Flags deprecated conversion-function casts on fundamental types,
  delegates C-style cast detection to CheckCStyleCast, and warns when
  the address of a cast expression is taken.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Check to see if they're using an conversion function cast.
  # I just try to capture the most common basic types, though there are more.
  # Parameterless conversion functions, such as bool(), are allowed as they are
  # probably a member operator declaration or default constructor.
  match = Search(
      r'(\bnew\s+|\S<\s*(?:const\s+)?)?\b'
      r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
      r'(\([^)].*)', line)
  # Inside MOCK_METHODn(...) or std::function<...> a bare function type like
  # "int(int)" is expected and must not be flagged as a cast.
  expecting_function = ExpectingFunctionArgs(clean_lines, linenum)
  if match and not expecting_function:
    matched_type = match.group(2)

    # matched_new_or_template is used to silence two false positives:
    # - New operators
    # - Template arguments with function types
    #
    # For template arguments, we match on types immediately following
    # an opening bracket without any spaces. This is a fast way to
    # silence the common case where the function type is the first
    # template argument. False negative with less-than comparison is
    # avoided because those operators are usually followed by a space.
    #
    #   function<double(double)> // bracket + no space = false positive
    #   value < double(42) // bracket + space = true positive
    matched_new_or_template = match.group(1)

    # Avoid arrays by looking for brackets that come after the closing
    # parenthesis.
    if Match(r'\([^()]+\)\s*\[', match.group(3)):
      return

    # Other things to ignore:
    # - Function pointers
    # - Casts to pointer types
    # - Placement new
    # - Alias declarations
    matched_funcptr = match.group(3)
    if (matched_new_or_template is None and
        not (matched_funcptr and
             (Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
                    matched_funcptr) or
              matched_funcptr.startswith('(*)'))) and
        not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and
        not Search(r'new\(\S+\)\s*' + matched_type, line)):
      error(filename, linenum, 'readability/casting', 4,
            'Using deprecated casting style. '
            'Use static_cast<%s>(...) instead' %
            matched_type)

  if not expecting_function:
    CheckCStyleCast(filename, clean_lines, linenum, 'static_cast',
                    r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)

  # This doesn't catch all cases. Consider (const char * const)"hello".
  #
  # (char *) "foo" should always be a const_cast (reinterpret_cast won't
  # compile).
  if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast',
                     r'\((char\s?\*+\s?)\)\s*"', error):
    pass
  else:
    # Check pointer casts for other than string constants
    CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast',
                    r'\((\w+\s?\*+\s?)\)', error)

  # In addition, we look for people taking the address of a cast. This
  # is dangerous -- casts can assign to temporaries, so the pointer doesn't
  # point where you think.
  #
  # Some non-identifier character is required before the '&' for the
  # expression to be recognized as a cast. These are casts:
  #   expression = &static_cast<int*>(temporary());
  #   function(&(int*)(temporary()));
  #
  # This is not a cast:
  #   reference_type&(int* function_param);
  match = Search(
      r'(?:[^\w]&\(([^)]+)\)[\w(])|'
      r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line)
  if match and match.group(1) != '*':
    # Try a better error message when the & is bound to something
    # dereferenced by the casted pointer, as opposed to the casted
    # pointer itself.
    parenthesis_error = False
    match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line)
    if match:
      # Walk to the '>' closing the template args, then to the ')' closing
      # the cast's argument, and inspect what follows (possibly on the
      # next line) for '->' or '[' -- i.e. a dereference of the result.
      _, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1)))
      if x1 >= 0 and clean_lines.elided[y1][x1] == '(':
        _, y2, x2 = CloseExpression(clean_lines, y1, x1)
        if x2 >= 0:
          extended_line = clean_lines.elided[y2][x2:]
          if y2 < clean_lines.NumLines() - 1:
            extended_line += clean_lines.elided[y2 + 1]
          if Match(r'\s*(?:->|\[)', extended_line):
            parenthesis_error = True

    if parenthesis_error:
      error(filename, linenum, 'readability/casting', 4,
            ('Are you taking an address of something dereferenced '
             'from a cast? Wrapping the dereferenced expression in '
             'parentheses will make the binding more obvious'))
    else:
      error(filename, linenum, 'runtime/casting', 4,
            ('Are you taking an address of a cast? '
             'This is dangerous: could be a temp var. '
             'Take the address before doing the cast, rather than after'))
def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):
  """Checks for a C-style cast by looking for the pattern.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    cast_type: The string for the C++ cast to recommend.  This is either
      reinterpret_cast, static_cast, or const_cast, depending.
    pattern: The regular expression used to find C-style casts.
    error: The function to call with any errors found.

  Returns:
    True if an error was emitted.
    False otherwise.
  """
  line = clean_lines.elided[linenum]
  match = Search(pattern, line)
  if not match:
    return False

  # Exclude lines with keywords that tend to look like casts
  context = line[0:match.start(1) - 1]
  if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context):
    return False

  # Try expanding current context to see if we are one level of
  # parentheses inside a macro.
  if linenum > 0:
    for i in xrange(linenum - 1, max(0, linenum - 5), -1):
      context = clean_lines.elided[i] + context
    if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context):
      return False

  # operator++(int) and operator--(int)
  if context.endswith(' operator++') or context.endswith(' operator--'):
    return False

  # A single unnamed argument for a function tends to look like old
  # style cast.  If we see those, don't issue warnings for deprecated
  # casts, instead issue warnings for unnamed arguments where
  # appropriate.
  #
  # These are things that we want warnings for, since the style guide
  # explicitly require all parameters to be named:
  #   Function(int);
  #   Function(int) {
  #   ConstMember(int) const;
  #   ConstMember(int) const {
  #   ExceptionMember(int) throw (...);
  #   ExceptionMember(int) throw (...) {
  #   PureVirtual(int) = 0;
  #
  # These are functions of some sort, where the compiler would be fine
  # if they had named parameters, but people often omit those
  # identifiers to reduce clutter:
  #   (FunctionPointer)(int);
  #   (FunctionPointer)(int) = value;
  #   Function((function_pointer_arg)(int))
  #   Function((function_pointer_arg)(int), int param)
  #   <TemplateArgument(int)>;
  #   <(FunctionPointerTemplateArgument)(int)>;
  remainder = line[match.end(0):]
  if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),])',
           remainder):
    # Looks like an unnamed parameter.

    # Don't warn on any kind of template arguments.
    if Match(r'^\s*>', remainder):
      return False

    # Don't warn on assignments to function pointers, but keep warnings for
    # unnamed parameters to pure virtual functions.  Note that this pattern
    # will also pass on assignments of "0" to function pointers, but the
    # preferred values for those would be "nullptr" or "NULL".
    # NOTE: the pattern previously used r'^\s=' which required exactly one
    # whitespace character before '=', so "=value;" and ")  = value;" forms
    # slipped through; \s* accepts any amount of leading whitespace.
    matched_zero = Match(r'^\s*=\s*(\S+)\s*;', remainder)
    if matched_zero and matched_zero.group(1) != '0':
      return False

    # Don't warn on function pointer declarations.  For this we need
    # to check what came before the "(type)" string.
    if Match(r'.*\)\s*$', line[0:match.start(0)]):
      return False

    # Don't warn if the parameter is named with block comments, e.g.:
    #  Function(int /*unused_param*/);
    raw_line = clean_lines.raw_lines[linenum]
    if '/*' in raw_line:
      return False

    # Passed all filters, issue warning here.
    error(filename, linenum, 'readability/function', 3,
          'All parameters should be named in a function')
    return True

  # At this point, all that should be left is actual casts.
  error(filename, linenum, 'readability/casting', 4,
        'Using C-style cast. Use %s<%s>(...) instead' %
        (cast_type, match.group(1)))
  return True
def ExpectingFunctionArgs(clean_lines, linenum):
  """Checks whether where function type arguments are expected.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.

  Returns:
    True if the line at 'linenum' is inside something that expects arguments
    of function types.
  """
  line = clean_lines.elided[linenum]

  # A MOCK_METHODn macro starting on this very line takes function types.
  if Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line):
    return True

  if linenum < 2:
    return False

  # A MOCK_METHODn macro or a std::function template opened on one of the
  # two preceding lines also leaves us expecting function-type arguments.
  prev_line = clean_lines.elided[linenum - 1]
  prev_prev_line = clean_lines.elided[linenum - 2]
  return bool(
      Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
            prev_line) or
      Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
            prev_prev_line) or
      Search(r'\bstd::m?function\s*\<\s*$', prev_line))
# Mapping of STL header name -> tuple of template entities that header
# declares.  CheckForIncludeWhatYouUse uses this to suggest which #include
# to add when one of these names is used in the source.
_HEADERS_CONTAINING_TEMPLATES = (
    ('<deque>', ('deque',)),
    ('<functional>', ('unary_function', 'binary_function',
                      'plus', 'minus', 'multiplies', 'divides', 'modulus',
                      'negate',
                      'equal_to', 'not_equal_to', 'greater', 'less',
                      'greater_equal', 'less_equal',
                      'logical_and', 'logical_or', 'logical_not',
                      'unary_negate', 'not1', 'binary_negate', 'not2',
                      'bind1st', 'bind2nd',
                      'pointer_to_unary_function',
                      'pointer_to_binary_function',
                      'ptr_fun',
                      'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
                      'mem_fun_ref_t',
                      'const_mem_fun_t', 'const_mem_fun1_t',
                      'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
                      'mem_fun_ref',
                     )),
    ('<limits>', ('numeric_limits',)),
    ('<list>', ('list',)),
    ('<map>', ('map', 'multimap',)),
    ('<memory>', ('allocator',)),
    ('<queue>', ('queue', 'priority_queue',)),
    ('<set>', ('set', 'multiset',)),
    ('<stack>', ('stack',)),
    ('<string>', ('char_traits', 'basic_string',)),
    ('<utility>', ('pair',)),
    ('<vector>', ('vector',)),

    # gcc extensions.
    # Note: std::hash is their hash, ::hash is our hash
    ('<hash_map>', ('hash_map', 'hash_multimap',)),
    ('<hash_set>', ('hash_set', 'hash_multiset',)),
    ('<slist>', ('slist',)),
    )
# Matches a bare use of 'string' that may require #include <string>.
_RE_PATTERN_STRING = re.compile(r'\bstring\b')

# (compiled pattern, display name, header) triples for <algorithm> functions
# that CheckForIncludeWhatYouUse scans for.
_re_pattern_algorithm_header = []
for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
                  'transform'):
  # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
  # type::max().
  _re_pattern_algorithm_header.append(
      (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
       _template,
       '<algorithm>'))

# (compiled pattern, 'name<>' display string, header) triples built from
# _HEADERS_CONTAINING_TEMPLATES; the pattern matches an instantiation of
# the template (its name followed by '<').
_re_pattern_templates = []
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
  for _template in _templates:
    _re_pattern_templates.append(
        (re.compile(r'(\<|\b)' + _template + r'\s*\<'),
         _template + '<>',
         _header))
def FilesBelongToSameModule(filename_cc, filename_h):
  """Check if these two filenames belong to the same module.

  The concept of a 'module' here is a as follows:
  foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
  same 'module' if they are in the same directory.
  some/path/public/xyzzy and some/path/internal/xyzzy are also considered
  to belong to the same module here.

  If the filename_cc contains a longer path than the filename_h, for example,
  '/absolute/path/to/base/sysinfo.cc', and this file would include
  'base/sysinfo.h', this function also produces the prefix needed to open the
  header. This is used by the caller of this function to more robustly open the
  header file. We don't have access to the real include paths in this context,
  so we need this guesswork here.

  Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
  according to this implementation. Because of this, this function gives
  some false positives. This should be sufficiently rare in practice.

  Args:
    filename_cc: is the path for the .cc file
    filename_h: is the path for the header path

  Returns:
    Tuple with a bool and a string:
    bool: True if filename_cc and filename_h belong to the same module.
    string: the additional prefix needed to open the header file.
  """
  if not filename_cc.endswith('.cc'):
    return (False, '')

  # Reduce the .cc path to its module "stem": drop the extension, any test
  # suffix (checking '_unittest' before the shorter '_test'), and any
  # public/internal path components.
  stem_cc = filename_cc[:-len('.cc')]
  for test_suffix in ('_unittest', '_test'):
    if stem_cc.endswith(test_suffix):
      stem_cc = stem_cc[:-len(test_suffix)]
      break
  for visibility_dir in ('/public/', '/internal/'):
    stem_cc = stem_cc.replace(visibility_dir, '/')

  if not filename_h.endswith('.h'):
    return (False, '')

  # Reduce the header path the same way, also dropping an '-inl' suffix.
  stem_h = filename_h[:-len('.h')]
  if stem_h.endswith('-inl'):
    stem_h = stem_h[:-len('-inl')]
  for visibility_dir in ('/public/', '/internal/'):
    stem_h = stem_h.replace(visibility_dir, '/')

  # Same module when the .cc stem ends with the header stem; whatever
  # precedes it is the prefix needed to open the header.
  if stem_cc.endswith(stem_h):
    return (True, stem_cc[:-len(stem_h)])
  return (False, '')
def UpdateIncludeState(filename, include_dict, io=codecs):
  """Fill up the include_dict with new includes found from the file.

  Args:
    filename: the name of the header to read.
    include_dict: a dictionary in which the headers are inserted.
    io: The io factory to use to read the file. Provided for testability.

  Returns:
    True if a header was successfully added. False otherwise.
  """
  try:
    header_file = io.open(filename, 'r', 'utf8', 'replace')
  except IOError:
    return False

  # Record the first line on which each #include appears; later duplicates
  # are ignored by setdefault.
  for line_number, raw_line in enumerate(header_file, 1):
    stripped_line = CleanseComments(raw_line)
    include_match = _RE_PATTERN_INCLUDE.search(stripped_line)
    if include_match:
      include_dict.setdefault(include_match.group(2), line_number)
  return True
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
                              io=codecs):
  """Reports for missing stl includes.

  This function will output warnings to make sure you are including the headers
  necessary for the stl containers and functions that you use. We only give one
  reason to include a header. For example, if you use both equal_to<> and
  less<> in a .h file, only one (the latter in the file) of these will be
  reported as a reason to include the <functional>.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    include_state: An _IncludeState instance.
    error: The function to call with any errors found.
    io: The IO factory to use to read the header file. Provided for unittest
        injection.
  """
  required = {}  # A map of header name to linenumber and the template entity.
                 # Example of required: { '<functional>': (1219, 'less<>') }

  for linenum in xrange(clean_lines.NumLines()):
    line = clean_lines.elided[linenum]
    if not line or line[0] == '#':
      continue

    # String is special -- it is a non-templatized type in STL.
    matched = _RE_PATTERN_STRING.search(line)
    if matched:
      # Don't warn about strings in non-STL namespaces:
      # (We check only the first match per line; good enough.)
      prefix = line[:matched.start()]
      if prefix.endswith('std::') or not prefix.endswith('::'):
        required['<string>'] = (linenum, 'string')

    for pattern, template, header in _re_pattern_algorithm_header:
      if pattern.search(line):
        required[header] = (linenum, template)

    # The following check is just a speed up, no semantics are changed.
    if '<' not in line:  # Reduces the cpu time usage by skipping lines.
      continue

    for pattern, template, header in _re_pattern_templates:
      if pattern.search(line):
        required[header] = (linenum, template)

  # The policy is that if you #include something in foo.h you don't need to
  # include it again in foo.cc. Here, we will look at possible includes.
  # Let's flatten the include_state include_list and copy it into a dictionary.
  include_dict = dict([item for sublist in include_state.include_list
                       for item in sublist])

  # Did we find the header for this file (if any) and successfully load it?
  header_found = False

  # Use the absolute path so that matching works properly.
  abs_filename = FileInfo(filename).FullName()

  # For Emacs's flymake.
  # If cpplint is invoked from Emacs's flymake, a temporary file is generated
  # by flymake and that file name might end with '_flymake.cc'. In that case,
  # restore original file name here so that the corresponding header file can be
  # found.
  # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
  # instead of 'foo_flymake.h'
  abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)

  # include_dict is modified during iteration, so we iterate over a copy of
  # the keys.  list() guarantees a true copy; a bare dict.keys() is only a
  # copy on Python 2 (it is a live view on Python 3).
  header_keys = list(include_dict.keys())
  for header in header_keys:
    (same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
    fullpath = common_path + header
    if same_module and UpdateIncludeState(fullpath, include_dict, io):
      header_found = True

  # If we can't find the header file for a .cc, assume it's because we don't
  # know where to look. In that case we'll give up as we're not sure they
  # didn't include it in the .h file.
  # TODO(unknown): Do a better job of finding .h files so we are confident that
  # not having the .h file means there isn't one.
  if filename.endswith('.cc') and not header_found:
    return

  # All the lines have been processed, report the errors found.
  for required_header_unstripped in required:
    template = required[required_header_unstripped][1]
    if required_header_unstripped.strip('<>"') not in include_dict:
      error(filename, required[required_header_unstripped][0],
            'build/include_what_you_use', 4,
            'Add #include ' + required_header_unstripped + ' for ' + template)
# Explicit template arguments after make_pair, e.g. "make_pair<int, int>(".
_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')


def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
  """Check that make_pair's template arguments are deduced.

  G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are
  specified explicitly, and such use isn't intended in any case.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  if _RE_PATTERN_EXPLICIT_MAKEPAIR.search(clean_lines.elided[linenum]):
    error(filename, linenum, 'build/explicit_make_pair',
          4,  # 4 = high confidence
          'For C++11-compatibility, omit template arguments from make_pair'
          ' OR use pair directly OR if appropriate, construct a pair directly')
def CheckDefaultLambdaCaptures(filename, clean_lines, linenum, error):
  """Check that default lambda captures are not used.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # A lambda introducer specifies a default capture if it starts with "[="
  # or with "[&" that is _not_ followed by an identifier character.
  intro = Match(r'^(.*)\[\s*(?:=|&[^\w])', clean_lines.elided[linenum])
  if not intro:
    return

  # Check what follows the closing ']'.  A real lambda continues with a
  # lambda-declarator '(' or a compound-statement '{'; anything else means
  # this bracket was not a lambda introducer.
  closed_line, _, end_pos = CloseExpression(
      clean_lines, linenum, len(intro.group(1)))
  if end_pos < 0:
    return
  if Match(r'^\s*[{(]', closed_line[end_pos:]):
    error(filename, linenum, 'build/c++11',
          4,  # 4 = high confidence
          'Default lambda captures are an unapproved C++ feature.')
def CheckRedundantVirtual(filename, clean_lines, linenum, error):
  """Check if line contains a redundant "virtual" function-specifier.

  A function declared "override" or "final" is necessarily virtual, so
  spelling "virtual" as well is redundant and is flagged.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Look for "virtual" on current line.
  line = clean_lines.elided[linenum]
  virtual = Match(r'^(.*\bvirtual\b)', line)
  if not virtual: return

  # Look for the next opening parenthesis.  This is the start of the
  # parameter list (possibly on the next line shortly after virtual).
  # TODO(unknown): doesn't work if there are virtual functions with
  # decltype() or other things that use parentheses, but csearch suggests
  # that this is rare.
  end_col = -1
  end_line = -1
  # start_col begins just past "virtual" on the first line, then resets to
  # column 0 for subsequent lines.
  start_col = len(virtual.group(1))
  for start_line in xrange(linenum, min(linenum + 3, clean_lines.NumLines())):
    line = clean_lines.elided[start_line][start_col:]
    parameter_list = Match(r'^([^(]*)\(', line)
    if parameter_list:
      # Match parentheses to find the end of the parameter list
      (_, end_line, end_col) = CloseExpression(
          clean_lines, start_line, start_col + len(parameter_list.group(1)))
      break
    start_col = 0

  if end_col < 0:
    return  # Couldn't find end of parameter list, give up

  # Look for "override" or "final" after the parameter list
  # (possibly on the next few lines).
  for i in xrange(end_line, min(end_line + 3, clean_lines.NumLines())):
    line = clean_lines.elided[i][end_col:]
    match = Search(r'\b(override|final)\b', line)
    if match:
      error(filename, linenum, 'readability/inheritance', 4,
            ('"virtual" is redundant since function is '
             'already declared as "%s"' % match.group(1)))

    # Set end_col to check whole lines after we are done with the
    # first line.
    end_col = 0
    # Stop once the declaration appears to end (line finishes with a
    # non-identifier character, e.g. ';' or '{').
    if Search(r'[^\w]\s*$', line):
      break
def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error):
  """Check if line contains a redundant "override" or "final" virt-specifier.

  "final" on a function already implies it overrides, so spelling both is
  redundant; at most one of the two should appear.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  has_override = Search(r'\boverride\b', line)
  has_final = Search(r'\bfinal\b', line)
  if has_override and has_final:
    error(filename, linenum, 'readability/inheritance', 4,
          ('"override" is redundant since function is '
           'already declared as "final"'))
# Returns true if we are at a new block, and it is directly
# inside of a namespace.
def IsBlockInNameSpace(nesting_state, is_forward_declaration):
  """Checks that the new block is directly in a namespace.

  Args:
    nesting_state: The _NestingState object that contains info about our state.
    is_forward_declaration: If the class is a forward declared class.
  Returns:
    Whether or not the new block is directly in a namespace.
  """
  stack = nesting_state.stack
  if is_forward_declaration:
    # A forward declaration only needs the innermost open block to be a
    # namespace.
    return len(stack) >= 1 and isinstance(stack[-1], _NamespaceInfo)

  # Otherwise the new block itself is on top of the stack, so its parent
  # (one below the top) must be a namespace, and the block must opt in to
  # namespace-indentation checking.
  return (len(stack) > 1 and
          stack[-1].check_namespace_indentation and
          isinstance(stack[-2], _NamespaceInfo))
def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
                                    raw_lines_no_comments, linenum):
  """This method determines if we should apply our namespace indentation check.

  Args:
    nesting_state: The current nesting state.
    is_namespace_indent_item: If we just put a new class on the stack, True.
      If the top of the stack is not a class, or we did not recently
      add the class, False.
    raw_lines_no_comments: The lines without the comments.
    linenum: The current line number we are processing.

  Returns:
    True if we should apply our namespace indentation check. Currently, it
    only works for classes and namespaces inside of a namespace.
  """
  is_forward_declaration = IsForwardClassDeclaration(raw_lines_no_comments,
                                                     linenum)

  # Only lines that open a class or forward-declare one are candidates.
  if not is_namespace_indent_item and not is_forward_declaration:
    return False

  # Macro bodies are exempt from the namespace indentation rules.
  if IsMacroDefinition(raw_lines_no_comments, linenum):
    return False

  return IsBlockInNameSpace(nesting_state, is_forward_declaration)
# Call this method if the line is directly inside of a namespace.
# If the line above is blank (excluding comments) or the start of
# an inner namespace, it cannot be indented.
def CheckItemIndentationInNamespace(filename, raw_lines_no_comments, linenum,
                                    error):
  """Flags any leading indentation on a line directly inside a namespace."""
  if Match(r'^\s+', raw_lines_no_comments[linenum]):
    error(filename, linenum, 'runtime/indentation_namespace', 4,
          'Do not indent within a namespace')
def ProcessLine(filename, file_extension, clean_lines, line,
                include_state, function_state, nesting_state, error,
                extra_check_functions=None):
  """Processes a single line in the file.

  Args:
    filename: Filename of the file that is being processed.
    file_extension: The extension (dot not included) of the file.
    clean_lines: An array of strings, each representing a line of the file,
                 with comments stripped.
    line: Number of line being processed.
    include_state: An _IncludeState instance in which the headers are inserted.
    function_state: A _FunctionState instance which counts function lines, etc.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message
    extra_check_functions: An array of additional check functions that will be
                           run on each source line. Each function takes 4
                           arguments: filename, clean_lines, line, error.
                           None (the default) means no extra checks.
  """
  # The default used to be a mutable list ([]); use None so a single list
  # object is not silently shared across calls.
  if extra_check_functions is None:
    extra_check_functions = []
  raw_lines = clean_lines.raw_lines
  ParseNolintSuppressions(filename, raw_lines[line], line, error)
  nesting_state.Update(filename, clean_lines, line, error)
  CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
                               error)
  # Skip all further checks inside assembly blocks.
  if nesting_state.InAsmBlock(): return
  CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
  CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
  CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
  CheckLanguage(filename, clean_lines, line, file_extension, include_state,
                nesting_state, error)
  CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
  CheckForNonStandardConstructs(filename, clean_lines, line,
                                nesting_state, error)
  CheckVlogArguments(filename, clean_lines, line, error)
  CheckPosixThreading(filename, clean_lines, line, error)
  CheckInvalidIncrement(filename, clean_lines, line, error)
  CheckMakePairUsesDeduction(filename, clean_lines, line, error)
  CheckDefaultLambdaCaptures(filename, clean_lines, line, error)
  CheckRedundantVirtual(filename, clean_lines, line, error)
  CheckRedundantOverrideOrFinal(filename, clean_lines, line, error)
  for check_fn in extra_check_functions:
    check_fn(filename, clean_lines, line, error)
def FlagCxx11Features(filename, clean_lines, linenum, error):
  """Flag those c++11 features that we only allow in certain places.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Flag unapproved C++11 headers.
  unapproved_headers = ('cfenv',
                        'condition_variable',
                        'fenv.h',
                        'future',
                        'mutex',
                        'thread',
                        'chrono',
                        'ratio',
                        'regex',
                        'system_error')
  include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
  if include and include.group(1) in unapproved_headers:
    error(filename, linenum, 'build/c++11', 5,
          ('<%s> is an unapproved C++11 header.') % include.group(1))

  # The only place where we need to worry about C++11 keywords and library
  # features in preprocessor directives is in macro definitions.
  if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line):
    return

  # These are classes and free functions.  The classes are always
  # mentioned as std::*, but we only catch the free functions if
  # they're not found by ADL.  They're alphabetical by header.
  for top_name in ('alignment_of',   # type_traits
                   'aligned_union',  # type_traits
                   'forward'):       # utility
    if Search(r'\bstd::%s\b' % top_name, line):
      error(filename, linenum, 'build/c++11', 5,
            ('std::%s is an unapproved C++11 class or function. Send c-style '
             'an example of where it would make your code more readable, and '
             'they may let you use it.') % top_name)
def ProcessFileData(filename, file_extension, lines, error,
                    extra_check_functions=None):
  """Performs lint checks and reports any errors to the given error function.

  Args:
    filename: Filename of the file that is being processed.
    file_extension: The extension (dot not included) of the file.
    lines: An array of strings, each representing a line of the file, with the
           last element being empty if the file is terminated with a newline.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message
    extra_check_functions: An array of additional check functions that will be
                           run on each source line. Each function takes 4
                           arguments: filename, clean_lines, line, error.
                           None (the default) means no extra checks.
  """
  # The default used to be a mutable list ([]); normalize None here so a
  # single list object is not shared across calls, and so a concrete list
  # is always what gets forwarded to ProcessLine.
  if extra_check_functions is None:
    extra_check_functions = []

  # Pad with markers so user-visible line numbers are 1-based.
  lines = (['// marker so line numbers and indices both start at 1'] + lines +
           ['// marker so line numbers end in a known way'])

  include_state = _IncludeState()
  function_state = _FunctionState()
  nesting_state = NestingState()

  ResetNolintSuppressions()

  CheckForCopyright(filename, lines, error)

  if file_extension == 'h':
    CheckForHeaderGuard(filename, lines, error)

  RemoveMultiLineComments(filename, lines, error)
  clean_lines = CleansedLines(lines)
  for line in xrange(clean_lines.NumLines()):
    ProcessLine(filename, file_extension, clean_lines, line,
                include_state, function_state, nesting_state, error,
                extra_check_functions)
    FlagCxx11Features(filename, clean_lines, line, error)
  nesting_state.CheckCompletedBlocks(filename, error)

  CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)

  # We check here rather than inside ProcessLine so that we see raw
  # lines rather than "cleaned" lines.
  CheckForBadCharacters(filename, lines, error)

  CheckForNewlineAtEOF(filename, lines, error)
def ProcessConfigOverrides(filename):
  """ Loads the configuration files and processes the config overrides.

  Walks from the file's directory up to the filesystem root, reading any
  CPPLINT.cfg found along the way (stopping early at 'set noparent').

  Args:
    filename: The name of the file being processed by the linter.

  Returns:
    False if the current |filename| should not be processed further.
  """
  abs_filename = os.path.abspath(filename)
  cfg_filters = []
  keep_looking = True
  while keep_looking:
    abs_path, base_name = os.path.split(abs_filename)
    if not base_name:
      break  # Reached the root directory.

    cfg_file = os.path.join(abs_path, "CPPLINT.cfg")
    abs_filename = abs_path
    if not os.path.isfile(cfg_file):
      continue

    try:
      with open(cfg_file) as file_handle:
        for line in file_handle:
          line, _, _ = line.partition('#')  # Remove comments.
          if not line.strip():
            continue

          name, _, val = line.partition('=')
          name = name.strip()
          val = val.strip()
          if name == 'set noparent':
            keep_looking = False
          elif name == 'filter':
            cfg_filters.append(val)
          elif name == 'exclude_files':
            # When matching exclude_files pattern, use the base_name of
            # the current file name or the directory name we are processing.
            # For example, if we are checking for lint errors in /foo/bar/baz.cc
            # and we found the .cfg file at /foo/CPPLINT.cfg, then the config
            # file's "exclude_files" filter is meant to be checked against "bar"
            # and not "baz" nor "bar/baz.cc".
            if base_name:
              pattern = re.compile(val)
              if pattern.match(base_name):
                sys.stderr.write('Ignoring "%s": file excluded by "%s". '
                                 'File path component "%s" matches '
                                 'pattern "%s"\n' %
                                 (filename, cfg_file, base_name, val))
                return False
          elif name == 'linelength':
            global _line_length
            try:
              _line_length = int(val)
            except ValueError:
              # Fixed: message previously lacked the trailing newline that
              # every other diagnostic written here has.
              sys.stderr.write('Line length must be numeric.\n')
          else:
            sys.stderr.write(
                'Invalid configuration option (%s) in file %s\n' %
                (name, cfg_file))

    except IOError:
      sys.stderr.write(
          "Skipping config file '%s': Can't open for reading\n" % cfg_file)
      keep_looking = False

  # Apply all the accumulated filters in reverse order (top-level directory
  # config options having the least priority).
  # (Renamed from 'filter', which shadowed the builtin of the same name.)
  for cfg_filter in reversed(cfg_filters):
    _AddFilters(cfg_filter)

  return True
def ProcessFile(filename, vlevel, extra_check_functions=None):
  """Does google-lint on a single file.

  Args:
    filename: The name of the file to parse.

    vlevel: The level of errors to report.  Every error of confidence
            >= verbose_level will be reported.  0 is a good default.

    extra_check_functions: An array of additional check functions that will be
                           run on each source line.  Each function takes 4
                           arguments: filename, clean_lines, line, error
  """
  # Fix: the default used to be a mutable list literal ([]), which is shared
  # across calls.  None is the safe sentinel; callers passing a list see no
  # difference.
  if extra_check_functions is None:
    extra_check_functions = []

  _SetVerboseLevel(vlevel)
  _BackupFilters()

  # Per-directory CPPLINT.cfg may exclude this file entirely.
  if not ProcessConfigOverrides(filename):
    _RestoreFilters()
    return

  lf_lines = []
  crlf_lines = []
  try:
    # Support the UNIX convention of using "-" for stdin.  Note that
    # we are not opening the file with universal newline support
    # (which codecs doesn't support anyway), so the resulting lines do
    # contain trailing '\r' characters if we are reading a file that
    # has CRLF endings.
    # If after the split a trailing '\r' is present, it is removed
    # below.
    if filename == '-':
      lines = codecs.StreamReaderWriter(sys.stdin,
                                        codecs.getreader('utf8'),
                                        codecs.getwriter('utf8'),
                                        'replace').read().split('\n')
    else:
      lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')

    # Remove trailing '\r'.
    # The -1 accounts for the extra trailing blank line we get from split()
    for linenum in range(len(lines) - 1):
      if lines[linenum].endswith('\r'):
        lines[linenum] = lines[linenum].rstrip('\r')
        crlf_lines.append(linenum + 1)
      else:
        lf_lines.append(linenum + 1)

  except IOError:
    sys.stderr.write(
        "Skipping input '%s': Can't open for reading\n" % filename)
    _RestoreFilters()
    return

  # Note, if no dot is found, this will give the entire filename as the ext.
  file_extension = filename[filename.rfind('.') + 1:]

  # When reading from stdin, the extension is unknown, so no cpplint tests
  # should rely on the extension.
  if filename != '-' and file_extension not in _valid_extensions:
    sys.stderr.write('Ignoring %s; not a valid file name '
                     '(%s)\n' % (filename, ', '.join(_valid_extensions)))
  else:
    ProcessFileData(filename, file_extension, lines, Error,
                    extra_check_functions)

    # If end-of-line sequences are a mix of LF and CR-LF, issue
    # warnings on the lines with CR.
    #
    # Don't issue any warnings if all lines are uniformly LF or CR-LF,
    # since critique can handle these just fine, and the style guide
    # doesn't dictate a particular end of line sequence.
    #
    # We can't depend on os.linesep to determine what the desired
    # end-of-line sequence should be, since that will return the
    # server-side end-of-line sequence.
    if lf_lines and crlf_lines:
      # Warn on every line with CR.  An alternative approach might be to
      # check whether the file is mostly CRLF or just LF, and warn on the
      # minority, we bias toward LF here since most tools prefer LF.
      for linenum in crlf_lines:
        Error(filename, linenum, 'whitespace/newline', 1,
              'Unexpected \\r (^M) found; better to use only \\n')

  sys.stderr.write('Done processing %s\n' % filename)
  _RestoreFilters()
def PrintUsage(message):
  """Writes the usage string to stderr, then exits.

  Exit status is the fatal error message when one is given, otherwise 1.

  Args:
    message: The optional error message.
  """
  sys.stderr.write(_USAGE)
  if not message:
    sys.exit(1)
  sys.exit('\nFATAL ERROR: ' + message)
def PrintCategories():
  """Prints every error category usable with --filter, then exits with 0.

  These are the categories used to filter messages via --filter.
  """
  for category in _ERROR_CATEGORIES:
    sys.stderr.write('  %s\n' % category)
  sys.exit(0)
def ParseArguments(args):
  """Parses the command line arguments.

  This may set the output format and verbosity level as side-effects
  (and, via `global`, the module-level _root, _line_length and
  _valid_extensions settings).

  Args:
    args: The command line arguments:

  Returns:
    The list of filenames to lint.
  """
  try:
    (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
                                                 'counting=',
                                                 'filter=',
                                                 'root=',
                                                 'linelength=',
                                                 'extensions='])
  except getopt.GetoptError:
    PrintUsage('Invalid arguments.')

  verbosity = _VerboseLevel()
  output_format = _OutputFormat()
  filters = ''
  counting_style = ''

  for (opt, val) in opts:
    if opt == '--help':
      PrintUsage(None)
    elif opt == '--output':
      if val not in ('emacs', 'vs7', 'eclipse'):
        PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
      output_format = val
    elif opt == '--verbose':
      verbosity = int(val)
    elif opt == '--filter':
      filters = val
      if not filters:
        # An empty --filter means "list the categories and exit".
        PrintCategories()
    elif opt == '--counting':
      if val not in ('total', 'toplevel', 'detailed'):
        PrintUsage('Valid counting options are total, toplevel, and detailed')
      counting_style = val
    elif opt == '--root':
      global _root
      _root = val
    elif opt == '--linelength':
      global _line_length
      try:
        _line_length = int(val)
      except ValueError:
        PrintUsage('Line length must be digits.')
    elif opt == '--extensions':
      global _valid_extensions
      try:
        _valid_extensions = set(val.split(','))
      except ValueError:
        # str.split never raises ValueError, so this branch is effectively
        # dead; kept so a future parsing change still reports cleanly.
        # Fix: the message used to misspell "separated".
        PrintUsage('Extensions must be comma separated list.')

  if not filenames:
    PrintUsage('No files were specified.')

  _SetOutputFormat(output_format)
  _SetVerboseLevel(verbosity)
  _SetFilters(filters)
  _SetCountingStyle(counting_style)

  return filenames
def main():
  """Command-line entry point: parse flags, lint each file, report counts."""
  filenames = ParseArguments(sys.argv[1:])

  # Re-wrap stderr with replacement-on-error encoding so printing messages
  # containing non-ASCII characters cannot crash the linter.
  sys.stderr = codecs.StreamReaderWriter(
      sys.stderr, codecs.getreader('utf8'), codecs.getwriter('utf8'),
      'replace')

  _cpplint_state.ResetErrorCounts()
  for path in filenames:
    ProcessFile(path, _cpplint_state.verbose_level)
  _cpplint_state.PrintErrorCounts()

  # Exit status 1 when any error was reported, 0 otherwise.
  sys.exit(_cpplint_state.error_count > 0)
# Script entry point when executed directly (not when imported as a module).
if __name__ == '__main__':
  main()
| 38.273156 | 97 | 0.651386 |
import codecs
import copy
import getopt
import math
import os
import re
import sre_compile
import string
import sys
import unicodedata
_USAGE = """
Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
[--counting=total|toplevel|detailed] [--root=subdir]
[--linelength=digits]
<file> [file] ...
The style guidelines this tries to follow are those in
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
Every problem is given a confidence score from 1-5, with 5 meaning we are
certain of the problem, and 1 meaning it could be a legitimate construct.
This will miss some errors, and is not a substitute for a code review.
To suppress false-positive errors of a certain category, add a
'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
suppresses errors of all categories on that line.
The files passed in will be linted; at least one file must be provided.
Default linted extensions are .cc, .cpp, .cu, .cuh and .h. Change the
extensions with the --extensions flag.
Flags:
output=vs7
By default, the output is formatted to ease emacs parsing. Visual Studio
compatible output (vs7) may also be used. Other formats are unsupported.
verbose=#
Specify a number 0-5 to restrict errors to certain verbosity levels.
filter=-x,+y,...
Specify a comma-separated list of category-filters to apply: only
error messages whose category names pass the filters will be printed.
(Category names are printed with the message and look like
"[whitespace/indent]".) Filters are evaluated left to right.
"-FOO" and "FOO" means "do not print categories that start with FOO".
"+FOO" means "do print categories that start with FOO".
Examples: --filter=-whitespace,+whitespace/braces
--filter=whitespace,runtime/printf,+runtime/printf_format
--filter=-,+build/include_what_you_use
To see a list of all the categories used in cpplint, pass no arg:
--filter=
counting=total|toplevel|detailed
The total number of errors found is always printed. If
'toplevel' is provided, then the count of errors in each of
the top-level categories like 'build' and 'whitespace' will
also be printed. If 'detailed' is provided, then a count
is provided for each category like 'build/class'.
root=subdir
The root directory used for deriving header guard CPP variable.
By default, the header guard CPP variable is calculated as the relative
path to the directory that contains .git, .hg, or .svn. When this flag
is specified, the relative path is calculated from the specified
directory. If the specified directory does not exist, this flag is
ignored.
Examples:
Assuming that src/.git exists, the header guard CPP variables for
src/chrome/browser/ui/browser.h are:
No flag => CHROME_BROWSER_UI_BROWSER_H_
--root=chrome => BROWSER_UI_BROWSER_H_
--root=chrome/browser => UI_BROWSER_H_
linelength=digits
This is the allowed line length for the project. The default value is
80 characters.
Examples:
--linelength=120
extensions=extension,extension,...
The allowed file extensions that cpplint will check
Examples:
--extensions=hpp,cpp
cpplint.py supports per-directory configurations specified in CPPLINT.cfg
files. CPPLINT.cfg file can contain a number of key=value pairs.
Currently the following options are supported:
set noparent
filter=+filter1,-filter2,...
exclude_files=regex
linelength=80
"set noparent" option prevents cpplint from traversing directory tree
upwards looking for more .cfg files in parent directories. This option
is usually placed in the top-level project directory.
The "filter" option is similar in function to --filter flag. It specifies
message filters in addition to the |_DEFAULT_FILTERS| and those specified
through --filter command-line flag.
"exclude_files" allows to specify a regular expression to be matched against
a file name. If the expression matches, the file is skipped and not run
through liner.
"linelength" allows to specify the allowed line length for the project.
CPPLINT.cfg has an effect on files in the same directory and all
sub-directories, unless overridden by a nested configuration file.
Example file:
filter=-build/include_order,+build/include_alpha
exclude_files=.*\.cc
The above example disables build/include_order warning and enables
build/include_alpha as well as excludes all .cc from being
processed by linter, in the current directory (where the .cfg
file is located) and all sub-directories.
"""
# The complete set of error categories cpplint can emit.  --filter arguments
# and NOLINT(category) comments are validated against this list (see
# ParseNolintSuppressions and PrintCategories).
_ERROR_CATEGORIES = [
    'build/class',
    'build/c++11',
    'build/deprecated',
    'build/endif_comment',
    'build/explicit_make_pair',
    'build/forward_decl',
    'build/header_guard',
    'build/include',
    'build/include_alpha',
    'build/include_order',
    'build/include_what_you_use',
    'build/namespaces',
    'build/printf_format',
    'build/storage_class',
    'legal/copyright',
    'readability/alt_tokens',
    'readability/braces',
    'readability/casting',
    'readability/check',
    'readability/constructors',
    'readability/fn_size',
    'readability/function',
    'readability/inheritance',
    'readability/multiline_comment',
    'readability/multiline_string',
    'readability/namespace',
    'readability/nolint',
    'readability/nul',
    'readability/streams',
    'readability/todo',
    'readability/utf8',
    'runtime/arrays',
    'runtime/casting',
    'runtime/explicit',
    'runtime/int',
    'runtime/init',
    'runtime/invalid_increment',
    'runtime/member_string_references',
    'runtime/memset',
    'runtime/indentation_namespace',
    'runtime/operator',
    'runtime/printf',
    'runtime/printf_format',
    'runtime/references',
    'runtime/string',
    'runtime/threadsafe_fn',
    'runtime/vlog',
    'whitespace/blank_line',
    'whitespace/braces',
    'whitespace/comma',
    'whitespace/comments',
    'whitespace/empty_conditional_body',
    'whitespace/empty_loop_body',
    'whitespace/end_of_line',
    'whitespace/ending_newline',
    'whitespace/forcolon',
    'whitespace/indent',
    'whitespace/line_length',
    'whitespace/newline',
    'whitespace/operators',
    'whitespace/parens',
    'whitespace/semicolon',
    'whitespace/tab',
    'whitespace/todo'
  ]
# Filters applied before any user-specified --filter flags; each entry must
# name a category (or prefix) from _ERROR_CATEGORIES above.
_DEFAULT_FILTERS = ['-build/include_alpha']
# NOTE(review): the next comment line is a fragment left over from comment
# stripping; the original text discussed allowing high-bit (UTF-8) characters
# everywhere except in hard-coded international strings.
# hard-coded international strings, which belong in a separate i18n file.
# C++ headers
# Header names that are treated as C++ system headers when classifying
# #include order (see _IncludeState).  Grouped into pre-standard "legacy"
# SGI/STL names, standard C++ library headers, and the C-compatibility
# (c-prefixed) headers.
_CPP_HEADERS = frozenset([
    # Legacy
    'algobase.h',
    'algo.h',
    'alloc.h',
    'builtinbuf.h',
    'bvector.h',
    'complex.h',
    'defalloc.h',
    'deque.h',
    'editbuf.h',
    'fstream.h',
    'function.h',
    'hash_map',
    'hash_map.h',
    'hash_set',
    'hash_set.h',
    'hashtable.h',
    'heap.h',
    'indstream.h',
    'iomanip.h',
    'iostream.h',
    'istream.h',
    'iterator.h',
    'list.h',
    'map.h',
    'multimap.h',
    'multiset.h',
    'ostream.h',
    'pair.h',
    'parsestream.h',
    'pfstream.h',
    'procbuf.h',
    'pthread_alloc',
    'pthread_alloc.h',
    'rope',
    'rope.h',
    'ropeimpl.h',
    'set.h',
    'slist',
    'slist.h',
    'stack.h',
    'stdiostream.h',
    'stl_alloc.h',
    'stl_relops.h',
    'streambuf.h',
    'stream.h',
    'strfile.h',
    'strstream.h',
    'tempbuf.h',
    'tree.h',
    'type_traits.h',
    'vector.h',
    # 17.6.1.2 C++ library headers
    'algorithm',
    'array',
    'atomic',
    'bitset',
    'chrono',
    'codecvt',
    'complex',
    'condition_variable',
    'deque',
    'exception',
    'forward_list',
    'fstream',
    'functional',
    'future',
    'initializer_list',
    'iomanip',
    'ios',
    'iosfwd',
    'iostream',
    'istream',
    'iterator',
    'limits',
    'list',
    'locale',
    'map',
    'memory',
    'mutex',
    'new',
    'numeric',
    'ostream',
    'queue',
    'random',
    'ratio',
    'regex',
    'set',
    'sstream',
    'stack',
    'stdexcept',
    'streambuf',
    'string',
    'strstream',
    'system_error',
    'thread',
    'tuple',
    'typeindex',
    'typeinfo',
    'type_traits',
    'unordered_map',
    'unordered_set',
    'utility',
    'valarray',
    'vector',
    # 17.6.1.2 C++ headers for C library facilities
    'cassert',
    'ccomplex',
    'cctype',
    'cerrno',
    'cfenv',
    'cfloat',
    'cinttypes',
    'ciso646',
    'climits',
    'clocale',
    'cmath',
    'csetjmp',
    'csignal',
    'cstdalign',
    'cstdarg',
    'cstdbool',
    'cstddef',
    'cstdint',
    'cstdio',
    'cstdlib',
    'cstring',
    'ctgmath',
    'ctime',
    'cuchar',
    'cwchar',
    'cwctype',
    ])
# These headers are excluded from [build/include] and [build/include_order]
# checks:
# - Anything not following google file name conventions (containing an
#   uppercase character, such as Python.h or nsStringAPI.h, for example).
# - Lua headers.
_THIRD_PARTY_HEADERS_PATTERN = re.compile(
    r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$')
# Assertion macros.  These are defined in base/logging.h and
# testing/base/gunit.h.  Note that the _M versions need to come first
# for substring matching to work.
_CHECK_MACROS = [
    'DCHECK', 'CHECK',
    'EXPECT_TRUE_M', 'EXPECT_TRUE',
    'ASSERT_TRUE_M', 'ASSERT_TRUE',
    'EXPECT_FALSE_M', 'EXPECT_FALSE',
    'ASSERT_FALSE_M', 'ASSERT_FALSE',
    ]
# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
# Map each comparison operator to the two-operand macro name, e.g.
# CHECK(a == b) should be written CHECK_EQ(a, b).
for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
                        ('>=', 'GE'), ('>', 'GT'),
                        ('<=', 'LE'), ('<', 'LT')]:
  _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
  _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
  _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
  _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
  _CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
  _CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
# The *_FALSE forms assert the negation, so the operator is inverted:
# EXPECT_FALSE(a == b) should be written EXPECT_NE(a, b).
for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
                            ('>=', 'LT'), ('>', 'LE'),
                            ('<=', 'GT'), ('<', 'GE')]:
  _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
  _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
  _CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
  _CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
# Alternative tokens and their replacements.  For full list, see section 2.5
# Alternative tokens [lex.digraph] in the C++ standard.
#
# Digraphs (such as '%:') are not included here since it's a mess to
# match those on a word boundary.
_ALT_TOKEN_REPLACEMENT = {
    'and': '&&',
    'bitor': '|',
    'or': '||',
    'xor': '^',
    'compl': '~',
    'bitand': '&',
    'and_eq': '&=',
    'or_eq': '|=',
    'xor_eq': '^=',
    'not': '!',
    'not_eq': '!='
    }
# Matches an alternative token when it appears as a standalone word (bounded
# by space/'='/parens on the left and space/'(' or end-of-line on the right).
_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
    r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
# Classification constants for #include lines, used by _IncludeState to
# verify include ordering.
_C_SYS_HEADER = 1
_CPP_SYS_HEADER = 2
_LIKELY_MY_HEADER = 3
_POSSIBLE_MY_HEADER = 4
_OTHER_HEADER = 5
# States while scanning inline-assembly blocks.
_NO_ASM = 0       # Outside of inline assembly block
_INSIDE_ASM = 1   # Inside inline assembly block
_END_ASM = 2      # Last line of inline assembly block
_BLOCK_ASM = 3    # The whole block is an inline assembly block
# Match start of assembly blocks
_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
                        r'(?:\s+(volatile|__volatile__))?'
                        r'\s*[{(]')
# Cache of compiled regexps, keyed by pattern string (see Match/Search below).
_regexp_compile_cache = {}
# {str, set(int)}: a map from error categories to sets of linenumbers
# on which those errors are expected and should be suppressed.
_error_suppressions = {}
# Root directory used for deriving the header guard CPP variable (--root).
_root = None
# The allowed line length of files (--linelength).
_line_length = 80
# The allowed extensions for file names (--extensions).
_valid_extensions = set(['c', 'cc', 'h', 'cpp', 'cu', 'cuh', "m", "mm"])
def ParseNolintSuppressions(filename, raw_line, linenum, error):
  """Records NOLINT / NOLINTNEXTLINE suppressions found on a raw line.

  Updates the global _error_suppressions map; unknown categories are
  reported through the error callback as readability/nolint.

  Args:
    filename: str, the name of the input file.
    raw_line: str, the line of input text, with comments.
    linenum: int, the number of the current line.
    error: function, an error handler.
  """
  matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line)
  if not matched:
    return

  # NOLINTNEXTLINE suppresses the following line; plain NOLINT this one.
  target = linenum + 1 if matched.group(1) else linenum

  category = matched.group(2)
  if category in (None, '(*)'):
    # No category (or the '*' wildcard): suppress all categories.
    _error_suppressions.setdefault(None, set()).add(target)
    return

  if category.startswith('(') and category.endswith(')'):
    category = category[1:-1]
    if category in _ERROR_CATEGORIES:
      _error_suppressions.setdefault(category, set()).add(target)
    else:
      error(filename, linenum, 'readability/nolint', 5,
            'Unknown NOLINT error category: %s' % category)
def ResetNolintSuppressions():
  """Resets the set of NOLINT suppressions to empty (called per file)."""
  _error_suppressions.clear()
def IsErrorSuppressedByNolint(category, linenum):
  """Returns True if the error category is suppressed on this line.

  Consults the global _error_suppressions map populated by
  ParseNolintSuppressions/ResetNolintSuppressions.  A line is suppressed
  either for the specific category or for all categories (key None).

  Args:
    category: str, the category of the error.
    linenum: int, the current line number.
  Returns:
    bool, True iff the error should be suppressed due to a NOLINT comment.
  """
  for key in (category, None):
    if linenum in _error_suppressions.get(key, set()):
      return True
  return False
def Match(pattern, s):
  """Matches the string with the pattern, caching the compiled regexp."""
  # The caching is inlined (rather than factored into a helper) in each of
  # these three functions so the common hit path stays a single dict lookup.
  if pattern not in _regexp_compile_cache:
    _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
  return _regexp_compile_cache[pattern].match(s)
def ReplaceAll(pattern, rep, s):
  """Replaces instances of pattern in a string with a replacement.

  Args:
    pattern: regex pattern
    rep: replacement text
    s: search string

  Returns:
    string with replacements made (or original string if no replacements)
  """
  if pattern not in _regexp_compile_cache:
    _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
  return _regexp_compile_cache[pattern].sub(rep, s)
def Search(pattern, s):
  """Searches the string for the pattern, caching the compiled regexp."""
  if pattern not in _regexp_compile_cache:
    _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
  return _regexp_compile_cache[pattern].search(s)
class _IncludeState(object):
  """Tracks line numbers for includes, and the order in which includes appear.

  include_list contains list of lists of (header, line number) pairs.
  It's a lists of lists rather than just one flat list to make it
  easier to update across preprocessor boundaries.

  Call CheckNextIncludeOrder() once for each header seen, passing in one
  of the header-type constants (_C_SYS_HEADER etc.); it returns an error
  description string when the header is out of order, or '' when it is fine.
  """
  # self._section advances monotonically through these values; an attempt to
  # move backwards is what CheckNextIncludeOrder reports as an ordering error.
  _INITIAL_SECTION = 0
  _MY_H_SECTION = 1
  _C_SECTION = 2
  _CPP_SECTION = 3
  _OTHER_H_SECTION = 4
  # Human-readable names for the header-type constants, used in messages.
  _TYPE_NAMES = {
      _C_SYS_HEADER: 'C system header',
      _CPP_SYS_HEADER: 'C++ system header',
      _LIKELY_MY_HEADER: 'header this file implements',
      _POSSIBLE_MY_HEADER: 'header this file may implement',
      _OTHER_HEADER: 'other header',
      }
  # Human-readable names for the section constants, used in messages.
  _SECTION_NAMES = {
      _INITIAL_SECTION: "... nothing. (This can't be an error.)",
      _MY_H_SECTION: 'a header this file implements',
      _C_SECTION: 'C system header',
      _CPP_SECTION: 'C++ system header',
      _OTHER_H_SECTION: 'other header',
      }
  def __init__(self):
    self.include_list = [[]]
    self.ResetSection('')
  def FindHeader(self, header):
    """Check if a header has already been included.

    Args:
      header: header to check.
    Returns:
      Line number of previous occurrence, or -1 if the header has not
      been seen before.
    """
    for section_list in self.include_list:
      for f in section_list:
        if f[0] == header:
          return f[1]
    return -1
  def ResetSection(self, directive):
    """Reset section checking for preprocessor directive.

    Args:
      directive: preprocessor directive (e.g. "if", "else").
    """
    # The name of the current section.
    self._section = self._INITIAL_SECTION
    # The path of last found header.
    self._last_header = ''
    # Update list of includes. Note that we never pop from the
    # include list.
    if directive in ('if', 'ifdef', 'ifndef'):
      self.include_list.append([])
    elif directive in ('else', 'elif'):
      self.include_list[-1] = []
  def SetLastHeader(self, header_path):
    # Remember the most recent header path for alphabetical-order checks.
    self._last_header = header_path
  def CanonicalizeAlphabeticalOrder(self, header_path):
    """Returns a path canonicalized for alphabetical comparison.

    - replaces "-" with "_" so they both cmp the same.
    - removes '-inl' since we don't require them to be after the main header.
    - lowercase everything, just in case.
    """
    return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
  def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
    """Check if a header is in alphabetical order with the previous header.

    Returns:
      Boolean: False if the previous header sorts after header_path and the
      preceding line is not blank (a blank line is taken as an intentional
      section break).
    """
    # If previous section is different from current section, _last_header will
    # be reset to empty string, so it's always less than current header.
    if (self._last_header > header_path and
        not Match(r'^\s*$', clean_lines.elided[linenum - 1])):
      return False
    return True
  def CheckNextIncludeOrder(self, header_type):
    """Returns a non-empty error message if the next header is out of order.

    Args:
      header_type: One of the _XXX_HEADER constants defined above.
    Returns:
      The empty string if the header is in the right order, or an
      error message describing what's wrong.
    """
    error_message = ('Found %s after %s' %
                     (self._TYPE_NAMES[header_type],
                      self._SECTION_NAMES[self._section]))
    last_section = self._section
    if header_type == _C_SYS_HEADER:
      if self._section <= self._C_SECTION:
        self._section = self._C_SECTION
      else:
        self._last_header = ''
        return error_message
    elif header_type == _CPP_SYS_HEADER:
      if self._section <= self._CPP_SECTION:
        self._section = self._CPP_SECTION
      else:
        self._last_header = ''
        return error_message
    elif header_type == _LIKELY_MY_HEADER:
      if self._section <= self._MY_H_SECTION:
        self._section = self._MY_H_SECTION
      else:
        self._section = self._OTHER_H_SECTION
    elif header_type == _POSSIBLE_MY_HEADER:
      if self._section <= self._MY_H_SECTION:
        self._section = self._MY_H_SECTION
      else:
        # This will always be the fallback because we're not sure
        # enough that the header is associated with this file.
        self._section = self._OTHER_H_SECTION
    else:
      assert header_type == _OTHER_HEADER
      self._section = self._OTHER_H_SECTION
    if last_section != self._section:
      # Crossing a section boundary resets the alphabetical-order baseline.
      self._last_header = ''
    return ''
class _CppLintState(object):
def __init__(self):
self.verbose_level = 1 # global setting.
self.error_count = 0 # global count of reported errors
# filters to apply when emitting error messages
self.filters = _DEFAULT_FILTERS[:]
# backup of filter list. Used to restore the state after each file.
self._filters_backup = self.filters[:]
self.counting = 'total' # In what way are we counting errors?
self.errors_by_category = {} # string to int dict storing error counts
# output format:
# "emacs" - format that emacs can parse (default)
# "vs7" - format that Microsoft Visual Studio 7 can parse
self.output_format = 'emacs'
def SetOutputFormat(self, output_format):
self.output_format = output_format
def SetVerboseLevel(self, level):
last_verbose_level = self.verbose_level
self.verbose_level = level
return last_verbose_level
def SetCountingStyle(self, counting_style):
self.counting = counting_style
def SetFilters(self, filters):
# Default filters always have less priority than the flag ones.
self.filters = _DEFAULT_FILTERS[:]
self.AddFilters(filters)
def AddFilters(self, filters):
for filt in filters.split(','):
clean_filt = filt.strip()
if clean_filt:
self.filters.append(clean_filt)
for filt in self.filters:
if not (filt.startswith('+') or filt.startswith('-')):
raise ValueError('Every filter in --filters must start with + or -'
' (%s does not)' % filt)
def BackupFilters(self):
self._filters_backup = self.filters[:]
def RestoreFilters(self):
self.filters = self._filters_backup[:]
def ResetErrorCounts(self):
self.error_count = 0
self.errors_by_category = {}
def IncrementErrorCount(self, category):
self.error_count += 1
if self.counting in ('toplevel', 'detailed'):
if self.counting != 'detailed':
category = category.split('/')[0]
if category not in self.errors_by_category:
self.errors_by_category[category] = 0
self.errors_by_category[category] += 1
def PrintErrorCounts(self):
for category, count in self.errors_by_category.iteritems():
sys.stderr.write('Category \'%s\' errors found: %d\n' %
(category, count))
sys.stderr.write('Total errors found: %d\n' % self.error_count)
# Module-level singleton holding all mutable linter state; the wrappers
# below are thin module-level accessors around it.
_cpplint_state = _CppLintState()
def _OutputFormat():
  """Gets the module's output format."""
  return _cpplint_state.output_format
def _SetOutputFormat(output_format):
  """Sets the module's output format."""
  _cpplint_state.SetOutputFormat(output_format)
def _VerboseLevel():
  """Returns the module's verbosity setting."""
  return _cpplint_state.verbose_level
def _SetVerboseLevel(level):
  """Sets the module's verbosity, and returns the previous setting."""
  return _cpplint_state.SetVerboseLevel(level)
def _SetCountingStyle(level):
  """Sets the module's counting options."""
  _cpplint_state.SetCountingStyle(level)
def _Filters():
  """Returns the module's list of output filters, as a list."""
  return _cpplint_state.filters
def _SetFilters(filters):
  """Sets the module's error-message filters, discarding previous ones."""
  _cpplint_state.SetFilters(filters)
def _AddFilters(filters):
  """Adds more filters to the existing list of error-message filters."""
  _cpplint_state.AddFilters(filters)
def _BackupFilters():
  """Saves the current filter list to restore later."""
  _cpplint_state.BackupFilters()
def _RestoreFilters():
  """Restores filters previously backed up."""
  _cpplint_state.RestoreFilters()
class _FunctionState(object):
  """Tracks current function name and the number of lines in its body."""
  _NORMAL_TRIGGER = 250  # for --v=0, 500 for --v=1, etc.
  _TEST_TRIGGER = 400    # about 50% more than _NORMAL_TRIGGER.
  def __init__(self):
    self.in_a_function = False
    self.lines_in_function = 0
    self.current_function = ''
  def Begin(self, function_name):
    """Start analyzing function body.

    Args:
      function_name: The name of the function being tracked.
    """
    self.in_a_function = True
    self.lines_in_function = 0
    self.current_function = function_name
  def Count(self):
    """Count line in current function body."""
    if self.in_a_function:
      self.lines_in_function += 1
  def Check(self, error, filename, linenum):
    """Report if too many lines in function body.

    Args:
      error: The function to call with any errors found.
      filename: The name of the current file.
      linenum: The number of the line to check.
    """
    # Functions whose name looks like a test get the larger trigger.
    if Match(r'T(EST|est)', self.current_function):
      base_trigger = self._TEST_TRIGGER
    else:
      base_trigger = self._NORMAL_TRIGGER
    # Each verbosity level doubles the allowed size before warning.
    trigger = base_trigger * 2**_VerboseLevel()
    if self.lines_in_function > trigger:
      # NOTE(review): '/' here is floor division on Python 2 ints but true
      # division on Python 3, so error_level can differ by rounding between
      # the two interpreters -- confirm before porting.
      error_level = int(math.log(self.lines_in_function / base_trigger, 2))
      # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ...
      if error_level > 5:
        error_level = 5
      error(filename, linenum, 'readability/fn_size', error_level,
            'Small and focused functions are preferred:'
            ' %s has %d non-comment lines'
            ' (error triggered by exceeding %d lines).' % (
                self.current_function, self.lines_in_function, trigger))
  def End(self):
    """Stop analyzing function body."""
    self.in_a_function = False
class _IncludeError(Exception):
  """Indicates a problem with the include order in a file."""
  pass
class FileInfo(object):
  """Provides utility functions for filenames.

  FileInfo provides easy access to the components of a file's path
  relative to the project root.
  """
  def __init__(self, filename):
    self._filename = filename
  def FullName(self):
    """Make Windows paths like Unix."""
    return os.path.abspath(self._filename).replace('\\', '/')
  def RepositoryName(self):
    """FullName after removing the local path to the repository.

    If we have a real absolute path name here we can try to do something
    smart: detecting the root of the checkout (.git/.hg/.svn) and
    truncating the path down to it, so that header guards and messages
    don't depend on where a particular user checked the source out.
    """
    fullname = self.FullName()
    if os.path.exists(fullname):
      project_dir = os.path.dirname(fullname)
      if os.path.exists(os.path.join(project_dir, ".svn")):
        # If there's a .svn file in the current directory, we recursively look
        # up the directory tree for the top of the SVN checkout.
        root_dir = project_dir
        one_up_dir = os.path.dirname(root_dir)
        while os.path.exists(os.path.join(one_up_dir, ".svn")):
          root_dir = os.path.dirname(root_dir)
          one_up_dir = os.path.dirname(one_up_dir)
        prefix = os.path.commonprefix([root_dir, project_dir])
        return fullname[len(prefix) + 1:]
      # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
      # searching up from the current path; the loop stops at the
      # filesystem root (where dirname(x) == x).
      root_dir = os.path.dirname(fullname)
      while (root_dir != os.path.dirname(root_dir) and
             not os.path.exists(os.path.join(root_dir, ".git")) and
             not os.path.exists(os.path.join(root_dir, ".hg")) and
             not os.path.exists(os.path.join(root_dir, ".svn"))):
        root_dir = os.path.dirname(root_dir)
      if (os.path.exists(os.path.join(root_dir, ".git")) or
          os.path.exists(os.path.join(root_dir, ".hg")) or
          os.path.exists(os.path.join(root_dir, ".svn"))):
        prefix = os.path.commonprefix([root_dir, project_dir])
        return fullname[len(prefix) + 1:]
    # Don't know what to do; header guard warnings may be wrong...
    return fullname
  def Split(self):
    """Splits the file into the directory, basename, and extension.

    For 'chrome/browser/browser.cc', Split() would
    return ('chrome/browser', 'browser', '.cc')

    Returns:
      A tuple of (directory, basename, extension).
    """
    googlename = self.RepositoryName()
    project, rest = os.path.split(googlename)
    return (project,) + os.path.splitext(rest)
  def BaseName(self):
    """File base name - text after the final slash, before the final period."""
    return self.Split()[1]
  def Extension(self):
    """File extension - text following the final period."""
    return self.Split()[2]
  def NoExtension(self):
    """File has no source file extension."""
    return '/'.join(self.Split()[0:2])
  def IsSource(self):
    """File has a source file extension."""
    return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
def _ShouldPrintError(category, confidence, linenum):
  """Decides whether an error should be printed.

  An error is dropped when a NOLINT comment suppresses it, when its
  confidence is below the verbosity threshold, or when the active
  filters remove its category.
  """
  # Suppression via NOLINT trumps everything else.
  if IsErrorSuppressedByNolint(category, linenum):
    return False
  if confidence < _cpplint_state.verbose_level:
    return False

  # Filters are applied left to right; the last filter whose prefix matches
  # the category decides ('-' drops the error, '+' keeps it).
  is_filtered = False
  for one_filter in _Filters():
    sign = one_filter[:1]
    assert sign in ('-', '+')  # enforced by _CppLintState.AddFilters
    if category.startswith(one_filter[1:]):
      is_filtered = (sign == '-')
  return not is_filtered
def Error(filename, linenum, category, confidence, message):
  """Logs a lint error to stderr in the configured output format.

  Honors NOLINT suppressions, verbosity and filters via _ShouldPrintError,
  and increments the module's error counters.
  """
  if not _ShouldPrintError(category, confidence, linenum):
    return
  _cpplint_state.IncrementErrorCount(category)
  # Pick the message template matching the requested output format.
  fmt = _cpplint_state.output_format
  if fmt == 'vs7':
    template = '%s(%s): %s  [%s] [%d]\n'
  elif fmt == 'eclipse':
    template = '%s:%s: warning: %s  [%s] [%d]\n'
  else:
    template = '%s:%s:  %s  [%s] [%d]\n'
  sys.stderr.write(template % (
      filename, linenum, message, category, confidence))
# Matches C/C++ escape sequences inside strings/chars, so they can be removed
# before quote-collapsing (e.g. \" would otherwise look like a real quote).
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
    r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Match a single C style comment on the same line.
_RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/'
# Matches multi-line C style comments.
# This RE is a little bit more complicated than one might expect, because we
# have to take care of space removals tools so we can handle comments inside
# statements better.
# The current rule is: We only clear spaces from both sides when we're at the
# end of the line. Otherwise, we try to remove spaces from the right side,
# if this doesn't work we try on left side but only if there's a non-character
# on the right.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
    r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' +
    _RE_PATTERN_C_COMMENTS + r'\s+|' +
    r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' +
    _RE_PATTERN_C_COMMENTS + r')')
def IsCppString(line):
  """Does line terminate so, that the next symbol is in string constant.

  This function does not consider single-line nor multi-line comments.

  Args:
    line: is a partial line of code starting from the 0..n.

  Returns:
    True, if next character appended to 'line' is inside a
    string constant.
  """
  # Neutralize escaped backslashes first so \\" is not mistaken for \".
  line = line.replace(r'\\', 'XX')
  # Count quotes that actually open/close strings: all quotes, minus escaped
  # quotes, minus quotes appearing as the char literal '"'.
  effective_quotes = line.count('"') - line.count(r'\"') - line.count("'\"'")
  return effective_quotes % 2 == 1
def CleanseRawStrings(raw_lines):
  """Removes C++11 raw strings from lines.

  Raw string bodies are replaced with "" so later string/comment analysis
  is not confused by their contents.  `delimiter` is the closing sequence
  of the raw string currently open (None when outside a raw string).

  Args:
    raw_lines: list of raw lines.

  Returns:
    list of lines with C++11 raw strings replaced by empty strings.
  """
  delimiter = None
  lines_without_raw_strings = []
  for line in raw_lines:
    if delimiter:
      # Inside a raw string; look for the end delimiter on this line.
      end = line.find(delimiter)
      if end >= 0:
        # Found the end: keep the leading whitespace, substitute "" for the
        # string, and keep whatever followed the delimiter.
        leading_space = Match(r'^(\s*)\S', line)
        line = leading_space.group(1) + '""' + line[end + len(delimiter):]
        delimiter = None
      else:
        # Still inside the raw string: blank out the whole line.
        line = '""'
    # Look for beginning of a raw string, and replace them with
    # empty strings. This is done in a loop to handle multiple raw
    # strings on the same line.
    while delimiter is None:
      # Look for beginning of a raw string.
      # See 2.14.15 [lex.string] for syntax.
      matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
      if matched:
        delimiter = ')' + matched.group(2) + '"'
        end = matched.group(3).find(delimiter)
        if end >= 0:
          # Raw string ended on same line
          line = (matched.group(1) + '""' +
                  matched.group(3)[end + len(delimiter):])
          delimiter = None
        else:
          # Start of a multi-line raw string
          line = matched.group(1) + '""'
      else:
        break
    lines_without_raw_strings.append(line)
  # TODO(unknown): if delimiter is not None here, we might want to
  # emit a warning for unterminated string.
  return lines_without_raw_strings
def FindNextMultiLineCommentStart(lines, lineix):
  """Find the beginning marker for a multiline comment.

  Returns the index of the first line at or after lineix that opens a
  /* comment spanning more than one line, or len(lines) if there is none.
  """
  for ix in range(lineix, len(lines)):
    stripped = lines[ix].strip()
    # A /* that also closes on the same line is not a multi-line comment.
    if stripped.startswith('/*') and stripped.find('*/', 2) < 0:
      return ix
  return len(lines)
def FindNextMultiLineCommentEnd(lines, lineix):
  """We are inside a comment, find the end marker.

  Returns the index of the first line at or after lineix whose stripped
  text ends with */, or len(lines) if the comment never closes.
  """
  for ix in range(lineix, len(lines)):
    if lines[ix].strip().endswith('*/'):
      return ix
  return len(lines)
def RemoveMultiLineCommentsFromRange(lines, begin, end):
  """Clears a range of lines for multi-line comments, in place."""
  for index in range(begin, end):
    # '// dummy' keeps the lines non-empty, so we will not get
    # unnecessary blank line warnings later in the code.
    lines[index] = '// dummy'
def RemoveMultiLineComments(filename, lines, error):
  """Removes multiline (c-style) comments from lines, in place.

  Reports an unterminated /* comment through the error callback.
  """
  lineix = 0
  while lineix < len(lines):
    begin = FindNextMultiLineCommentStart(lines, lineix)
    if begin >= len(lines):
      return  # no more multi-line comments
    end = FindNextMultiLineCommentEnd(lines, begin)
    if end >= len(lines):
      error(filename, begin + 1, 'readability/multiline_comment', 5,
            'Could not find end of multi-line comment')
      return
    RemoveMultiLineCommentsFromRange(lines, begin, end + 1)
    lineix = end + 1
def CleanseComments(line):
  """Removes //-comments and single-line C-style /* */ comments.

  Args:
    line: A line of C++ source.

  Returns:
    The line with single-line comments removed.
  """
  pos = line.find('//')
  # Only strip '//' when it is not inside a string constant.
  if pos != -1 and not IsCppString(line[:pos]):
    line = line[:pos].rstrip()
  # get rid of /* ... */
  return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
class CleansedLines(object):
  """Holds several copies of all lines with different preprocessing applied.

  1) elided member contains lines without strings and comments.
  2) lines member contains lines without comments.
  3) raw_lines member contains all the lines without processing.
  4) lines_without_raw_strings member is raw_lines with C++11 raw
     strings removed.
  All these members are lists of the same length.
  """
  def __init__(self, lines):
    self.elided = []
    self.lines = []
    self.raw_lines = lines
    self.num_lines = len(lines)
    self.lines_without_raw_strings = CleanseRawStrings(lines)
    for linenum in range(len(self.lines_without_raw_strings)):
      self.lines.append(CleanseComments(
          self.lines_without_raw_strings[linenum]))
      elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
      self.elided.append(CleanseComments(elided))
  def NumLines(self):
    """Returns the number of lines represented."""
    return self.num_lines
  @staticmethod
  def _CollapseStrings(elided):
    """Collapses strings and chars on a line to simple "" or ''.

    Args:
      elided: The line being processed.

    Returns:
      The line with collapsed strings.
    """
    # NOTE(review): _RE_PATTERN_INCLUDE is defined elsewhere in this module;
    # lines matching it (#include lines) are returned unmodified.
    if _RE_PATTERN_INCLUDE.match(elided):
      return elided
    # Remove escaped characters first to make quote/single quote collapsing
    # basic. Things that look like escaped characters shouldn't occur
    # outside of strings and chars.
    elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
    collapsed = ''
    while True:
      # Find the first quote character
      match = Match(r'^([^\'"]*)([\'"])(.*)$', elided)
      if not match:
        collapsed += elided
        break
      head, quote, tail = match.groups()
      if quote == '"':
        # Collapse double quoted strings
        second_quote = tail.find('"')
        if second_quote >= 0:
          collapsed += head + '""'
          elided = tail[second_quote + 1:]
        else:
          # Unmatched double quote, don't bother processing the rest
          collapsed += elided
          break
      else:
        # Single quote: could be a char literal or a C++14 digit separator
        # like "0.'3" (gcc 4.9.0 will not allow this literal).
        if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head):
          match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail)
          collapsed += head + match_literal.group(1).replace("'", '')
          elided = match_literal.group(2)
        else:
          second_quote = tail.find('\'')
          if second_quote >= 0:
            collapsed += head + "''"
            elided = tail[second_quote + 1:]
          else:
            # Unmatched single quote
            collapsed += elided
            break
    return collapsed
def FindEndOfExpressionInLine(line, startpos, stack):
  """Find the position just after the end of current parenthesized expression.

  Args:
    line: a CleansedLines line.
    startpos: start searching at this position.
    stack: nesting stack at startpos; mutated in place as characters
           are scanned.

  Returns:
    On finding matching end: (index just after matching end, None)
    On finding an unclosed expression: (-1, None)
    Otherwise: (-1, new stack at end of this line)
  """
  # Bug fix: the original iterated with xrange(), which does not exist on
  # Python 3; range() is equivalent for this read-only iteration.
  for i in range(startpos, len(line)):
    char = line[i]
    if char in '([{':
      # Found start of parenthesized expression, push to expression stack
      stack.append(char)
    elif char == '<':
      # Found potential start of template argument list
      if i > 0 and line[i - 1] == '<':
        # Left shift operator
        if stack and stack[-1] == '<':
          stack.pop()
          if not stack:
            return (-1, None)
      elif i > 0 and Search(r'\boperator\s*$', line[0:i]):
        # operator<, don't add to stack
        continue
      else:
        # Tentative start of template argument list
        stack.append('<')
    elif char in ')]}':
      # Found end of parenthesized expression.
      #
      # If we are currently expecting a matching '>', the pending '<'
      # must have been an operator.  Remove them from expression stack.
      while stack and stack[-1] == '<':
        stack.pop()
      if not stack:
        return (-1, None)
      if ((stack[-1] == '(' and char == ')') or
          (stack[-1] == '[' and char == ']') or
          (stack[-1] == '{' and char == '}')):
        stack.pop()
        if not stack:
          return (i + 1, None)
      else:
        # Mismatched parentheses
        return (-1, None)
    elif char == '>':
      # Found potential end of template argument list.

      # Ignore "->" and operator functions
      if (i > 0 and
          (line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))):
        continue

      # Pop the stack if there is a matching '<'.  Otherwise, ignore
      # this '>' since it must be an operator.
      if stack:
        if stack[-1] == '<':
          stack.pop()
          if not stack:
            return (i + 1, None)
    elif char == ';':
      # Found something that look like end of statements.  If we are currently
      # expecting a '>', the matching '<' must have been an operator, since
      # template argument list should not contain statements.
      while stack and stack[-1] == '<':
        stack.pop()
      if not stack:
        return (-1, None)

  # Did not find end of expression or unbalanced parentheses on this line
  return (-1, stack)
def CloseExpression(clean_lines, linenum, pos):
  """If input points to ( or { or [ or <, finds the position that closes it.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    pos: A position on the line.

  Returns:
    A tuple (line, linenum, pos) pointing *past* the closing brace, or
    (line, NumLines, -1) if the expression never closes.
  """
  num_lines = clean_lines.NumLines()
  line = clean_lines.elided[linenum]
  # '<<' and '<=' never open an expression, nor does any other character.
  if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]):
    return (line, num_lines, -1)

  # Try to close the expression on the starting line.
  (found_pos, pending_stack) = FindEndOfExpressionInLine(line, pos, [])
  if found_pos > -1:
    return (line, linenum, found_pos)

  # Keep scanning subsequent lines while something is still open.
  while pending_stack and linenum < num_lines - 1:
    linenum += 1
    line = clean_lines.elided[linenum]
    (found_pos, pending_stack) = FindEndOfExpressionInLine(line, 0,
                                                           pending_stack)
    if found_pos > -1:
      return (line, linenum, found_pos)

  # Ran out of lines without closing the expression; give up.
  return (line, num_lines, -1)
def FindStartOfExpressionInLine(line, endpos, stack):
  """Find position at the matching start of current expression.

  This is almost the reverse of FindEndOfExpressionInLine, but note
  that the input position and returned position differ by 1.

  Args:
    line: a CleansedLines line.
    endpos: start searching at this position.
    stack: nesting stack at endpos.

  Returns:
    On finding matching start: (index at matching start, None)
    On finding an unclosed expression: (-1, None)
    Otherwise: (-1, new stack at beginning of this line)
  """
  i = endpos
  while i >= 0:
    char = line[i]
    if char in ')]}':
      # Found end of expression, push to expression stack
      stack.append(char)
    elif char == '>':
      # Found potential end of template argument list.
      #
      # Ignore it if it's a "->" or ">=" or "operator>"
      if (i > 0 and
          (line[i - 1] == '-' or
           Match(r'\s>=\s', line[i - 1:]) or
           Search(r'\boperator\s*$', line[0:i]))):
        i -= 1
      else:
        stack.append('>')
    elif char == '<':
      if i > 0 and line[i - 1] == '<':
        # Left shift operator, skip the second '<'
        i -= 1
      else:
        # If there is a matching '>' on the stack, pop it; otherwise
        # this '<' must be an operator and is ignored.
        if stack and stack[-1] == '>':
          stack.pop()
          if not stack:
            return (i, None)
    elif char in '([{':
      # Found start of expression.  Any unmatched '>' still on the
      # stack must have been operators; remove them first.
      while stack and stack[-1] == '>':
        stack.pop()
      if not stack:
        return (-1, None)
      if ((char == '(' and stack[-1] == ')') or
          (char == '[' and stack[-1] == ']') or
          (char == '{' and stack[-1] == '}')):
        stack.pop()
        if not stack:
          return (i, None)
      else:
        # Mismatched parentheses
        return (-1, None)
    elif char == ';':
      # A statement boundary: any pending '>' must have been an
      # operator, since template argument lists cannot span statements.
      while stack and stack[-1] == '>':
        stack.pop()
      if not stack:
        return (-1, None)
    i -= 1
  # Reached the beginning of the line with the expression still open.
  return (-1, stack)
def ReverseCloseExpression(clean_lines, linenum, pos):
  """If input points to ) or } or ] or >, finds the position that opens it.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    pos: A position on the line.

  Returns:
    A tuple (line, linenum, pos) pointing *at* the opening brace, or
    (line, 0, -1) if the matching opener is never found.
  """
  line = clean_lines.elided[linenum]
  if line[pos] not in ')}]>':
    # Not a closing character; nothing to match.
    return (line, 0, -1)

  # Scan backwards through the current line first.
  (open_pos, pending) = FindStartOfExpressionInLine(line, pos, [])
  if open_pos > -1:
    return (line, linenum, open_pos)

  # Keep scanning earlier lines while the expression is still open.
  while pending and linenum > 0:
    linenum -= 1
    line = clean_lines.elided[linenum]
    (open_pos, pending) = FindStartOfExpressionInLine(line, len(line) - 1,
                                                      pending)
    if open_pos > -1:
      return (line, linenum, open_pos)

  # Opening character was never found before the start of the file.
  return (line, 0, -1)
def CheckForCopyright(filename, lines, error):
  """Logs an error if no Copyright message appears near the top of the file.

  Args:
    filename: The name of the current file.
    lines: An array of strings; lines[0] is a dummy line, so the search
           starts at index 1 and covers roughly the first 10 real lines.
    error: The function to call with any errors found.
  """
  found = False
  for linenum in range(1, min(len(lines), 11)):
    # Case-insensitive match anywhere on the line.
    if re.search(r'Copyright', lines[linenum], re.I):
      found = True
      break
  if not found:
    error(filename, 0, 'legal/copyright', 5,
          'No copyright message found. '
          'You should have a line: "Copyright [year] <Copyright Owner>"')
def GetIndentLevel(line):
  """Return the number of leading spaces in line.

  Args:
    line: A string to check.

  Returns:
    An integer count of leading spaces, 0 when the line is empty or
    contains no non-whitespace character.
  """
  indent = re.match(r'^( *)\S', line)
  return len(indent.group(1)) if indent else 0
def GetHeaderGuardCPPVariable(filename):
  """Returns the CPP variable that should be used as a header guard.

  Args:
    filename: The name of a C++ header file.

  Returns:
    The CPP variable that should be used as a header guard in the
    named file.
  """
  # Restores original filename in case that cpplint is invoked from Emacs's
  # flymake, which appends '_flymake' markers to the name.
  cleaned = re.sub(r'_flymake\.h$', '.h', filename)
  cleaned = re.sub(r'/\.flymake/([^/]*)$', r'/\1', cleaned)

  rel_path = FileInfo(cleaned).RepositoryName()
  if _root:
    # Strip the configured root prefix so guards are relative to it.
    rel_path = re.sub('^' + _root + os.sep, '', rel_path)
  return re.sub(r'[-./\s]', '_', rel_path).upper()
def CheckForHeaderGuard(filename, lines, error):
  """Checks that the file contains a header guard.

  Logs an error if no #ifndef header guard is present, or if the guard
  does not match the expected CPP variable for this path.

  NOTE(review): the '#ifndef'/'#define'/'#endif' string literals in this
  function were corrupted (truncated at '#') in the previous revision;
  they are restored here from upstream cpplint.  The local modification
  that always reports guard-style problems at confidence 5 is preserved.

  Args:
    filename: The name of the C++ header file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """
  # Don't check for header guards if there are error suppression
  # comments somewhere in this file.
  #
  # Because this is silencing a warning for a nonexistent line, we
  # only support the very specific NOLINT(build/header_guard) syntax,
  # and not the general NOLINT or NOLINT(*) syntax.
  for i in lines:
    if Search(r'//\s*NOLINT\(build/header_guard\)', i):
      return

  cppvar = GetHeaderGuardCPPVariable(filename)

  ifndef = None
  ifndef_linenum = 0
  define = None
  endif = None
  endif_linenum = 0
  for linenum, line in enumerate(lines):
    linesplit = line.split()
    if len(linesplit) >= 2:
      # find the first occurrence of #ifndef and #define, save arg
      if not ifndef and linesplit[0] == '#ifndef':
        # set ifndef to the header guard presented on the #ifndef line.
        ifndef = linesplit[1]
        ifndef_linenum = linenum
      if not define and linesplit[0] == '#define':
        define = linesplit[1]
    # find the last occurrence of #endif, save entire line
    if line.startswith('#endif'):
      endif = line
      endif_linenum = linenum

  if not ifndef:
    error(filename, 0, 'build/header_guard', 5,
          'No #ifndef header guard found, suggested CPP variable is: %s' %
          cppvar)
    return

  if not define:
    error(filename, 0, 'build/header_guard', 5,
          'No #define header guard found, suggested CPP variable is: %s' %
          cppvar)
    return

  # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
  # for backward compatibility.
  if ifndef != cppvar:
    # removed by weizhenwei, 2015.06.26;
    # error_level = 0
    # if ifndef != cppvar + '_':
    #   error_level = 5
    error_level = 5
    ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum,
                            error)
    error(filename, ifndef_linenum, 'build/header_guard', error_level,
          '#ifndef header guard has wrong style, please use: %s' % cppvar)

  if define != ifndef:
    error(filename, 0, 'build/header_guard', 5,
          '#ifndef and #define don\'t match, suggested CPP variable is: %s' %
          cppvar)
    return

  if endif != ('#endif  // %s' % cppvar):
    # Local modification: graded error levels removed; always report at 5.
    error_level = 5
    ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum,
                            error)
    error(filename, endif_linenum, 'build/header_guard', error_level,
          '#endif line should be "#endif  // %s"' % cppvar)
def CheckForBadCharacters(filename, lines, error):
  """Logs errors for lines containing bad characters.

  Two kinds are flagged: Unicode replacement characters (usually the
  result of decoding invalid UTF-8) and NUL bytes.

  Args:
    filename: The name of the current file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """
  linenum = 0
  for line in lines:
    if u'\ufffd' in line:
      error(filename, linenum, 'readability/utf8', 5,
            'Line contains invalid UTF-8 (or Unicode replacement character).')
    if '\0' in line:
      error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
    linenum += 1
def CheckForNewlineAtEOF(filename, lines, error):
  """Logs an error if there is no newline char at the end of the file.

  Args:
    filename: The name of the current file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """
  # NOTE(review): this appears to expect the padded lines array used by
  # the linter, where a trailing newline leaves an empty second-to-last
  # entry — confirm against the caller.
  ends_with_newline = len(lines) >= 3 and not lines[-2]
  if not ends_with_newline:
    error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
          'Could not find a newline character at the end of the file.')
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
  """Logs errors for /* ... */ comments or "..." strings spanning lines.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  text = clean_lines.elided[linenum]

  # Remove all \\ (escaped backslashes) from the line. They are OK, and the
  # second (escaped) slash may trigger later \" detection erroneously.
  text = text.replace('\\\\', '')

  if text.count('/*') > text.count('*/'):
    error(filename, linenum, 'readability/multiline_comment', 5,
          'Complex multi-line /*...*/-style comment found. '
          'Lint may give bogus warnings. '
          'Consider replacing these with //-style comments, '
          'with #if 0...#endif, '
          'or with more clearly structured multi-line comments.')

  unescaped_quotes = text.count('"') - text.count('\\"')
  if unescaped_quotes % 2:
    error(filename, linenum, 'readability/multiline_string', 5,
          'Multi-line string ("...") found. This lint script doesn\'t '
          'do well with such strings, and may give bogus warnings. '
          'Use C++11 raw strings or concatenation instead.')
# Catalog of non-threadsafe POSIX functions and their reentrant (_r)
# replacements, as triples of:
# (non-threadsafe name, thread-safe alternative, validation pattern)
#
# The validation pattern is used to eliminate false positives such as:
#  _rand(); // false positive due to substring match.
#  ->rand(); // some member function rand().
#  ACMRandom rand(seed); // some variable named rand.
#  ISAACRandom rand(); // another variable named rand.
#
# Basically we require the return value of these functions to be used
# in some expression context on the same line by matching on some
# operator before the function name. This eliminates constructors and
# member function calls.
# An operator (or '>' with surrounding space) preceding the call marks
# the return value as consumed.
_UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)'
_THREADING_LIST = (
    ('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'),
    ('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'),
    ('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'),
    ('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'),
    ('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'),
    ('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'),
    ('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'),
    ('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'),
    ('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'),
    ('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'),
    ('strtok(', 'strtok_r(',
     _UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'),
    ('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'),
    )
def CheckPosixThreading(filename, clean_lines, linenum, error):
  """Checks for calls to thread-unsafe POSIX functions.

  Suggests the reentrant (_r) alternative for each entry of
  _THREADING_LIST whose validation pattern matches the line.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  for unsafe_func, safe_func, validation_pattern in _THREADING_LIST:
    # The validation pattern requires the return value to be consumed,
    # which filters out constructors and member functions of the same name.
    if not Search(validation_pattern, line):
      continue
    error(filename, linenum, 'runtime/threadsafe_fn', 2,
          'Consider using ' + safe_func +
          '...) instead of ' + unsafe_func +
          '...) for improved thread safety.')
def CheckVlogArguments(filename, clean_lines, linenum, error):
  """Checks that VLOG() is only used with a numeric verbosity level.

  For example, VLOG(2) is correct; VLOG(INFO), VLOG(WARNING), VLOG(ERROR)
  and VLOG(FATAL) are not.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  if re.search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line):
    error(filename, linenum, 'runtime/vlog', 5,
          'VLOG() should be used with numeric verbosity level. '
          'Use LOG() if you want symbolic severity levels.')
# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.  (Precedence makes *count++ equivalent to
# *(count++); the intended forms are ++*count or (*count)++.)
_RE_PATTERN_INVALID_INCREMENT = re.compile(
    r'^\s*\*\w+(\+\+|--);')
def CheckInvalidIncrement(filename, clean_lines, linenum, error):
  """Checks for invalid increment *count++.

  *count++ effectively does count++ (moving the pointer) because of
  operator precedence; it should be ++*count, (*count)++ or *count += 1.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  # Same pattern as the module-level _RE_PATTERN_INVALID_INCREMENT.
  if re.match(r'^\s*\*\w+(\+\+|--);', line):
    error(filename, linenum, 'runtime/invalid_increment', 5,
          'Changing pointer instead of value (or unused value of operator*).')
def IsMacroDefinition(clean_lines, linenum):
  """Returns True when the line defines, or continues, a macro.

  Args:
    clean_lines: A list of cleansed line strings.
    linenum: The number of the line to check.

  Returns:
    True for a '#define' line, or for any line following one that ended
    with a backslash continuation; False otherwise.
  """
  current = clean_lines[linenum]
  if re.search(r'^#define', current):
    return True
  # A trailing backslash on the previous line continues the macro body.
  previous_continues = linenum > 0 and re.search(r'\\$',
                                                 clean_lines[linenum - 1])
  return bool(previous_continues)
def IsForwardClassDeclaration(clean_lines, linenum):
  """Returns a truthy match when the line is a forward class declaration.

  Args:
    clean_lines: A list of cleansed line strings.
    linenum: The number of the line to check.

  Returns:
    A regex match object for lines like 'class Foo;' (optionally with a
    template prefix), or None otherwise.
  """
  forward_decl = r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$'
  return re.match(forward_decl, clean_lines[linenum])
class _BlockInfo(object):
  """Stores information about a generic block of code."""

  def __init__(self, seen_open_brace):
    # True once the block's opening '{' has been consumed.
    self.seen_open_brace = seen_open_brace
    # Running count of unbalanced '(' inside the block.
    self.open_parentheses = 0
    # One of the _*_ASM states tracking inline assembly parsing.
    self.inline_asm = _NO_ASM
    # Whether items directly inside this block get namespace-indent checks.
    self.check_namespace_indentation = False

  def CheckBegin(self, filename, clean_lines, linenum, error):
    """Run checks that apply to text up to the opening brace.  No-op here."""
    pass

  def CheckEnd(self, filename, clean_lines, linenum, error):
    """Run checks that apply to text after the closing brace.  No-op here."""
    pass

  def IsBlockInfo(self):
    """Returns True if this block is a plain _BlockInfo (not a subclass)."""
    return self.__class__ == _BlockInfo
class _ExternCInfo(_BlockInfo):
  """Stores information about an 'extern "C"' block."""

  def __init__(self):
    # extern "C" blocks are considered open as soon as they are created.
    _BlockInfo.__init__(self, True)
class _ClassInfo(_BlockInfo):
  """Stores information about a class or struct."""

  def __init__(self, name, class_or_struct, clean_lines, linenum):
    _BlockInfo.__init__(self, False)
    self.name = name
    self.starting_linenum = linenum
    self.is_derived = False
    self.check_namespace_indentation = True
    # structs default to public access; classes to private.
    if class_or_struct == 'struct':
      self.access = 'public'
      self.is_struct = True
    else:
      self.access = 'private'
      self.is_struct = False
    # Remember initial indentation level for this class. Using raw_lines here
    # instead of elided to account for leading comments.
    self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum])
    # Try to find the end of the class. This will be confused by things like:
    #   class A {
    #   } *x = { ...
    #
    # But it's still good enough for CheckSectionSpacing.
    self.last_line = 0
    depth = 0
    for i in range(linenum, clean_lines.NumLines()):
      line = clean_lines.elided[i]
      depth += line.count('{') - line.count('}')
      if not depth:
        self.last_line = i
        break

  def CheckBegin(self, filename, clean_lines, linenum, error):
    """Records whether the class derives from a base (a bare ':' in the head)."""
    # Look for a bare ':'
    if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
      self.is_derived = True

  def CheckEnd(self, filename, clean_lines, linenum, error):
    """Checks brace alignment when the class body ends."""
    # Check that closing brace is aligned with beginning of the class.
    # Only do this if the closing brace is indented by only whitespaces.
    # This means we will not check single-line class definitions.
    indent = Match(r'^( *)\}', clean_lines.elided[linenum])
    if indent and len(indent.group(1)) != self.class_indent:
      if self.is_struct:
        parent = 'struct ' + self.name
      else:
        parent = 'class ' + self.name
      error(filename, linenum, 'whitespace/indent', 3,
            'Closing brace should be aligned with beginning of %s' % parent)
class _NamespaceInfo(_BlockInfo):
  """Stores information about a namespace."""

  def __init__(self, name, linenum):
    _BlockInfo.__init__(self, False)
    # Empty string for anonymous namespaces.
    self.name = name or ''
    self.starting_linenum = linenum
    self.check_namespace_indentation = True

  def CheckEnd(self, filename, clean_lines, linenum, error):
    """Check end-of-namespace comments when the namespace body closes."""
    line = clean_lines.raw_lines[linenum]
    # Check how many lines is enclosed in this namespace. Don't issue
    # warning for missing namespace comments if there aren't enough
    # lines. However, do apply checks if there is already an end of
    # namespace comment and it's incorrect.
    #
    # TODO(unknown): We always want to check end of namespace comments
    # if a namespace is large, but sometimes we also want to apply the
    # check if a short namespace contained nontrivial things (something
    # other than forward declarations). There is currently no logic on
    # deciding what these nontrivial things are, so this check is
    # triggered by namespace size only, which works most of the time.
    if (linenum - self.starting_linenum < 10
        and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)):
      return
    # Look for matching comment at end of namespace.
    #
    # Note that we accept C style "/* */" comments for terminating
    # namespaces, so that code that terminate namespaces inside
    # preprocessor macros can be cpplint clean.
    #
    # We also accept stuff like "// end of namespace <name>." with the
    # period at the end.
    #
    # Besides these, we don't accept anything else, otherwise we might
    # get false negatives when existing comment is a substring of the
    # expected namespace.
    if self.name:
      # Named namespace
      if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) +
                    r'[\*/\.\\\s]*$'),
                   line):
        error(filename, linenum, 'readability/namespace', 5,
              'Namespace should be terminated with "// namespace %s"' %
              self.name)
    else:
      # Anonymous namespace
      if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
        # If "// namespace anonymous" or "// anonymous namespace (more text)",
        # mention "// anonymous namespace" as an acceptable form
        if Match(r'}.*\b(namespace anonymous|anonymous namespace)\b', line):
          error(filename, linenum, 'readability/namespace', 5,
                'Anonymous namespace should be terminated with "// namespace"'
                ' or "// anonymous namespace"')
        else:
          error(filename, linenum, 'readability/namespace', 5,
                'Anonymous namespace should be terminated with "// namespace"')
class _PreprocessorInfo(object):
  """Stores checkpoints of nesting stacks when #if/#else is seen."""

  def __init__(self, stack_before_if):
    # The entire nesting stack before #if
    self.stack_before_if = stack_before_if
    # The entire nesting stack up to #else
    self.stack_before_else = []
    # Whether we have already seen #else or #elif
    self.seen_else = False
class NestingState(object):
  """Holds states related to parsing braces and preprocessor nesting."""

  def __init__(self):
    # Stack for tracking all braces. An object is pushed whenever we
    # see a "{", and popped when we see a "}". Only 3 types of
    # objects are possible:
    # - _ClassInfo: a class or struct.
    # - _NamespaceInfo: a namespace.
    # - _BlockInfo: some other type of block.
    self.stack = []
    # Top of the previous stack before each Update().
    #
    # Because the nesting_stack is updated at the end of each line, we
    # had to do some convoluted checks to find out what is the current
    # scope at the beginning of the line. This check is simplified by
    # saving the previous top of nesting stack.
    #
    # We could save the full stack, but we only need the top. Copying
    # the full nesting stack would slow down cpplint by ~10%.
    self.previous_stack_top = []
    # Stack of _PreprocessorInfo objects.
    self.pp_stack = []

  def SeenOpenBrace(self):
    """Check if we have seen the opening brace for the innermost block.

    Returns:
      True if we have seen the opening brace, False if the innermost
      block is still expecting an opening brace.
    """
    return (not self.stack) or self.stack[-1].seen_open_brace

  def InNamespaceBody(self):
    """Check if we are currently one level inside a namespace body."""
    return self.stack and isinstance(self.stack[-1], _NamespaceInfo)

  def InExternC(self):
    """Check if we are currently one level inside an 'extern "C"' block."""
    return self.stack and isinstance(self.stack[-1], _ExternCInfo)

  def InClassDeclaration(self):
    """Check if we are currently one level inside a class or struct."""
    return self.stack and isinstance(self.stack[-1], _ClassInfo)

  def InAsmBlock(self):
    """Check if we are currently one level inside an inline ASM block."""
    return self.stack and self.stack[-1].inline_asm != _NO_ASM

  def InTemplateArgumentList(self, clean_lines, linenum, pos):
    """Check if current position is inside a template argument list.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      linenum: position of line.
      pos: position just after the suspected '<'.

    Returns:
      True if (linenum, pos) is inside template arguments.
    """
    while linenum < clean_lines.NumLines():
      # Find the earliest character that might indicate a template argument
      line = clean_lines.elided[linenum]
      match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:])
      if not match:
        linenum += 1
        pos = 0
        continue
      token = match.group(1)
      pos += len(match.group(0))
      # These things do not look like template argument list:
      #   class Suspect {
      #   class Suspect x; }
      if token in ('{', '}', ';'): return False
      # These things look like template argument list:
      #   template <class Suspect>
      #   template <class Suspect = default_value>
      #   template <class Suspect[]>
      #   template <class Suspect...>
      if token in ('>', '=', '[', ']', '.'): return True
      # Check if token is an unmatched '<'.
      # If not, move on to the next character.
      if token != '<':
        pos += 1
        if pos >= len(line):
          linenum += 1
          pos = 0
        continue
      # We can't be sure if we just find a single '<', and need to
      # find the matching '>'.
      (_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1)
      if end_pos < 0:
        # Not sure if template argument list or syntax error in file
        return False
      linenum = end_line
      pos = end_pos
    return False

  def UpdatePreprocessor(self, line):
    """Update preprocessor stack.

    We need to handle preprocessors due to classes like this:
      #ifdef SWIG
      struct ResultDetailsPageElementExtensionPoint {
      #else
      struct ResultDetailsPageElementExtensionPoint : public Extension {
      #endif

    We make the following assumptions (good enough for most files):
    - Preprocessor condition evaluates to true from #if up to first #else.
    - Preprocessor condition evaluates to false from #else/#elif up to #endif.

    Args:
      line: current line to check.
    """
    if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
      # Beginning of #if block, save the nesting stack here. The saved
      # stack will allow us to restore the parsing state in the #else case.
      self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
    elif Match(r'^\s*#\s*(else|elif)\b', line):
      # Beginning of #else block
      if self.pp_stack:
        if not self.pp_stack[-1].seen_else:
          # This is the first #else or #elif block. Remember the
          # whole nesting stack up to this point. This is what we
          # keep after the #endif.
          self.pp_stack[-1].seen_else = True
          self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
        # Restore the stack to how it was before the #if
        self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
      else:
        # TODO(unknown): unexpected #else, issue warning?
        pass
    elif Match(r'^\s*#\s*endif\b', line):
      # End of #if or #else blocks.
      if self.pp_stack:
        # If we saw an #else, we will need to restore the nesting
        # stack to its former state before the #else, otherwise we
        # will just continue from where we left off.
        if self.pp_stack[-1].seen_else:
          # Here we can just use a shallow copy since we are the last
          # reference to it.
          self.stack = self.pp_stack[-1].stack_before_else
        # Drop the corresponding #if
        self.pp_stack.pop()
      else:
        # TODO(unknown): unexpected #endif, issue warning?
        pass

  # TODO(unknown): Update() is too long, but we will refactor later.
  def Update(self, filename, clean_lines, linenum, error):
    """Update nesting state with current line.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    line = clean_lines.elided[linenum]
    # Remember top of the previous nesting stack.
    #
    # The stack is always pushed/popped and not modified in place, so
    # we can just do a shallow copy instead of copy.deepcopy. Using
    # deepcopy would slow down cpplint by ~28%.
    if self.stack:
      self.previous_stack_top = self.stack[-1]
    else:
      self.previous_stack_top = None
    # Update pp_stack
    self.UpdatePreprocessor(line)
    # Count parentheses. This is to avoid adding struct arguments to
    # the nesting stack.
    if self.stack:
      inner_block = self.stack[-1]
      depth_change = line.count('(') - line.count(')')
      inner_block.open_parentheses += depth_change
      # Also check if we are starting or ending an inline assembly block.
      if inner_block.inline_asm in (_NO_ASM, _END_ASM):
        if (depth_change != 0 and
            inner_block.open_parentheses == 1 and
            _MATCH_ASM.match(line)):
          # Enter assembly block
          inner_block.inline_asm = _INSIDE_ASM
        else:
          # Not entering assembly block. If previous line was _END_ASM,
          # we will now shift to _NO_ASM state.
          inner_block.inline_asm = _NO_ASM
      elif (inner_block.inline_asm == _INSIDE_ASM and
            inner_block.open_parentheses == 0):
        # Exit assembly block
        inner_block.inline_asm = _END_ASM
    # Consume namespace declaration at the beginning of the line. Do
    # this in a loop so that we catch same line declarations like this:
    #   namespace proto2 { namespace bridge { class MessageSet; } }
    while True:
      # Match start of namespace. The "\b\s*" below catches namespace
      # declarations even if it weren't followed by a whitespace, this
      # is so that we don't confuse our namespace checker. The
      # missing spaces will be flagged by CheckSpacing.
      namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
      if not namespace_decl_match:
        break
      new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
      self.stack.append(new_namespace)
      line = namespace_decl_match.group(2)
      if line.find('{') != -1:
        new_namespace.seen_open_brace = True
        line = line[line.find('{') + 1:]
    # Look for a class declaration in whatever is left of the line
    # after parsing namespaces. The regexp accounts for decorated classes
    # such as in:
    #   class LOCKABLE API Object {
    #   };
    class_decl_match = Match(
        r'^(\s*(?:template\s*<[\w\s<>,:]*>\s*)?'
        r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))'
        r'(.*)$', line)
    if (class_decl_match and
        (not self.stack or self.stack[-1].open_parentheses == 0)):
      # We do not want to accept classes that are actually template arguments:
      #   template <class Ignore1,
      #             class Ignore2 = Default<Args>,
      #             template <Args> class Ignore3>
      #   void Function() {};
      #
      # To avoid template argument cases, we scan forward and look for
      # an unmatched '>'. If we see one, assume we are inside a
      # template argument list.
      end_declaration = len(class_decl_match.group(1))
      if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration):
        self.stack.append(_ClassInfo(
            class_decl_match.group(3), class_decl_match.group(2),
            clean_lines, linenum))
        line = class_decl_match.group(4)
    # If we have not yet seen the opening brace for the innermost block,
    # run checks here.
    if not self.SeenOpenBrace():
      self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
    # Update access control if we are inside a class/struct
    if self.stack and isinstance(self.stack[-1], _ClassInfo):
      classinfo = self.stack[-1]
      access_match = Match(
          r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
          r':(?:[^:]|$)',
          line)
      if access_match:
        classinfo.access = access_match.group(2)
        # Check that access keywords are indented +1 space. Skip this
        # check if the keywords are not preceded by whitespaces.
        # removed by weizhenwei, 2015.06.29;
        # indent = access_match.group(1)
        # if (len(indent) != classinfo.class_indent + 1 and
        #     Match(r'^\s*$', indent)):
        #   if classinfo.is_struct:
        #     parent = 'struct ' + classinfo.name
        #   else:
        #     parent = 'class ' + classinfo.name
        #   slots = ''
        #   if access_match.group(3):
        #     slots = access_match.group(3)
        #   error(filename, linenum, 'whitespace/indent', 3,
        #         '%s%s: should be indented +1 space inside %s' % (
        #             access_match.group(2), slots, parent))
    # Consume braces or semicolons from what's left of the line
    while True:
      # Match first brace, semicolon, or closed parenthesis.
      matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
      if not matched:
        break
      token = matched.group(1)
      if token == '{':
        # If namespace or class hasn't seen a opening brace yet, mark
        # namespace/class head as complete. Push a new block onto the
        # stack otherwise.
        if not self.SeenOpenBrace():
          self.stack[-1].seen_open_brace = True
        elif Match(r'^extern\s*"[^"]*"\s*\{', line):
          self.stack.append(_ExternCInfo())
        else:
          self.stack.append(_BlockInfo(True))
          if _MATCH_ASM.match(line):
            self.stack[-1].inline_asm = _BLOCK_ASM
      elif token == ';' or token == ')':
        # If we haven't seen an opening brace yet, but we already saw
        # a closing parenthesis or semicolon, then these are probably
        # function arguments with extra "class" or "struct" keywords,
        # or a forward declaration.
        # Also pop these stack for these.
        if not self.SeenOpenBrace():
          self.stack.pop()
      else:  # token == '}'
        # Perform end of block checks and pop the stack.
        if self.stack:
          self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
          self.stack.pop()
      line = matched.group(2)

  def InnermostClass(self):
    """Get the innermost enclosing class on the stack, or None."""
    for i in range(len(self.stack), 0, -1):
      classinfo = self.stack[i - 1]
      if isinstance(classinfo, _ClassInfo):
        return classinfo
    return None

  def CheckCompletedBlocks(self, filename, error):
    """Checks that all classes and namespaces have been completely parsed.

    Call this when all lines in a file have been processed.

    Args:
      filename: The name of the current file.
      error: The function to call with any errors found.
    """
    # Note: This test can result in false positives if #ifdef constructs
    # get in the way of brace matching. See the testBuildClass test in
    # cpplint_unittest.py for an example of this.
    for obj in self.stack:
      if isinstance(obj, _ClassInfo):
        error(filename, obj.starting_linenum, 'build/class', 5,
              'Failed to find complete declaration of class %s' %
              obj.name)
      elif isinstance(obj, _NamespaceInfo):
        error(filename, obj.starting_linenum, 'build/namespaces', 5,
              'Failed to find complete declaration of namespace %s' %
              obj.name)
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
                                  nesting_state, error):
  """Logs errors for certain non-standard or style-violating constructs.

  Flags, among others:
  - storage class not first (e.g. "const static" instead of "static const")
  - "%q" and "%N$" printf formats
  - undefined character escapes like "\%"
  - text after #endif
  - inner-style forward declarations
  - >? and <? (GNU max/min) operators
  - const string& members
  - single-argument constructors not marked explicit (and the converse)

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # Remove comments from the line, but leave in strings for now.
  line = clean_lines.lines[linenum]
  if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
    error(filename, linenum, 'runtime/printf_format', 3,
          '%q in format strings is deprecated. Use %ll instead.')
  if Search(r'printf\s*\(.*".*%\d+\$', line):
    error(filename, linenum, 'runtime/printf_format', 2,
          '%N$ formats are unconventional. Try rewriting to avoid them.')
  # Remove escaped backslashes before looking for undefined escapes.
  line = line.replace('\\\\', '')
  if Search(r'("|\').*\\(%|\[|\(|{)', line):
    error(filename, linenum, 'build/printf_format', 3,
          '%, [, (, and { are undefined character escapes. Unescape them.')
  # For the rest, work with both comments and strings removed.
  line = clean_lines.elided[linenum]
  if Search(r'\b(const|volatile|void|char|short|int|long'
            r'|float|double|signed|unsigned'
            r'|schar|u?int8|u?int16|u?int32|u?int64)'
            r'\s+(register|static|extern|typedef)\b',
            line):
    error(filename, linenum, 'build/storage_class', 5,
          'Storage class (static, extern, typedef, etc) should be first.')
  if Match(r'\s*#\s*endif\s*[^/\s]+', line):
    error(filename, linenum, 'build/endif_comment', 5,
          'Uncommented text after #endif is non-standard. Use a comment.')
  if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
    error(filename, linenum, 'build/forward_decl', 5,
          'Inner-style forward declarations are invalid. Remove this line.')
  if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
            line):
    error(filename, linenum, 'build/deprecated', 3,
          '>? and <? (max and min) operators are non-standard and deprecated.')
  if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
    # TODO(unknown): Could it be expanded safely to arbitrary references,
    # without triggering too many false positives? The first
    # attempt triggered 5 warnings for mostly benign code in the regtest, hence
    # the restriction.
    # Here's the original regexp, for the reference:
    # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
    # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
    error(filename, linenum, 'runtime/member_string_references', 2,
          'const string& members are dangerous. It is much better to use '
          'alternatives, such as pointers or simple constants.')
  # Everything else in this function operates on class declarations.
  # Return early if the top of the nesting stack is not a class, or if
  # the class head is not completed yet.
  classinfo = nesting_state.InnermostClass()
  if not classinfo or not classinfo.seen_open_brace:
    return
  # The class may have been declared with namespace or classname qualifiers.
  # The constructor and destructor will not have those qualifiers.
  base_classname = classinfo.name.split('::')[-1]
  # Look for single-argument constructors that aren't marked explicit.
  # Technically a valid construct, but against style. Also look for
  # non-single-argument constructors which are also technically valid, but
  # strongly suggest something is wrong.
  explicit_constructor_match = Match(
      r'\s+(?:inline\s+)?(explicit\s+)?(?:inline\s+)?%s\s*'
      r'\(((?:[^()]|\([^()]*\))*)\)'
      % re.escape(base_classname),
      line)
  if explicit_constructor_match:
    is_marked_explicit = explicit_constructor_match.group(1)
    if not explicit_constructor_match.group(2):
      constructor_args = []
    else:
      constructor_args = explicit_constructor_match.group(2).split(',')
    # collapse arguments so that commas in template parameter lists and function
    # argument parameter lists don't split arguments in two
    i = 0
    while i < len(constructor_args):
      constructor_arg = constructor_args[i]
      while (constructor_arg.count('<') > constructor_arg.count('>') or
             constructor_arg.count('(') > constructor_arg.count(')')):
        constructor_arg += ',' + constructor_args[i + 1]
        del constructor_args[i + 1]
      constructor_args[i] = constructor_arg
      i += 1
    defaulted_args = [arg for arg in constructor_args if '=' in arg]
    noarg_constructor = (not constructor_args or  # empty arg list
                         # 'void' arg specifier
                         (len(constructor_args) == 1 and
                          constructor_args[0].strip() == 'void'))
    onearg_constructor = ((len(constructor_args) == 1 and  # exactly one arg
                           not noarg_constructor) or
                          # all but at most one arg defaulted
                          (len(constructor_args) >= 1 and
                           not noarg_constructor and
                           len(defaulted_args) >= len(constructor_args) - 1))
    initializer_list_constructor = bool(
        onearg_constructor and
        Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
    copy_constructor = bool(
        onearg_constructor and
        Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
              % re.escape(base_classname), constructor_args[0].strip()))
    if (not is_marked_explicit and
        onearg_constructor and
        not initializer_list_constructor and
        not copy_constructor):
      if defaulted_args:
        error(filename, linenum, 'runtime/explicit', 5,
              'Constructors callable with one argument '
              'should be marked explicit.')
      else:
        error(filename, linenum, 'runtime/explicit', 5,
              'Single-parameter constructors should be marked explicit.')
    elif is_marked_explicit and not onearg_constructor:
      if noarg_constructor:
        error(filename, linenum, 'runtime/explicit', 5,
              'Zero-parameter constructors should not be marked explicit.')
      else:
        error(filename, linenum, 'runtime/explicit', 0,
              'Constructors that require multiple arguments '
              'should not be marked explicit.')
def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
  """Checks for the correctness of various spacing around function calls.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  # Since function calls often occur inside if/for/while/switch
  # expressions - which have their own, more liberal conventions - we
  # first see if we should be looking inside such an expression for a
  # function call, to which we can apply more strict standards.
  fncall = line    # if there's no control flow construct, look at whole line
  for pattern in (r'\bif\s*\((.*)\)\s*{',
                  r'\bfor\s*\((.*)\)\s*{',
                  r'\bwhile\s*\((.*)\)\s*[{;]',
                  r'\bswitch\s*\((.*)\)\s*{'):
    match = Search(pattern, line)
    if match:
      fncall = match.group(1)    # look inside the parens for function calls
      break
  # Except in if/for/while/switch, there should never be space
  # immediately inside parens (eg "f( 3, 4 )"). We make an exception
  # for nested parens ( (a+b) + c ). Likewise, there should never be
  # a space before a ( when it's a function argument. I assume it's a
  # function argument when the char before the whitespace is legal in
  # a function name (alnum + _) and we're not starting a macro. Also ignore
  # pointers and references to arrays and functions coz they're too tricky:
  # we use a very simple way to recognize these:
  # " (something)(maybe-something)" or
  # " (something)(maybe-something," or
  # " (something)[something]"
  # Note that we assume the contents of [] to be short enough that
  # they'll never need to wrap.
  if (  # Ignore control structures.
      not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
                 fncall) and
      # Ignore pointers/references to functions.
      not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
      # Ignore pointers/references to arrays.
      not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
    if Search(r'\w\s*\(\s(?!\s*\\$)', fncall):      # a ( used for a fn call
      error(filename, linenum, 'whitespace/parens', 4,
            'Extra space after ( in function call')
    elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
      error(filename, linenum, 'whitespace/parens', 2,
            'Extra space after (')
    if (Search(r'\w\s+\(', fncall) and
        not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and
        not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall)):
      # TODO(unknown): Space after an operator function seem to be a common
      # error, silence those for now by restricting them to highest verbosity.
      if Search(r'\boperator_*\b', line):
        error(filename, linenum, 'whitespace/parens', 0,
              'Extra space before ( in function call')
      else:
        error(filename, linenum, 'whitespace/parens', 4,
              'Extra space before ( in function call')
  # If the ) is followed only by a newline or a { + newline, assume it's
  # part of a control statement (if/while/etc), and don't complain
  if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
    # If the closing parenthesis is preceded by only whitespaces,
    # try to give a more descriptive error message.
    if Search(r'^\s+\)', fncall):
      error(filename, linenum, 'whitespace/parens', 2,
            'Closing ) should be moved to the previous line')
    else:
      error(filename, linenum, 'whitespace/parens', 2,
            'Extra space before )')
def IsBlankLine(line):
  """Returns true if the given line is blank.

  We consider a line to be blank if the line is empty or consists of
  only white spaces.

  Args:
    line: A line of a string.

  Returns:
    True, if the given line is blank.
  """
  if not line:
    return True
  return line.isspace()
def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
                                 error):
  """Flags lines that are indented directly inside a namespace body.

  Args:
    filename: The name of the current file.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    clean_lines: A CleansedLines instance containing the file.
    line: The number of the line to check.
    error: The function to call with any errors found.
  """
  stack = nesting_state.stack
  previous_top = nesting_state.previous_stack_top
  # Only interesting when the innermost scope sits immediately inside a
  # namespace whose indentation should be checked.
  is_namespace_indent_item = (
      len(stack) > 1 and
      stack[-1].check_namespace_indentation and
      isinstance(previous_top, _NamespaceInfo) and
      previous_top == stack[-2])
  should_check = ShouldCheckNamespaceIndentation(
      nesting_state, is_namespace_indent_item, clean_lines.elided, line)
  if should_check:
    CheckItemIndentationInNamespace(filename, clean_lines.elided, line, error)
def CheckForFunctionLengths(filename, clean_lines, linenum,
                            function_state, error):
  """Reports for long function bodies.

  Uses a simplistic algorithm assuming other style guidelines
  (especially spacing) are followed: a function starts on a line that
  matches a decl-like "name(" pattern, its body starts at the first '{',
  and it ends at a line containing only '}'.  Trivial bodies (terminated
  by ';' or '}' before any '{' is seen) are ignored, so declarations and
  one-line functions are not counted.  Non-blank lines are counted via
  function_state.Count() and checked at the closing brace.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    function_state: Current function name and lines in body so far.
    error: The function to call with any errors found.
  """
  lines = clean_lines.lines
  line = lines[linenum]
  joined_line = ''
  starting_func = False
  regexp = r'(\w(\w|::|\*|\&|\s)*)\('  # decls * & space::name( ...
  match_result = Match(regexp, line)
  if match_result:
    # If the name is all caps and underscores, figure it's a macro and
    # ignore it, unless it's TEST or TEST_F.
    function_name = match_result.group(1).split()[-1]
    if function_name == 'TEST' or function_name == 'TEST_F' or (
        not Match(r'[A-Z_]+$', function_name)):
      starting_func = True
  if starting_func:
    body_found = False
    # Scan forward to find either the body's opening brace or evidence
    # that this was only a declaration.
    for start_linenum in xrange(linenum, clean_lines.NumLines()):
      start_line = lines[start_linenum]
      joined_line += ' ' + start_line.lstrip()
      if Search(r'(;|})', start_line):  # Declarations and trivial functions
        body_found = True
        break                              # ... ignore
      elif Search(r'{', start_line):
        body_found = True
        function = Search(r'((\w|:)*)\(', line).group(1)
        if Match(r'TEST', function):    # Handle TEST... macros
          parameter_regexp = Search(r'(\(.*\))', joined_line)
          if parameter_regexp:             # Ignore bad syntax
            function += parameter_regexp.group(1)
        else:
          function += '()'
        function_state.Begin(function)
        break
    if not body_found:
      # No body for the function (or evidence of a non-function) was found.
      error(filename, linenum, 'readability/fn_size', 5,
            'Lint failed to find start of function body.')
  elif Match(r'^\}\s*$', line):  # function end
    function_state.Check(error, filename, linenum)
    function_state.End()
  elif not Match(r'^\s*$', line):
    function_state.Count()  # Count non-blank/non-comment lines.
# Matches the start of a TODO comment: '//', optional whitespace (group 1),
# 'TODO', an optional '(username)' (group 2), an optional ':', and the single
# following whitespace character, if any (group 3).
_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
def CheckComment(line, filename, linenum, next_line_start, error):
  """Checks for common mistakes in comments.

  Verifies that '//' comments are separated from preceding code by at
  least two spaces (one space is tolerated when the comment aligns with
  the start of a new scope on the next line), that TODO comments are of
  the form '// TODO(username): text', and that there is a space between
  '//' and the comment text.

  Args:
    line: The line in question.
    filename: The name of the current file.
    linenum: The number of the line to check.
    next_line_start: The first non-whitespace column of the next line.
    error: The function to call with any errors found.
  """
  commentpos = line.find('//')
  if commentpos != -1:
    # Check if the // may be in quotes.  If so, ignore it
    # Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
    if (line.count('"', 0, commentpos) -
        line.count('\\"', 0, commentpos)) % 2 == 0:   # not in quotes
      # Allow one space for new scopes, two spaces otherwise:
      if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and
          ((commentpos >= 1 and
            line[commentpos-1] not in string.whitespace) or
           (commentpos >= 2 and
            line[commentpos-2] not in string.whitespace))):
        error(filename, linenum, 'whitespace/comments', 2,
              'At least two spaces is best between code and comments')
      # Checks for common mistakes in TODO comments.
      comment = line[commentpos:]
      match = _RE_PATTERN_TODO.match(comment)
      if match:
        # One whitespace is correct; zero whitespace is handled elsewhere.
        leading_whitespace = match.group(1)
        if len(leading_whitespace) > 1:
          error(filename, linenum, 'whitespace/todo', 2,
                'Too many spaces before TODO')
        username = match.group(2)
        if not username:
          error(filename, linenum, 'readability/todo', 2,
                'Missing username in TODO; it should look like '
                '"// TODO(my_username): Stuff."')
        middle_whitespace = match.group(3)
        # Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
        if middle_whitespace != ' ' and middle_whitespace != '':
          error(filename, linenum, 'whitespace/todo', 2,
                'TODO(my_username) should be followed by a space')
      # If the comment contains an alphanumeric character, there
      # should be a space somewhere between it and the //.
      if Match(r'//[^ ]*\w', comment):
        error(filename, linenum, 'whitespace/comments', 4,
              'Should have a space between // and comment')
def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
  """Checks that DISALLOW_* macros appear in the private: section.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # Work on the elided line so comments and strings cannot confuse the match.
  line = clean_lines.elided[linenum]
  matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|'
                   r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line)
  if not matched:
    return
  innermost = nesting_state.stack[-1] if nesting_state.stack else None
  if isinstance(innermost, _ClassInfo):
    if innermost.access != 'private':
      error(filename, linenum, 'readability/constructors', 3,
            '%s must be in the private: section' % matched.group(1))
  else:
    # Found DISALLOW* macro outside a class declaration, or perhaps it
    # was used inside a function when it should have been part of the
    # class declaration.  We could issue a warning here, but it
    # probably resulted in a compiler error already.
    pass
def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
  """Checks for the correctness of various spacing issues in the code.

  Things we check for: blank lines that look redundant (at the start or
  end of a code block, or after an access specifier), comment spacing
  (delegated to CheckComment), spaces before brackets, and spacing
  around the colon of a range-based for loop.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # Don't use "elided" lines here, otherwise we can't check commented lines.
  # Don't want to use "raw" either, because we don't want to check inside C++11
  # raw strings,
  raw = clean_lines.lines_without_raw_strings
  line = raw[linenum]
  # Before nixing comments, check if the line is blank for no good
  # reason.  This includes the first line after a block is opened, and
  # blank lines at the end of a function (ie, right before a line like '}'
  #
  # Skip all the blank line checks if we are immediately inside a
  # namespace body.  In other words, don't issue blank line warnings
  # for this block:
  #   namespace {
  #
  #   }
  #
  # A warning about missing end of namespace comments will be issued instead.
  #
  # Also skip blank line checks for 'extern "C"' blocks, which are formatted
  # like namespaces.
  if (IsBlankLine(line) and
      not nesting_state.InNamespaceBody() and
      not nesting_state.InExternC()):
    elided = clean_lines.elided
    prev_line = elided[linenum - 1]
    prevbrace = prev_line.rfind('{')
    # TODO(unknown): Don't complain if line before blank line, and line after,
    #                both start with alnums and are indented the same amount.
    #                This ignores whitespace at the start of a namespace block
    #                because those are not usually indented.
    if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
      # OK, we have a blank line at the start of a code block.  Before we
      # complain, we check if it is an exception to the rule: The previous
      # non-empty line has the parameters of a function header that are indented
      # 4 spaces (because they did not fit in a 80 column line when placed on
      # the same line as the function name).  We also check for the case where
      # the previous line is indented 6 spaces, which may happen when the
      # initializers of a constructor do not fit into a 80 column line.
      exception = False
      if Match(r' {6}\w', prev_line):  # Initializer list?
        # We are looking for the opening column of initializer list, which
        # should be indented 4 spaces to cause 6 space indentation afterwards.
        search_position = linenum-2
        while (search_position >= 0
               and Match(r' {6}\w', elided[search_position])):
          search_position -= 1
        exception = (search_position >= 0
                     and elided[search_position][:5] == '    :')
      else:
        # Search for the function arguments or an initializer list.  We use a
        # simple heuristic here: If the line is indented 4 spaces; and we have a
        # closing paren, without the opening paren, followed by an opening brace
        # or colon (for initializer lists) we assume that it is the last line of
        # a function header.  If we have a colon indented 4 spaces, it is an
        # initializer list.
        exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
                           prev_line)
                     or Match(r' {4}:', prev_line))
      if not exception:
        error(filename, linenum, 'whitespace/blank_line', 2,
              'Redundant blank line at the start of a code block '
              'should be deleted.')
    # Ignore blank lines at the end of a block in a long if-else
    # chain, like this:
    #   if (condition1) {
    #     // Something followed by a blank line
    #
    #   } else if (condition2) {
    #     // Something else
    #   }
    if linenum + 1 < clean_lines.NumLines():
      next_line = raw[linenum + 1]
      if (next_line
          and Match(r'\s*}', next_line)
          and next_line.find('} else ') == -1):
        error(filename, linenum, 'whitespace/blank_line', 3,
              'Redundant blank line at the end of a code block '
              'should be deleted.')
    matched = Match(r'\s*(public|protected|private):', prev_line)
    if matched:
      error(filename, linenum, 'whitespace/blank_line', 3,
            'Do not leave a blank line after "%s:"' % matched.group(1))
  # Next, check comments
  next_line_start = 0
  if linenum + 1 < clean_lines.NumLines():
    next_line = raw[linenum + 1]
    next_line_start = len(next_line) - len(next_line.lstrip())
  CheckComment(line, filename, linenum, next_line_start, error)
  # get rid of comments and strings
  line = clean_lines.elided[linenum]
  # You shouldn't have spaces before your brackets, except maybe after
  # 'delete []' or 'return []() {};'
  if Search(r'\w\s+\[', line) and not Search(r'(?:delete|return)\s+\[', line):
    error(filename, linenum, 'whitespace/braces', 5,
          'Extra space before [')
  # In range-based for, we wanted spaces before and after the colon, but
  # not around "::" tokens that might appear.
  if (Search(r'for *\(.*[^:]:[^: ]', line) or
      Search(r'for *\(.*[^: ]:[^:]', line)):
    error(filename, linenum, 'whitespace/forcolon', 2,
          'Missing space around colon in range-based for loop')
def CheckOperatorSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing around operators.

  Covers '=', comparison operators, '||', '<', '>', '<<', '>>', and
  spurious spaces after unary '!', '~', '--', '++'.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  # Don't try to do spacing checks for operator methods.  Do this by
  # replacing the troublesome characters with something else,
  # preserving column position for all other characters.
  #
  # The replacement is done repeatedly to avoid false positives from
  # operators that call operators.
  while True:
    match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
    if match:
      line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)
    else:
      break
  # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
  # Otherwise not.  Note we only check for non-spaces on *both* sides;
  # sometimes people put non-spaces on one side when aligning ='s among
  # many lines (not that this is behavior that I approve of...)
  if Search(r'[\w.]=[\w.]', line) and not Search(r'\b(if|while) ', line):
    error(filename, linenum, 'whitespace/operators', 4,
          'Missing spaces around =')
  # It's ok not to have spaces around binary operators like + - * /, but if
  # there's too little whitespace, we get concerned.  It's hard to tell,
  # though, so we punt on this one for now.  TODO.
  # You should always have whitespace around binary operators.
  #
  # Check <= and >= first to avoid false positives with < and >, then
  # check non-include lines for spacing around < and >.
  #
  # If the operator is followed by a comma, assume it's be used in a
  # macro context and don't do any checks.  This avoids false
  # positives.
  #
  # Note that && is not included here.  Those are checked separately
  # in CheckRValueReference
  match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around %s' % match.group(1))
  elif not Match(r'#.*include', line):
    # Look for < that is not surrounded by spaces.  This is only
    # triggered if both sides are missing spaces, even though
    # technically we should flag if at least one side is missing a
    # space.  This is done to avoid some false positives with shifts.
    match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
    if match:
      (_, _, end_pos) = CloseExpression(
          clean_lines, linenum, len(match.group(1)))
      if end_pos <= -1:
        error(filename, linenum, 'whitespace/operators', 3,
              'Missing spaces around <')
    # Look for > that is not surrounded by spaces.  Similar to the
    # above, we only trigger if both sides are missing spaces to avoid
    # false positives with shifts.
    match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
    if match:
      (_, _, start_pos) = ReverseCloseExpression(
          clean_lines, linenum, len(match.group(1)))
      if start_pos <= -1:
        error(filename, linenum, 'whitespace/operators', 3,
              'Missing spaces around >')
  # We allow no-spaces around << when used like this: 10<<20, but
  # not otherwise (particularly, not when used as streams)
  #
  # We also allow operators following an opening parenthesis, since
  # those tend to be macros that deal with operators.
  match = Search(r'(operator|\S)(?:L|UL|ULL|l|ul|ull)?<<([^\s,=])', line)
  if (match and match.group(1) != '(' and
      not (match.group(1).isdigit() and match.group(2).isdigit()) and
      not (match.group(1) == 'operator' and match.group(2) == ';')):
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around <<')
  # We allow no-spaces around >> for almost anything.  This is because
  # C++11 allows ">>" to close nested templates, which accounts for
  # most cases when ">>" is not followed by a space.
  #
  # We still warn on ">>" followed by alpha character, because that is
  # likely due to ">>" being used for right shifts, e.g.:
  #   value >> alpha
  #
  # When ">>" is used to close templates, the alphanumeric letter that
  # follows would be part of an identifier, and there should still be
  # a space separating the template type and the identifier.
  #   type<type<type>> alpha
  match = Search(r'>>[a-zA-Z_]', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around >>')
  # There shouldn't be space around unary operators
  match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 4,
          'Extra space for operator %s' % match.group(1))
def CheckParenthesisSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing around parentheses.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # A control-flow keyword must be separated from its "(" by a space.
  keyword_match = Search(r' (if\(|for\(|while\(|switch\()', line)
  if keyword_match:
    error(filename, linenum, 'whitespace/parens', 5,
          'Missing space before ( in %s' % keyword_match.group(1))

  # Inside if/for/while/switch parens the whitespace after "(" and before
  # ")" must agree, and must amount to zero or one spaces.  The special
  # forms "for ( ; foo; bar)" and "for (foo; bar; )" are tolerated.
  paren_match = Search(r'\b(if|for|while|switch)\s*'
                       r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
                       line)
  if paren_match:
    keyword = paren_match.group(1)
    opening_spaces = paren_match.group(2)
    closing_spaces = paren_match.group(4)
    if len(opening_spaces) != len(closing_spaces):
      # "for ( ; ...)" legitimately has one extra space after "(" and
      # "for (...; )" legitimately has an extra space before ")".
      empty_init_ok = (paren_match.group(3) == ';' and
                       len(opening_spaces) == 1 + len(closing_spaces))
      empty_increment_ok = (not opening_spaces and
                            Search(r'\bfor\s*\(.*; \)', line))
      if not (empty_init_ok or empty_increment_ok):
        error(filename, linenum, 'whitespace/parens', 5,
              'Mismatching spaces inside () in %s' % keyword)
    if len(opening_spaces) > 1:
      error(filename, linenum, 'whitespace/parens', 5,
            'Should have zero or one spaces inside ( and ) in %s' %
            keyword)
def CheckCommaSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing after commas and semicolons.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  raw = clean_lines.lines_without_raw_strings
  line = clean_lines.elided[linenum]

  # A comma should be followed by a space, except when the next character
  # is another comma (that only happens for empty macro arguments).
  #
  # The check runs in two passes: first on the elided line to find a
  # missing space, then on the raw line to make sure the gap was not
  # merely created by eliding a comment.  "operator,(" is rewritten first
  # so the comma in its name is not flagged.
  elided_sans_operator_comma = ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)
  if (Search(r',[^,\s]', elided_sans_operator_comma) and
      Search(r',[^,\s]', raw[linenum])):
    error(filename, linenum, 'whitespace/comma', 3,
          'Missing space after ,')

  # A semicolon should be followed by a space as well, except for a few
  # corner cases.
  # TODO(unknown): clarify if 'if (1) { return 1;}' is requires one more
  # space after ;
  if Search(r';[^\s};\\)/]', line):
    error(filename, linenum, 'whitespace/semicolon', 3,
          'Missing space after ;')
def CheckBracesSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing near braces and semicolons.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  # Except after an opening paren, or after another opening brace (in case of
  # an initializer list, for instance), you should have spaces before your
  # braces. And since you should never have braces at the beginning of a line,
  # this is an easy test.
  match = Match(r'^(.*[^ ({]){', line)
  if match:
    # Try a bit harder to check for brace initialization.  This
    # happens in one of the following forms:
    #   Constructor() : initializer_list_{} { ... }
    #   Constructor{}.MemberFunction()
    #   Type variable{};
    #   FunctionCall(type{}, ...);
    #   LastArgument(..., type{});
    #   LOG(INFO) << type{} << " ...";
    #   map_of_type[{...}] = ...;
    #   ternary = expr ? new type{} : nullptr;
    #   OuterTemplate<InnerTemplateConstructor<Type>{}>
    #
    # We check for the character following the closing brace, and
    # silence the warning if it's one of those listed above, i.e.
    # "{.;,)<>]:".
    #
    # To account for nested initializer list, we allow any number of
    # closing braces up to "{;,)<".  We can't simply silence the
    # warning on first sight of closing brace, because that would
    # cause false negatives for things that are not initializer lists.
    #   Silence this:         But not this:
    #     Outer{                if (...) {
    #       Inner{...}            if (...){  // Missing space before {
    #     };                    }
    #
    # There is a false negative with this approach if people inserted
    # spurious semicolons, e.g. "if (cond){};", but we will catch the
    # spurious semicolon with a separate check.
    (endline, endlinenum, endpos) = CloseExpression(
        clean_lines, linenum, len(match.group(1)))
    trailing_text = ''
    if endpos > -1:
      trailing_text = endline[endpos:]
    # Pull in up to two following lines so a "newline-separated" suffix
    # character is still seen.
    for offset in xrange(endlinenum + 1,
                         min(endlinenum + 3, clean_lines.NumLines() - 1)):
      trailing_text += clean_lines.elided[offset]
    if not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text):
      error(filename, linenum, 'whitespace/braces', 5,
            'Missing space before {')
  # Make sure '} else {' has spaces.
  if Search(r'}else', line):
    error(filename, linenum, 'whitespace/braces', 5,
          'Missing space before else')
  # You shouldn't have a space before a semicolon at the end of the line.
  # There's a special case for "for" since the style guide allows space before
  # the semicolon there.
  if Search(r':\s*;\s*$', line):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Semicolon defining empty statement. Use {} instead.')
  elif Search(r'^\s*;\s*$', line):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Line contains only semicolon. If this should be an empty statement, '
          'use {} instead.')
  elif (Search(r'\s+;\s*$', line) and
        not Search(r'\bfor\b', line)):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Extra space before last semicolon. If this should be an empty '
          'statement, use {} instead.')
def IsDecltype(clean_lines, linenum, column):
  """Check if the token ending on (linenum, column) is decltype().

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: the number of the line to check.
    column: end column of the token to check.

  Returns:
    True if this token is decltype() expression, False otherwise.
  """
  (preceding_text, _, open_col) = ReverseCloseExpression(
      clean_lines, linenum, column)
  if open_col < 0:
    return False
  return bool(Search(r'\bdecltype\s*$', preceding_text[0:open_col]))
def IsTemplateParameterList(clean_lines, linenum, column):
  """Check if the token ending on (linenum, column) is the end of template<>.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: the number of the line to check.
    column: end column of the token to check.

  Returns:
    True if this token is end of a template parameter list, False otherwise.
  """
  (_, open_linenum, open_pos) = ReverseCloseExpression(
      clean_lines, linenum, column)
  if open_pos <= -1:
    return False
  text_before_bracket = clean_lines.elided[open_linenum][0:open_pos]
  return bool(Search(r'\btemplate\s*$', text_before_bracket))
def IsRValueType(clean_lines, nesting_state, linenum, column):
  """Check if the token ending at (linenum, column) is a rvalue type.

  This function checks whether the token at the given position ENDS
  with a type that could legitimately be followed by "&&" to form a
  rvalue reference, as opposed to an expression followed by a logical
  AND.  It works heuristically by looking at the word to the left of
  the position and, when that is ambiguous, scanning backwards for a
  disambiguating symbol (one of "}; { ( >").

  Args:
    clean_lines: A CleansedLines instance containing the file.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    linenum: The number of the line to check.
    column: The column to check.

  Returns:
    True if this position is a rvalue type.
  """
  prefix = clean_lines.elided[linenum][0:column]
  # Get one word to the left.  If we failed to do so, this is most
  # likely not a type, since it's unlikely that the type name and "&&"
  # would be split across multiple lines.
  match = Match(r'^(.*)(\b\w+|[>*)&])\s*$', prefix)
  if not match:
    return False
  # Check text following the token.  If it's "&&>" or "&&," or "&&...", it's
  # most likely a rvalue reference used inside a template.
  suffix = clean_lines.elided[linenum][column:]
  if Match(r'&&\s*(?:[>,]|\.\.\.)', suffix):
    return True
  # Check for simple type and end of templates:
  #   int&& variable
  #   vector<int>&& variable
  #
  # Because this function is called recursively, we also need to
  # recognize pointer and reference types:
  #   int* Function()
  #   int& Function()
  if match.group(2) in ['char', 'char16_t', 'char32_t', 'wchar_t', 'bool',
                        'short', 'int', 'long', 'signed', 'unsigned',
                        'float', 'double', 'void', 'auto', '>', '*', '&']:
    return True
  # If we see a close parenthesis, look for decltype on the other side.
  # decltype would unambiguously identify a type, anything else is
  # probably a parenthesized expression and not a type.
  if match.group(2) == ')':
    return IsDecltype(
        clean_lines, linenum, len(match.group(1)) + len(match.group(2)) - 1)
  # Check for casts and cv-qualifiers.
  #   match.group(1)  remainder
  #   --------------  ---------
  #   const_cast<     type&&
  #   const           type&&
  #   type            const&&
  if Search(r'\b(?:const_cast\s*<|static_cast\s*<|dynamic_cast\s*<|'
            r'reinterpret_cast\s*<|\w+\s)\s*$',
            match.group(1)):
    return True
  # Look for a preceding symbol that might help differentiate the context.
  # These are the cases that would be ambiguous:
  #   match.group(1)  remainder
  #   --------------  ---------
  #   Call         (   expression &&
  #   Declaration  (   type&&
  #   sizeof       (   type&&
  #   if           (   expression &&
  #   while        (   expression &&
  #   for          (   type&&
  #   for(         ;   expression &&
  #   statement    ;   type&&
  #   block        {   type&&
  #   constructor  {   expression &&
  start = linenum
  line = match.group(1)
  match_symbol = None
  while start >= 0:
    # We want to skip over identifiers and commas to get to a symbol.
    # Commas are skipped so that we can find the opening parenthesis
    # for function parameter lists.
    match_symbol = Match(r'^(.*)([^\w\s,])[\w\s,]*$', line)
    if match_symbol:
      break
    start -= 1
    line = clean_lines.elided[start]
  if not match_symbol:
    # Probably the first statement in the file is an rvalue reference
    return True
  if match_symbol.group(2) == '}':
    # Found closing brace, probably an indicate of this:
    #   block{} type&&
    return True
  if match_symbol.group(2) == ';':
    # Found semicolon, probably one of these:
    #   for(; expression &&
    #   statement; type&&
    # Look for the previous 'for(' in the previous lines.
    before_text = match_symbol.group(1)
    for i in xrange(start - 1, max(start - 6, 0), -1):
      before_text = clean_lines.elided[i] + before_text
    if Search(r'for\s*\([^{};]*$', before_text):
      # This is the condition inside a for-loop
      return False
    # Did not find a for-init-statement before this semicolon, so this
    # is probably a new statement and not a condition.
    return True
  if match_symbol.group(2) == '{':
    # Found opening brace, probably one of these:
    #   block{ type&& = ... ; }
    #   constructor{ expression && expression }
    # Look for a closing brace or a semicolon.  If we see a semicolon
    # first, this is probably a rvalue reference.
    line = clean_lines.elided[start][0:len(match_symbol.group(1)) + 1]
    end = start
    depth = 1
    while True:
      for ch in line:
        if ch == ';':
          return True
        elif ch == '{':
          depth += 1
        elif ch == '}':
          depth -= 1
          if depth == 0:
            return False
      end += 1
      if end >= clean_lines.NumLines():
        break
      line = clean_lines.elided[end]
    # Incomplete program?
    return False
  if match_symbol.group(2) == '(':
    # Opening parenthesis.  Need to check what's to the left of the
    # parenthesis.  Look back one extra line for additional context.
    before_text = match_symbol.group(1)
    if linenum > 1:
      # NOTE(review): this prepends the line above the *current* line even
      # when the symbol was found on an earlier line (start < linenum);
      # confirm whether "start - 1" was intended here.
      before_text = clean_lines.elided[linenum - 1] + before_text
    # FIX: before_text was previously reset to match_symbol.group(1) right
    # here, unconditionally discarding the extra line of context gathered
    # above and making the look-back dead code.  The end-anchored searches
    # below now see the previous line when the current-line prefix alone is
    # empty or whitespace.
    #
    # Patterns that are likely to be types:
    #   [](type&&
    #   for (type&&
    #   sizeof(type&&
    #   operator=(type&&
    #
    if Search(r'(?:\]|\bfor|\bsizeof|\boperator\s*\S+\s*)\s*$', before_text):
      return True
    # Patterns that are likely to be expressions:
    #   if (expression &&
    #   while (expression &&
    #   : initializer(expression &&
    #   , initializer(expression &&
    #   ( FunctionCall(expression &&
    #   + FunctionCall(expression &&
    #   + (expression &&
    #
    # The last '+' represents operators such as '+' and '-'.
    if Search(r'(?:\bif|\bwhile|[-+=%^(<!?:,&*]\s*)$', before_text):
      return False
    # Something else.  Check that tokens to the left look like
    #   return_type function_name
    match_func = Match(r'^(.*)\s+\w(?:\w|::)*(?:<[^<>]*>)?\s*$',
                       match_symbol.group(1))
    if match_func:
      # Check for constructors, which don't have return types.
      if Search(r'\b(?:explicit|inline)$', match_func.group(1)):
        return True
      implicit_constructor = Match(r'\s*(\w+)\((?:const\s+)?(\w+)', prefix)
      if (implicit_constructor and
          implicit_constructor.group(1) == implicit_constructor.group(2)):
        return True
      return IsRValueType(clean_lines, nesting_state, linenum,
                          len(match_func.group(1)))
    # Nothing before the function name.  If this is inside a block scope,
    # this is probably a function call.
    return not (nesting_state.previous_stack_top and
                nesting_state.previous_stack_top.IsBlockInfo())
  if match_symbol.group(2) == '>':
    # Possibly a closing bracket, check that what's on the other side
    # looks like the start of a template.
    return IsTemplateParameterList(
        clean_lines, start, len(match_symbol.group(1)))
  # Some other symbol, usually something like "a=b&&c".  This is most
  # likely not a type.
  return False
def IsDeletedOrDefault(clean_lines, linenum):
  """Check if current constructor or operator is deleted or default.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.

  Returns:
    True if this is a deleted or default constructor.
  """
  line = clean_lines.elided[linenum]
  paren_start = line.find('(')
  if paren_start < 0:
    return False
  (end_line, _, paren_end) = CloseExpression(clean_lines, linenum, paren_start)
  if paren_end < 0:
    return False
  # Whatever follows the closing parenthesis decides it.
  return Match(r'\s*=\s*(?:delete|default)\b', end_line[paren_end:])
def IsRValueAllowed(clean_lines, linenum):
  """Check if RValue reference is allowed on a particular line.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.

  Returns:
    True if line is within the region where RValue references are allowed.
  """
  # Walk upward looking for the nearest GOOGLE_ALLOW_RVALUE_REFERENCES_*
  # marker.  A PUSH above us allows rvalue references until the matching
  # POP below.
  marker_re = r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)'
  for i in xrange(linenum, 0, -1):
    if Match(marker_re, clean_lines.elided[i]):
      if not clean_lines.elided[i].endswith('PUSH'):
        return False
      for j in xrange(linenum, clean_lines.NumLines(), 1):
        if Match(marker_re, clean_lines.elided[j]):
          return clean_lines.elided[j].endswith('POP')

  line = clean_lines.elided[linenum]

  # operator= is allowed when deleted or defaulted.
  if Search(r'\boperator\s*=\s*\(', line):
    return IsDeletedOrDefault(clean_lines, linenum)

  # So are constructors: "Class::Class(", "explicit/inline Class(", or a
  # bare "Class(" whose previous line clearly ended a statement.
  ctor_match = Match(r'\s*([\w<>]+)\s*::\s*([\w<>]+)\s*\(', line)
  if ctor_match and ctor_match.group(1) == ctor_match.group(2):
    return IsDeletedOrDefault(clean_lines, linenum)
  if Search(r'\b(?:explicit|inline)\s+[\w<>]+\s*\(', line):
    return IsDeletedOrDefault(clean_lines, linenum)
  if Match(r'\s*[\w<>]+\s*\(', line):
    previous_line = 'ReturnType'
    if linenum > 0:
      previous_line = clean_lines.elided[linenum - 1]
    if Match(r'^\s*$', previous_line) or Search(r'[{}:;]\s*$', previous_line):
      return IsDeletedOrDefault(clean_lines, linenum)

  return False
def CheckRValueReference(filename, clean_lines, linenum, nesting_state, error):
  """Find lines missing spaces around && or using unapproved rvalue refs.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # TODO(unknown): currently we don't check for rvalue references
  # with spaces surrounding the && to avoid false positives with
  # boolean expressions.
  line = clean_lines.elided[linenum]
  # Either "text&&" (no space before) or "&&text" (no space after).
  match = Match(r'^(.*\S)&&', line) or Match(r'(.*)&&\S', line)
  if not match:
    return
  # "(&&)" and "operator&&" are neither kind of problem.
  if '(&&)' in line or Search(r'\boperator\s*$', match.group(1)):
    return
  # Either poorly formed && or an rvalue reference, check the context
  # to get a more accurate error message.  Mostly we want to determine
  # if what's to the left of "&&" is a type or not.
  and_pos = len(match.group(1))
  if not IsRValueType(clean_lines, nesting_state, linenum, and_pos):
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around &&')
  elif not IsRValueAllowed(clean_lines, linenum):
    error(filename, linenum, 'build/c++11', 3,
          'RValue references are an unapproved C++ feature.')
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
  """Checks for additional blank line issues related to sections.

  Currently the only thing checked here is a blank line before
  public/protected/private section labels.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    class_info: A _ClassInfo object for the current class.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Skip checks if the class is small, where small means 25 lines or less.
  # 25 lines seems like a good cutoff since that's the usual height of
  # terminals, and any class that can't fit in one screen can't really
  # be considered "small".
  #
  # Also skip checks if we are on the first line.  This accounts for
  # classes that look like
  #   class Foo { public: ... };
  #
  # If we didn't find the end of the class, last_line would be zero,
  # and the check will be skipped by the first condition.
  if (class_info.last_line - class_info.starting_linenum <= 24 or
      linenum <= class_info.starting_linenum):
    return
  matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
  if matched:
    # Issue warning if the line before public/protected/private was
    # not a blank line, but don't do this if the previous line contains
    # "class" or "struct".  This can happen two ways:
    #  - We are at the beginning of the class.
    #  - We are forward-declaring an inner class that is semantically
    #    private, but needed to be public for implementation reasons.
    # Also ignores cases where the previous line ends with a backslash as can be
    # common when defining classes in C macros.
    prev_line = clean_lines.lines[linenum - 1]
    if (not IsBlankLine(prev_line) and
        not Search(r'\b(class|struct)\b', prev_line) and
        not Search(r'\\$', prev_line)):
      # Try a bit harder to find the beginning of the class.  This is to
      # account for multi-line base-specifier lists, e.g.:
      #   class Derived
      #       : public Base {
      end_class_head = class_info.starting_linenum
      for i in range(class_info.starting_linenum, linenum):
        if Search(r'\{\s*$', clean_lines.lines[i]):
          end_class_head = i
          break
      if end_class_head < linenum - 1:
        error(filename, linenum, 'whitespace/blank_line', 3,
              '"%s:" should be preceded by a blank line' % matched.group(1))
def GetPreviousNonBlankLine(clean_lines, linenum):
  """Return the most recent non-blank line and its line number.

  Args:
    clean_lines: A CleansedLines instance containing the file contents.
    linenum: The number of the line to check.

  Returns:
    A tuple with two elements.  The first element is the contents of the last
    non-blank line before the current line, or the empty string if this is
    the first non-blank line.  The second is the line number of that line, or
    -1 if this is the first non-blank line.
  """
  for candidate in xrange(linenum - 1, -1, -1):
    text = clean_lines.elided[candidate]
    if not IsBlankLine(text):
      return (text, candidate)
  return ('', -1)
def CheckBraces(filename, clean_lines, linenum, error):
  """Looks for misplaced braces (e.g. at the end of line).

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]  # get rid of comments and strings
  if Match(r'\s*{\s*$', line):
    # We allow an open brace to start a line in the case where someone is using
    # braces in a block to explicitly create a new scope, which is commonly used
    # to control the lifetime of stack-allocated variables.  Braces are also
    # used for brace initializers inside function calls.  We don't detect this
    # perfectly: we just don't complain if the last non-whitespace character on
    # the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
    # previous line starts a preprocessor block.
    prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
    if (not Search(r'[,;:}{(]\s*$', prevline) and
        not Match(r'\s*#', prevline)):
      error(filename, linenum, 'whitespace/braces', 4,
            '{ should almost always be at the end of the previous line')
  # An else clause should be on the same line as the preceding closing brace.
  if Match(r'\s*else\b\s*(?:if\b|\{|$)', line):
    prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
    if Match(r'\s*}\s*$', prevline):
      error(filename, linenum, 'whitespace/newline', 4,
            'An else should appear on the same line as the preceding }')
  # If braces come on one side of an else, they should be on both.
  # However, we have to worry about "else if" that spans multiple lines!
  if Search(r'else if\s*\(', line):  # could be multi-line if
    brace_on_left = bool(Search(r'}\s*else if\s*\(', line))
    # find the ( after the if
    pos = line.find('else if')
    pos = line.find('(', pos)
    if pos > 0:
      (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
      brace_on_right = endline[endpos:].find('{') != -1
      if brace_on_left != brace_on_right:  # must be brace after if
        error(filename, linenum, 'readability/braces', 5,
              'If an else has a brace on one side, it should have it on both')
  elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
    error(filename, linenum, 'readability/braces', 5,
          'If an else has a brace on one side, it should have it on both')
  # Likewise, an else should never have the else clause on the same line
  if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
    error(filename, linenum, 'whitespace/newline', 4,
          'Else clause should never be on same line as else (use 2 lines)')
  # In the same way, a do/while should never be on one line
  if Match(r'\s*do [^\s{]', line):
    error(filename, linenum, 'whitespace/newline', 4,
          'do/while clauses should not be on a single line')
  # Check single-line if/else bodies. The style guide says 'curly braces are not
  # required for single-line statements'. We additionally allow multi-line,
  # single statements, but we reject anything with more than one semicolon in
  # it. This means that the first semicolon after the if should be at the end of
  # its line, and the line after that should have an indent level equal to or
  # lower than the if. We also check for ambiguous if/else nesting without
  # braces.
  if_else_match = Search(r'\b(if\s*\(|else\b)', line)
  if if_else_match and not Match(r'\s*#', line):
    if_indent = GetIndentLevel(line)
    endline, endlinenum, endpos = line, linenum, if_else_match.end()
    if_match = Search(r'\bif\s*\(', line)
    if if_match:
      # This could be a multiline if condition, so find the end first.
      pos = if_match.end() - 1
      (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos)
    # Check for an opening brace, either directly after the if or on the next
    # line. If found, this isn't a single-statement conditional.
    if (not Match(r'\s*{', endline[endpos:])
        and not (Match(r'\s*$', endline[endpos:])
                 and endlinenum < (len(clean_lines.elided) - 1)
                 and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))):
      # Advance to the line holding the first semicolon after the condition.
      while (endlinenum < len(clean_lines.elided)
             and ';' not in clean_lines.elided[endlinenum][endpos:]):
        endlinenum += 1
        endpos = 0
      if endlinenum < len(clean_lines.elided):
        endline = clean_lines.elided[endlinenum]
        # We allow a mix of whitespace and closing braces (e.g. for one-liner
        # methods) and a single \ after the semicolon (for macros)
        endpos = endline.find(';')
        if not Match(r';[\s}]*(\\?)$', endline[endpos:]):
          # Semicolon isn't the last character, there's something trailing.
          # Output a warning if the semicolon is not contained inside
          # a lambda expression.
          if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$',
                       endline):
            error(filename, linenum, 'readability/braces', 4,
                  'If/else bodies with multiple statements require braces')
        elif endlinenum < len(clean_lines.elided) - 1:
          # Make sure the next line is dedented
          next_line = clean_lines.elided[endlinenum + 1]
          next_indent = GetIndentLevel(next_line)
          # With ambiguous nested if statements, this will error out on the
          # if that *doesn't* match the else, regardless of whether it's the
          # inner one or outer one.
          if (if_match and Match(r'\s*else\b', next_line)
              and next_indent != if_indent):
            error(filename, linenum, 'readability/braces', 4,
                  'Else clause should be indented at the same level as if. '
                  'Ambiguous nested if/else chains require braces.')
          elif next_indent > if_indent:
            error(filename, linenum, 'readability/braces', 4,
                  'If/else bodies with multiple statements require braces')
def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
  """Looks for redundant trailing semicolon after a closing brace.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  # Block bodies should not be followed by a semicolon. Due to C++11
  # brace initialization, there are more places where semicolons are
  # required than not, so we use a whitelist approach to check these
  # rather than a blacklist. These are the places where "};" should
  # be replaced by just "}":
  # 1. Some flavor of block following closing parenthesis:
  # for (;;) {};
  # while (...) {};
  # switch (...) {};
  # Function(...) {};
  # if (...) {};
  # if (...) else if (...) {};
  #
  # 2. else block:
  # if (...) else {};
  #
  # 3. const member function:
  # Function(...) const {};
  #
  # 4. Block following some statement:
  # x = 42;
  # {};
  #
  # 5. Block at the beginning of a function:
  # Function(...) {
  # {};
  # }
  #
  # Note that naively checking for the preceding "{" will also match
  # braces inside multi-dimensional arrays, but this is fine since
  # that expression will not contain semicolons.
  #
  # 6. Block following another block:
  # while (true) {}
  # {};
  #
  # 7. End of namespaces:
  # namespace {};
  #
  # These semicolons seems far more common than other kinds of
  # redundant semicolons, possibly due to people converting classes
  # to namespaces. For now we do not warn for this case.
  #
  # Try matching case 1 first.
  match = Match(r'^(.*\)\s*)\{', line)
  if match:
    # Matched closing parenthesis (case 1). Check the token before the
    # matching opening parenthesis, and don't warn if it looks like a
    # macro. This avoids these false positives:
    # - macro that defines a base class
    # - multi-line macro that defines a base class
    # - macro that defines the whole class-head
    #
    # But we still issue warnings for macros that we know are safe to
    # warn, specifically:
    # - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
    # - TYPED_TEST
    # - INTERFACE_DEF
    # - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
    #
    # We implement a whitelist of safe macros instead of a blacklist of
    # unsafe macros, even though the latter appears less frequently in
    # google code and would have been easier to implement. This is because
    # the downside for getting the whitelist wrong means some extra
    # semicolons, while the downside for getting the blacklist wrong
    # would result in compile errors.
    #
    # In addition to macros, we also don't want to warn on compound
    # literals and lambdas.
    closing_brace_pos = match.group(1).rfind(')')
    opening_parenthesis = ReverseCloseExpression(
        clean_lines, linenum, closing_brace_pos)
    if opening_parenthesis[2] > -1:
      line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
      macro = Search(r'\b([A-Z_]+)\s*$', line_prefix)
      func = Match(r'^(.*\])\s*$', line_prefix)
      if ((macro and
           macro.group(1) not in (
               'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
               'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
               'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
          (func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or
          Search(r'\s+=\s*$', line_prefix)):
        match = None
    if (match and
        opening_parenthesis[1] > 1 and
        Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):
      # Multi-line lambda-expression
      match = None
  else:
    # Try matching cases 2-3.
    match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
    if not match:
      # Try matching cases 4-6. These are always matched on separate lines.
      #
      # Note that we can't simply concatenate the previous line to the
      # current line and do a single match, otherwise we may output
      # duplicate warnings for the blank line case:
      # if (cond) {
      # // blank line
      # }
      prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
      if prevline and Search(r'[;{}]\s*$', prevline):
        match = Match(r'^(\s*)\{', line)
  # Check matching closing brace
  if match:
    (endline, endlinenum, endpos) = CloseExpression(
        clean_lines, linenum, len(match.group(1)))
    if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
      # Current {} pair is eligible for semicolon check, and we have found
      # the redundant semicolon, output warning here.
      #
      # Note: because we are scanning forward for opening braces, and
      # outputting warnings for the matching closing brace, if there are
      # nested blocks with trailing semicolons, we will get the error
      # messages in reversed order.
      error(filename, endlinenum, 'readability/braces', 4,
            "You don't need a ; after a }")
def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
  """Look for empty loop/conditional body with only a single semicolon.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Search for loop keywords at the beginning of the line. Because only
  # whitespaces are allowed before the keywords, this will also ignore most
  # do-while-loops, since those lines should start with closing brace.
  #
  # "if" blocks are checked too, since an empty conditional block
  # is likely an error.
  current = clean_lines.elided[linenum]
  keyword_match = Match(r'\s*(for|while|if)\s*\(', current)
  if not keyword_match:
    return

  # Find the end of the (possibly multi-line) conditional expression.
  cond_line, cond_linenum, cond_pos = CloseExpression(
      clean_lines, linenum, current.find('('))

  # Only warn when the condition is immediately followed by a semicolon.
  # Whitespace or newline after the condition is covered by a separate
  # check for semicolons preceded by whitespace.
  if cond_pos < 0 or not Match(r';', cond_line[cond_pos:]):
    return

  if keyword_match.group(1) == 'if':
    error(filename, cond_linenum, 'whitespace/empty_conditional_body', 5,
          'Empty conditional bodies should use {}')
  else:
    error(filename, cond_linenum, 'whitespace/empty_loop_body', 5,
          'Empty loop bodies should use {} or continue')
def FindCheckMacro(line):
  """Find a replaceable CHECK-like macro.

  Args:
    line: line to search on.
  Returns:
    (macro name, start position of macro arguments), or (None, -1) if no
    replaceable macro is found.
  """
  for candidate in _CHECK_MACROS:
    if line.find(candidate) < 0:
      continue
    # Require an actual macro invocation (word boundary plus an opening
    # parenthesis), so that other identifiers that merely contain the
    # macro name as a substring do not match.
    invocation = Match(r'^(.*\b' + candidate + r'\s*)\(', line)
    if invocation:
      return (candidate, len(invocation.group(1)))
  return (None, -1)
def CheckCheck(filename, clean_lines, linenum, error):
  """Checks the use of CHECK and EXPECT macros.

  Suggests CHECK_EQ/CHECK_LT/etc. when a bare CHECK(a == b)-style
  expression compares against a constant literal.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Decide the set of replacement macros that should be suggested
  lines = clean_lines.elided
  (check_macro, start_pos) = FindCheckMacro(lines[linenum])
  if not check_macro:
    return
  # Find end of the boolean expression by matching parentheses
  (last_line, end_line, end_pos) = CloseExpression(
      clean_lines, linenum, start_pos)
  if end_pos < 0:
    return
  # If the check macro is followed by something other than a
  # semicolon, assume users will log their own custom error messages
  # and don't suggest any replacements.
  if not Match(r'\s*;', last_line[end_pos:]):
    return
  if linenum == end_line:
    expression = lines[linenum][start_pos + 1:end_pos - 1]
  else:
    # Concatenate all lines of a multi-line macro argument.
    expression = lines[linenum][start_pos + 1:]
    for i in xrange(linenum + 1, end_line):
      expression += lines[i]
    expression += last_line[0:end_pos - 1]
  # Parse expression so that we can take parentheses into account.
  # This avoids false positives for inputs like "CHECK((a < 4) == b)",
  # which is not replaceable by CHECK_LE.
  lhs = ''
  rhs = ''
  operator = None
  while expression:
    matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
                    r'==|!=|>=|>|<=|<|\()(.*)$', expression)
    if matched:
      token = matched.group(1)
      if token == '(':
        # Parenthesized operand
        expression = matched.group(2)
        (end, _) = FindEndOfExpressionInLine(expression, 0, ['('])
        if end < 0:
          return # Unmatched parenthesis
        lhs += '(' + expression[0:end]
        expression = expression[end:]
      elif token in ('&&', '||'):
        # Logical and/or operators. This means the expression
        # contains more than one term, for example:
        # CHECK(42 < a && a < b);
        #
        # These are not replaceable with CHECK_LE, so bail out early.
        return
      elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
        # Non-relational operator
        lhs += token
        expression = matched.group(2)
      else:
        # Relational operator
        operator = token
        rhs = matched.group(2)
        break
    else:
      # Unparenthesized operand. Instead of appending to lhs one character
      # at a time, we do another regular expression match to consume several
      # characters at once if possible. Trivial benchmark shows that this
      # is more efficient when the operands are longer than a single
      # character, which is generally the case.
      matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
      if not matched:
        matched = Match(r'^(\s*\S)(.*)$', expression)
        if not matched:
          break
      lhs += matched.group(1)
      expression = matched.group(2)
  # Only apply checks if we got all parts of the boolean expression
  if not (lhs and operator and rhs):
    return
  # Check that rhs do not contain logical operators. We already know
  # that lhs is fine since the loop above parses out && and ||.
  if rhs.find('&&') > -1 or rhs.find('||') > -1:
    return
  # At least one of the operands must be a constant literal. This is
  # to avoid suggesting replacements for unprintable things like
  # CHECK(variable != iterator)
  #
  # The following pattern matches decimal, hex integers, strings, and
  # characters (in that order).
  lhs = lhs.strip()
  rhs = rhs.strip()
  match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
  if Match(match_constant, lhs) or Match(match_constant, rhs):
    # Note: since we know both lhs and rhs, we can provide a more
    # descriptive error message like:
    # Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
    # Instead of:
    # Consider using CHECK_EQ instead of CHECK(a == b)
    #
    # We are still keeping the less descriptive message because if lhs
    # or rhs gets long, the error message might become unreadable.
    error(filename, linenum, 'readability/check', 2,
          'Consider using %s instead of %s(a %s b)' % (
              _CHECK_REPLACEMENT[check_macro][operator],
              check_macro, operator))
def CheckAltTokens(filename, clean_lines, linenum, error):
  """Check alternative keywords being used in boolean expressions.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  text = clean_lines.elided[linenum]

  # Preprocessor directives are exempt.
  if Match(r'^\s*#', text):
    return

  # Last ditch effort to avoid multi-line comments. This will not help
  # if the comment started before the current line or ended after the
  # current line, but it catches most of the false positives. At least,
  # it provides a way to workaround this warning for people who use
  # multi-line comments in preprocessor macros.
  #
  # TODO(unknown): remove this once cpplint has better support for
  # multi-line comments.
  if '/*' in text or '*/' in text:
    return

  for hit in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(text):
    error(filename, linenum, 'readability/alt_tokens', 2,
          'Use operator %s instead of %s' % (
              _ALT_TOKEN_REPLACEMENT[hit.group(1)], hit.group(1)))
def GetLineWidth(line):
  """Determines the width of the line in column positions.

  Args:
    line: A string, which may be a Unicode string.
  Returns:
    The width of the line in column positions, accounting for Unicode
    combining characters and wide (East Asian) characters.
  """
  # Byte strings are assumed to be one column per byte.
  if not isinstance(line, unicode):
    return len(line)
  total = 0
  for ch in unicodedata.normalize('NFC', line):
    # Wide/fullwidth characters occupy two columns; combining marks none.
    if unicodedata.east_asian_width(ch) in ('W', 'F'):
      total += 2
    elif not unicodedata.combining(ch):
      total += 1
  return total
def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
               error):
  """Checks rules from the 'C++ style rules' section of cppguide.html.

  Most of these rules are hard to test (naming, comment style), but we
  do what we can. In particular we check for 2-space indents, line lengths,
  tab usage, spaces inside code, etc.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    file_extension: The extension (without the dot) of the filename.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # Don't use "elided" lines here, otherwise we can't check commented lines.
  # Don't want to use "raw" either, because we don't want to check inside C++11
  # raw strings,
  raw_lines = clean_lines.lines_without_raw_strings
  line = raw_lines[linenum]
  if line.find('\t') != -1:
    error(filename, linenum, 'whitespace/tab', 1,
          'Tab found; better to use spaces')
  # One or three blank spaces at the beginning of the line is weird; it's
  # hard to reconcile that with 2-space indents.
  # NOTE: here are the conditions rob pike used for his tests. Mine aren't
  # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
  # if(RLENGTH > 20) complain = 0;
  # if(match($0, " +(error|private|public|protected):")) complain = 0;
  # if(match(prev, "&& *$")) complain = 0;
  # if(match(prev, "\\|\\| *$")) complain = 0;
  # if(match(prev, "[\",=><] *$")) complain = 0;
  scope_or_label_pattern = r'\s*\w+\s*:\s*\\?$'
  classinfo = nesting_state.InnermostClass()
  initial_spaces = 0
  cleansed_line = clean_lines.elided[linenum]
  while initial_spaces < len(line) and line[initial_spaces] == ' ':
    initial_spaces += 1
  if line and line[-1].isspace():
    error(filename, linenum, 'whitespace/end_of_line', 4,
          'Line ends in whitespace. Consider deleting these extra spaces.')
  elif ((initial_spaces == 1 or initial_spaces == 3) and
        not Match(scope_or_label_pattern, cleansed_line) and
        not (clean_lines.raw_lines[linenum] != line and
             Match(r'^\s*""', line))):
    error(filename, linenum, 'whitespace/indent', 3,
          'Weird number of spaces at line-start. '
          'Are you using a 2-space indent?')
  # Check if the line is a header guard.
  is_header_guard = False
  if file_extension == 'h':
    cppvar = GetHeaderGuardCPPVariable(filename)
    if (line.startswith('#ifndef %s' % cppvar) or
        line.startswith('#define %s' % cppvar) or
        line.startswith('#endif // %s' % cppvar)):
      is_header_guard = True
  # #include lines and header guards can be long, since there's no clean
  # way to split them.
  #
  # URLs can be long too. It's possible to split these, but it makes them
  # harder to cut&paste.
  #
  # The "$Id:...$" comment may also get very long without it being the
  # writers fault.
  if (not line.startswith('#include') and not is_header_guard and
      not Match(r'^\s*//.*http(s?)://\S*$', line) and
      not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
    line_width = GetLineWidth(line)
    extended_length = int((_line_length * 1.25))
    if line_width > extended_length:
      error(filename, linenum, 'whitespace/line_length', 4,
            'Lines should very rarely be longer than %i characters' %
            extended_length)
    elif line_width > _line_length:
      error(filename, linenum, 'whitespace/line_length', 2,
            'Lines should be <= %i characters long' % _line_length)
  if (cleansed_line.count(';') > 1 and
      cleansed_line.find('for') == -1 and
      (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
       GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
      # It's ok to have many commands in a switch case that fits in 1 line
      not ((cleansed_line.find('case ') != -1 or
            cleansed_line.find('default:') != -1) and
           cleansed_line.find('break;') != -1)):
    error(filename, linenum, 'whitespace/newline', 0,
          'More than one command on the same line')
  # Some more style checks.
  CheckBraces(filename, clean_lines, linenum, error)
  CheckTrailingSemicolon(filename, clean_lines, linenum, error)
  CheckEmptyBlockBody(filename, clean_lines, linenum, error)
  CheckAccess(filename, clean_lines, linenum, nesting_state, error)
  CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
  CheckOperatorSpacing(filename, clean_lines, linenum, error)
  CheckParenthesisSpacing(filename, clean_lines, linenum, error)
  CheckCommaSpacing(filename, clean_lines, linenum, error)
  CheckBracesSpacing(filename, clean_lines, linenum, error)
  CheckSpacingForFunctionCall(filename, clean_lines, linenum, error)
  CheckRValueReference(filename, clean_lines, linenum, nesting_state, error)
  CheckCheck(filename, clean_lines, linenum, error)
  CheckAltTokens(filename, clean_lines, linenum, error)
  classinfo = nesting_state.InnermostClass()
  if classinfo:
    CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
# Matches a #include line, capturing the opening delimiter (group 1, '<' or
# '"') and the included header path (group 2).
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
# Matches the first component of a filename delimited by -s and _s. That is:
# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
def _DropCommonSuffixes(filename):
for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
'inl.h', 'impl.h', 'internal.h'):
if (filename.endswith(suffix) and len(filename) > len(suffix) and
filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0]
def _IsTestFilename(filename):
if (filename.endswith('_test.cc') or
filename.endswith('_unittest.cc') or
filename.endswith('_regtest.cc')):
return True
else:
return False
def _ClassifyInclude(fileinfo, include, is_system):
  """Figures out what kind of header 'include' is.

  Args:
    fileinfo: The current file cpplint is running over. A FileInfo instance.
    include: The path to a #included file.
    is_system: True if the #include used <> rather than "".

  Returns:
    One of the _XXX_HEADER constants.
  """
  # System headers split into C++ standard headers and C system headers.
  if is_system:
    if include in _CPP_HEADERS:
      return _CPP_SYS_HEADER
    return _C_SYS_HEADER

  # If the target file and the include we're checking share a
  # basename when we drop common extensions, and the include
  # lives in . , then it's likely to be owned by the target file.
  target_dir, target_base = os.path.split(
      _DropCommonSuffixes(fileinfo.RepositoryName()))
  include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
  if target_base == include_base and (
      include_dir == target_dir or
      include_dir == os.path.normpath(target_dir + '/../public')):
    return _LIKELY_MY_HEADER

  # If the target and include share some initial basename
  # component, it's possible the target is implementing the
  # include, so it's allowed to be first, but we'll never
  # complain if it's not there.
  target_first = _RE_FIRST_COMPONENT.match(target_base)
  include_first = _RE_FIRST_COMPONENT.match(include_base)
  if (target_first and include_first and
      target_first.group(0) == include_first.group(0)):
    return _POSSIBLE_MY_HEADER

  return _OTHER_HEADER
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
  """Check rules that are applicable to #include lines.

  Strings on #include lines are NOT removed from elided line, to make
  certain tasks easier. However, to prevent false positives, checks
  applicable to #include lines in CheckLanguage must be put here.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    include_state: An _IncludeState instance in which the headers are inserted.
    error: The function to call with any errors found.
  """
  fileinfo = FileInfo(filename)
  line = clean_lines.lines[linenum]
  # "include" should use the new style "foo/bar.h" instead of just "bar.h"
  # Only do this check if the included header follows google naming
  # conventions. If not, assume that it's a 3rd party API that
  # requires special include conventions.
  #
  # We also make an exception for Lua headers, which follow google
  # naming convention but not the include convention.
  # removed by weizhenwei, 2015.06.27;
  # match = Match(r'#include\s*"([^/]+\.h)"', line)
  # if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)):
  # error(filename, linenum, 'build/include', 4,
  # 'Include the directory when naming .h files')
  # we shouldn't include a file more than once. actually, there are a
  # handful of instances where doing so is okay, but in general it's
  # not.
  match = _RE_PATTERN_INCLUDE.search(line)
  if match:
    include = match.group(2)
    is_system = (match.group(1) == '<')
    duplicate_line = include_state.FindHeader(include)
    if duplicate_line >= 0:
      error(filename, linenum, 'build/include', 4,
            '"%s" already included at %s:%s' %
            (include, filename, duplicate_line))
    elif not _THIRD_PARTY_HEADERS_PATTERN.match(include):
      include_state.include_list[-1].append((include, linenum))
      # We want to ensure that headers appear in the right order:
      # 1) for foo.cc, foo.h (preferred location)
      # 2) c system files
      # 3) cpp system files
      # 4) for foo.cc, foo.h (deprecated location)
      # 5) other google headers
      #
      # We classify each include statement as one of those 5 types
      # using a number of techniques. The include_state object keeps
      # track of the highest type seen, and complains if we see a
      # lower type after that.
      error_message = include_state.CheckNextIncludeOrder(
          _ClassifyInclude(fileinfo, include, is_system))
      if error_message:
        error(filename, linenum, 'build/include_order', 4,
              '%s. Should be: %s.h, c system, c++ system, other.' %
              (error_message, fileinfo.BaseName()))
      canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
      if not include_state.IsInAlphabeticalOrder(
          clean_lines, linenum, canonical_include):
        error(filename, linenum, 'build/include_alpha', 4,
              'Include "%s" not in alphabetical order' % include)
      include_state.SetLastHeader(canonical_include)
  # Look for any of the stream classes that are part of standard C++.
  match = _RE_PATTERN_INCLUDE.match(line)
  if match:
    include = match.group(2)
    if Match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
      # Many unit tests use cout, so we exempt them.
      if not _IsTestFilename(filename):
        # Suggest a different header for ostream
        if include == 'ostream':
          error(filename, linenum, 'readability/streams', 3,
                'For logging, include "base/logging.h" instead of <ostream>.')
        else:
          error(filename, linenum, 'readability/streams', 3,
                'Streams are highly discouraged.')
def _GetTextInside(text, start_pattern):
# TODO(unknown): Audit cpplint.py to see what places could be profitably
# rewritten to use _GetTextInside (and use inferior regexp matching today).
# Give opening punctuations to get the matching close-punctuations.
matching_punctuation = {'(': ')', '{': '}', '[': ']'}
closing_punctuation = set(matching_punctuation.itervalues())
# Find the position to start extracting text.
match = re.search(start_pattern, text, re.M)
if not match: # start_pattern not found in text.
return None
start_position = match.end(0)
assert start_position > 0, (
'start_pattern must ends with an opening punctuation.')
assert text[start_position - 1] in matching_punctuation, (
'start_pattern must ends with an opening punctuation.')
# Stack of closing punctuations we expect to have in text after position.
punctuation_stack = [matching_punctuation[text[start_position - 1]]]
position = start_position
while punctuation_stack and position < len(text):
if text[position] == punctuation_stack[-1]:
punctuation_stack.pop()
elif text[position] in closing_punctuation:
# A closing punctuation without matching opening punctuations.
return None
elif text[position] in matching_punctuation:
punctuation_stack.append(matching_punctuation[text[position]])
position += 1
if punctuation_stack:
# Opening punctuations left without matching close-punctuations.
return None
# punctuations match.
return text[start_position:position - 1]
# Patterns for matching call-by-reference parameters.
#
# Supports nested templates up to 2 levels deep using this messy pattern:
# < (?: < (?: < [^<>]*
# >
# | [^<>] )*
# >
# | [^<>] )*
# >
_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*' # =~ [[:alpha:]][[:alnum:]]*
# A (possibly const/qualified) C++ type name, including scoped names and
# template arguments up to 2 levels of nesting.
_RE_PATTERN_TYPE = (
    r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
    r'(?:\w|'
    r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
    r'::)+')
# A call-by-reference parameter ends with '& identifier'.
_RE_PATTERN_REF_PARAM = re.compile(
    r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
    r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
# A call-by-const-reference parameter either ends with 'const& identifier'
# or looks like 'const type& identifier' when 'type' is atomic.
_RE_PATTERN_CONST_REF_PARAM = (
    r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
    r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
def CheckLanguage(filename, clean_lines, linenum, file_extension,
                  include_state, nesting_state, error):
  """Checks rules from the 'C++ language rules' section of cppguide.html.

  Some of these rules are hard to test (function overloading, using
  uint32 inappropriately), but we do the best we can.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    file_extension: The extension (without the dot) of the filename.
    include_state: An _IncludeState instance in which the headers are inserted.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # If the line is empty or consists of entirely a comment, no need to
  # check it.
  line = clean_lines.elided[linenum]
  if not line:
    return
  match = _RE_PATTERN_INCLUDE.search(line)
  if match:
    CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
    return
  # Reset include state across preprocessor directives. This is meant
  # to silence warnings for conditional includes.
  match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line)
  if match:
    include_state.ResetSection(match.group(1))
  # Make Windows paths like Unix.
  fullname = os.path.abspath(filename).replace('\\', '/')
  # Perform other checks now that we are sure that this is not an include line
  CheckCasts(filename, clean_lines, linenum, error)
  CheckGlobalStatic(filename, clean_lines, linenum, error)
  CheckPrintf(filename, clean_lines, linenum, error)
  if file_extension == 'h':
    # TODO(unknown): check that 1-arg constructors are explicit.
    # How to tell it's a constructor?
    # (handled in CheckForNonStandardConstructs for now)
    # TODO(unknown): check that classes declare or disable copy/assign
    # (level 1 error)
    pass
  # Check if people are using the verboten C basic types. The only exception
  # we regularly allow is "unsigned short port" for port.
  if Search(r'\bshort port\b', line):
    if not Search(r'\bunsigned short port\b', line):
      error(filename, linenum, 'runtime/int', 4,
            'Use "unsigned short" for ports, not "short"')
  else:
    match = Search(r'\b(short|long(?! +double)|long long)\b', line)
    if match:
      error(filename, linenum, 'runtime/int', 4,
            'Use int16/int64/etc, rather than the C type %s' % match.group(1))
  # Check if some verboten operator overloading is going on
  # TODO(unknown): catch out-of-line unary operator&:
  # class X {};
  # int operator&(const X& x) { return 42; } // unary operator&
  # The trick is it's hard to tell apart from binary operator&:
  # class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
  if Search(r'\boperator\s*&\s*\(\s*\)', line):
    error(filename, linenum, 'runtime/operator', 4,
          'Unary operator& is dangerous. Do not use it.')
  # Check for suspicious usage of "if" like
  # } if (a == b) {
  if Search(r'\}\s*if\s*\(', line):
    error(filename, linenum, 'readability/braces', 4,
          'Did you mean "else if"? If not, start a new line for "if".')
  # Check for potential format string bugs like printf(foo).
  # We constrain the pattern not to pick things like DocidForPrintf(foo).
  # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
  # TODO(unknown): Catch the following case. Need to change the calling
  # convention of the whole function to process multiple line to handle it.
  # printf(
  # boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
  printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
  if printf_args:
    match = Match(r'([\w.\->()]+)$', printf_args)
    if match and match.group(1) != '__VA_ARGS__':
      function_name = re.search(r'\b((?:string)?printf)\s*\(',
                                line, re.I).group(1)
      error(filename, linenum, 'runtime/printf', 4,
            'Potential format string bug. Do %s("%%s", %s) instead.'
            % (function_name, match.group(1)))
  # Check for potential memset bugs like memset(buf, sizeof(buf), 0).
  match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
  if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
    error(filename, linenum, 'runtime/memset', 4,
          'Did you mean "memset(%s, 0, %s)"?'
          % (match.group(1), match.group(2)))
  # removed by weizhenwei, 2015.06.24;
  # if Search(r'\busing namespace\b', line):
  # error(filename, linenum, 'build/namespaces', 5,
  # 'Do not use namespace using-directives. '
  # 'Use using-declarations instead.')
  # Detect variable-length arrays.
  match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
  if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
      match.group(3).find(']') == -1):
    # Split the size using space and arithmetic operators as delimiters.
    # If any of the resulting tokens are not compile time constants then
    # report the error.
    tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3))
    is_const = True
    skip_next = False
    for tok in tokens:
      if skip_next:
        skip_next = False
        continue
      if Search(r'sizeof\(.+\)', tok): continue
      if Search(r'arraysize\(\w+\)', tok): continue
      tok = tok.lstrip('(')
      tok = tok.rstrip(')')
      if not tok: continue
      if Match(r'\d+', tok): continue
      if Match(r'0[xX][0-9a-fA-F]+', tok): continue
      if Match(r'k[A-Z0-9]\w*', tok): continue
      if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
      if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
      # A catch all for tricky sizeof cases, including 'sizeof expression',
      # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
      # requires skipping the next token because we split on ' ' and '*'.
      if tok.startswith('sizeof'):
        skip_next = True
        continue
      is_const = False
      break
    if not is_const:
      error(filename, linenum, 'runtime/arrays', 1,
            'Do not use variable-length arrays. Use an appropriately named '
            "('k' followed by CamelCase) compile-time constant for the size.")
  # If DISALLOW_COPY_AND_ASSIGN DISALLOW_IMPLICIT_CONSTRUCTORS is present,
  # then it should be the last thing in the class declaration.
  match = Match(
      (r'\s*'
       r'(DISALLOW_(COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))'
       r'\(.*\);$'),
      line)
  if match and linenum + 1 < clean_lines.NumLines():
    next_line = clean_lines.elided[linenum + 1]
    # We allow some, but not all, declarations of variables to be present
    # in the statement that defines the class. The [\w\*,\s]* fragment of
    # the regular expression below allows users to declare instances of
    # the class or pointers to instances, but not less common types such
    # as function pointers or arrays. It's a tradeoff between allowing
    # reasonable code and avoiding trying to parse more C++ using regexps.
    if not Search(r'^\s*}[\w\*,\s]*;', next_line):
      error(filename, linenum, 'readability/constructors', 3,
            match.group(1) + ' should be the last thing in the class')
  # Check for use of unnamed namespaces in header files. Registration
  # macros are typically OK, so we allow use of "namespace {" on lines
  # that end with backslashes.
  if (file_extension == 'h'
      and Search(r'\bnamespace\s*{', line)
      and line[-1] != '\\'):
    error(filename, linenum, 'build/namespaces', 4,
          'Do not use unnamed namespaces in header files. See '
          'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
          ' for more information.')
def CheckGlobalStatic(filename, clean_lines, linenum, error):
  """Check for unsafe global or static objects.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  # Match two lines at a time to support multiline declarations
  if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line):
    line += clean_lines.elided[linenum + 1].strip()
  # Check for people declaring static/global STL strings at the top level.
  # This is dangerous because the C++ language does not guarantee that
  # globals with constructors are initialized before the first access.
  match = Match(
      r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
      line)
  # Remove false positives:
  # - String pointers (as opposed to values).
  # string *pointer
  # const string *pointer
  # string const *pointer
  # string *const pointer
  #
  # - Functions and template specializations.
  # string Function<Type>(...
  # string Class<Type>::Method(...
  #
  # - Operators. These are matched separately because operator names
  # cross non-word boundaries, and trying to match both operators
  # and functions at the same time would decrease accuracy of
  # matching identifiers.
  # string Class::operator*()
  if (match and
      not Search(r'\bstring\b(\s+const)?\s*\*\s*(const\s+)?\w', line) and
      not Search(r'\boperator\W', line) and
      not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(3))):
    error(filename, linenum, 'runtime/string', 4,
          'For a static/global string constant, use a C style string instead: '
          '"%schar %s[]".' %
          (match.group(1), match.group(2)))
  # A member initialized with itself, e.g. "Foo(foo_) : foo_(foo_)".
  if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
    error(filename, linenum, 'runtime/init', 4,
          'You seem to be initializing a member variable with itself.')
def CheckPrintf(filename, clean_lines, linenum, error):
  """Check for printf related issues.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  text = clean_lines.elided[linenum]

  # snprintf with a nonzero numeric literal size should use sizeof instead.
  size_match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', text)
  if size_match and size_match.group(2) != '0':
    # If 2nd arg is zero, snprintf is used to calculate size.
    error(filename, linenum, 'runtime/printf', 3,
          'If you can, use sizeof(%s) instead of %s as the 2nd arg '
          'to snprintf.' % (size_match.group(1), size_match.group(2)))

  # sprintf is never acceptable; strcpy/strcat are strongly discouraged.
  if Search(r'\bsprintf\s*\(', text):
    error(filename, linenum, 'runtime/printf', 5,
          'Never use sprintf. Use snprintf instead.')
  copy_match = Search(r'\b(strcpy|strcat)\s*\(', text)
  if copy_match:
    error(filename, linenum, 'runtime/printf', 4,
          'Almost always, snprintf is better than %s' % copy_match.group(1))
def IsDerivedFunction(clean_lines, linenum):
  """Check if current line contains the declaration of an 'override' function.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
  Returns:
    True if current line contains a function declared with "override".
  """
  # Scan back a few lines for the start of the current function.
  for idx in xrange(linenum, max(-1, linenum - 10), -1):
    decl = Match(r'^([^()]*\w+)\(', clean_lines.elided[idx])
    if not decl:
      continue
    # Look for "override" after the matching closing parenthesis.
    tail, _, close_pos = CloseExpression(clean_lines, idx, len(decl.group(1)))
    return (close_pos >= 0 and
            Search(r'\boverride\b', tail[close_pos:]))
  return False
def IsInitializerList(clean_lines, linenum):
  """Check if current line is inside constructor initializer list.

  Scans backward from the current line until it sees either the start of
  an initializer list (a lone colon) or the end of a previous statement.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
  Returns:
    True if current line appears to be inside constructor initializer
    list, False otherwise.
  """
  for i in xrange(linenum, 1, -1):
    line = clean_lines.elided[i]
    if i == linenum:
      # Strip an opening function-body brace from the current line so the
      # patterns below only see the declaration part.
      remove_function_body = Match(r'^(.*)\{\s*$', line)
      if remove_function_body:
        line = remove_function_body.group(1)
    if Search(r'\s:\s*\w+[({]', line):
      # A lone colon tend to indicate the start of a constructor
      # initializer list. It could also be a ternary operator, which
      # also tend to appear in constructor initializer lists as
      # opposed to parameter lists.
      return True
    if Search(r'\}\s*,\s*$', line):
      # A closing brace followed by a comma is probably the end of a
      # brace-initialized member in constructor initializer list.
      return True
    if Search(r'[{};]\s*$', line):
      # Found one of the following:
      # - A closing brace or semicolon, probably the end of the previous
      # function.
      # - An opening brace, probably the start of current class or namespace.
      #
      # Current line is probably not inside an initializer list since
      # we saw one of those things without seeing the starting colon.
      return False
  # Got to the beginning of the file without seeing the start of
  # constructor initializer list.
  return False
def CheckForNonConstReference(filename, clean_lines, linenum,
                              nesting_state, error):
  """Check for non-const references.

  Separate from CheckLanguage since it scans backwards from current
  line, instead of scanning forward.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # Do nothing if there is no '&' on current line.
  line = clean_lines.elided[linenum]
  if '&' not in line:
    return
  # If a function is inherited, current function doesn't have much of a
  # choice: const-ness of its reference parameters is dictated by the base
  # class, so don't warn.
  if IsDerivedFunction(clean_lines, linenum):
    return
  # Long type names may be broken across lines; glue the relevant part of
  # the previous line(s) onto the current one so the parameter patterns
  # below see the whole declaration.
  if linenum > 1:
    previous = None
    if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
      previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
                        clean_lines.elided[linenum - 1])
    elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
      previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
                        clean_lines.elided[linenum - 1])
    if previous:
      line = previous.group(1) + line.lstrip()
    else:
      # Check for templated parameter split across multiple lines by
      # finding the '<' that matches the last '>' on this line.
      endpos = line.rfind('>')
      if endpos > -1:
        (_, startline, startpos) = ReverseCloseExpression(
            clean_lines, linenum, endpos)
        if startpos > -1 and startline < linenum:
          # Collect all pieces from the start of the template argument
          # up to and including the current line.
          line = ''
          for i in xrange(startline, linenum + 1):
            line += clean_lines.elided[i].strip()
  if (nesting_state.previous_stack_top and
      not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or
           isinstance(nesting_state.previous_stack_top, _NamespaceInfo))):
    # Not at toplevel, not within a class, and not within a namespace
    return
  # Avoid initializer lists.  We only need to scan back from the
  # current line for something that starts with ':'.
  #
  # We don't need to check the current line, since the '&' would
  # appear inside the second set of parentheses on the current line as
  # opposed to the first set.
  if linenum > 0:
    for i in xrange(linenum - 1, max(0, linenum - 10), -1):
      previous_line = clean_lines.elided[i]
      if not Search(r'[),]\s*$', previous_line):
        break
      if Match(r'^\s*:\s+\S', previous_line):
        return
  # Avoid preprocessor continuation lines.
  if Search(r'\\\s*$', line):
    return
  if IsInitializerList(clean_lines, linenum):
    return
  # Non-const references are allowed in a few well-known signatures, e.g.
  # swap() and stream operators.
  whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
                           r'operator\s*[<>][<>]|'
                           r'static_assert|COMPILE_ASSERT'
                           r')\s*\(')
  if Search(whitelisted_functions, line):
    return
  elif not Search(r'\S+\([^)]*$', line):
    # Don't see a whitelisted function on this line.  Actually we did not
    # see any function name on this line at all, so this is likely a
    # multi-line parameter list.  Try a bit harder to catch this case.
    for i in xrange(2):
      if (linenum > i and
          Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])):
        return
  decls = ReplaceAll(r'{[^}]*}', ' ', line)  # exclude function body
  for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
    if not Match(_RE_PATTERN_CONST_REF_PARAM, parameter):
      error(filename, linenum, 'runtime/references', 2,
            'Is this a non-const reference? '
            'If so, make const or use a pointer: ' +
            ReplaceAll(' *<', '<', parameter))
def CheckCasts(filename, clean_lines, linenum, error):
  """Various cast related checks.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  # Check to see if they're using an conversion function cast.
  # Captures the most common basic types; group(1) picks up 'new' or a
  # template-argument prefix, group(2) the type, group(3) what follows.
  match = Search(
      r'(\bnew\s+|\S<\s*(?:const\s+)?)?\b'
      r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
      r'(\([^)].*)', line)
  expecting_function = ExpectingFunctionArgs(clean_lines, linenum)
  if match and not expecting_function:
    matched_type = match.group(2)
    # 'new type(...)' and template arguments like 'Foo<int(int)>' are not
    # deprecated casts.
    matched_new_or_template = match.group(1)
    # Avoid arrays by looking for brackets that come after the closing
    # parenthesis.
    if Match(r'\([^()]+\)\s*\[', match.group(3)):
      return
    # Also skip function-pointer types, '(*)' abstract declarators,
    # alias declarations ('using X = int(...)') and placement new.
    matched_funcptr = match.group(3)
    if (matched_new_or_template is None and
        not (matched_funcptr and
             (Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
                    matched_funcptr) or
              matched_funcptr.startswith('(*)'))) and
        not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and
        not Search(r'new\(\S+\)\s*' + matched_type, line)):
      error(filename, linenum, 'readability/casting', 4,
            'Using deprecated casting style. '
            'Use static_cast<%s>(...) instead' %
            matched_type)
  if not expecting_function:
    CheckCStyleCast(filename, clean_lines, linenum, 'static_cast',
                    r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
  #
  # (char *) "foo" should always be a const_cast (reinterpret_cast won't
  # compile).
  if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast',
                     r'\((char\s?\*+\s?)\)\s*"', error):
    pass
  else:
    # Check pointer casts for other than string constants
    CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast',
                    r'\((\w+\s?\*+\s?)\)', error)
  # In addition, we look for people taking the address of a cast.  This
  # is dangerous -- casts can assign to temporaries, so the pointer doesn't
  # point where you think.
  #
  # Some non-identifier character is required before the '&' for the
  # expression to be recognized as a cast.  These are casts:
  #   expression = &static_cast<int*>(temporary());
  #   function(&(int*)(temporary()));
  #
  # This is not a cast:
  #   reference_type&(int* function_param);
  match = Search(
      r'(?:[^\w]&\(([^)]+)\)[\w(])|'
      r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line)
  if match and match.group(1) != '*':
    # Try a better error message when the & is bound to something
    # dereferenced by the casted pointer, as opposed to the casted
    # pointer itself.
    parenthesis_error = False
    match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line)
    if match:
      _, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1)))
      if x1 >= 0 and clean_lines.elided[y1][x1] == '(':
        _, y2, x2 = CloseExpression(clean_lines, y1, x1)
        if x2 >= 0:
          extended_line = clean_lines.elided[y2][x2:]
          if y2 < clean_lines.NumLines() - 1:
            extended_line += clean_lines.elided[y2 + 1]
          if Match(r'\s*(?:->|\[)', extended_line):
            parenthesis_error = True
    if parenthesis_error:
      error(filename, linenum, 'readability/casting', 4,
            ('Are you taking an address of something dereferenced '
             'from a cast? Wrapping the dereferenced expression in '
             'parentheses will make the binding more obvious'))
    else:
      error(filename, linenum, 'runtime/casting', 4,
            ('Are you taking an address of a cast? '
             'This is dangerous: could be a temp var. '
             'Take the address before doing the cast, rather than after'))
def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):
  """Checks for a C-style cast by looking for the pattern.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    cast_type: The string for the C++ cast to recommend.  This is either
      reinterpret_cast, static_cast, or const_cast, depending.
    pattern: The regular expression used to find C-style casts.
    error: The function to call with any errors found.

  Returns:
    True if an error was emitted.
    False otherwise.
  """
  line = clean_lines.elided[linenum]
  match = Search(pattern, line)
  if not match:
    return False
  # Exclude lines with keywords that tend to look like casts
  context = line[0:match.start(1) - 1]
  if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context):
    return False
  # Try expanding current context backwards to see if the parenthesized
  # expression is really an argument inside a macro invocation; if so,
  # it is not a cast.
  if linenum > 0:
    for i in xrange(linenum - 1, max(0, linenum - 5), -1):
      context = clean_lines.elided[i] + context
      if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context):
        return False
  # operator++(int) and operator--(int)
  if context.endswith(' operator++') or context.endswith(' operator--'):
    return False
  # A single unnamed argument for a function tends to look like old
  # style cast.  If we see those, don't issue warnings for deprecated
  # casts, instead issue warnings for unnamed arguments where
  # appropriate.
  #
  # These are things that we want warnings for, since the style guide
  # explicitly require all parameters to be named:
  #   Function(int);
  #   Function(int) {
  #   ConstMember(int) const;
  #   ConstMember(int) const {
  #   ExceptionMember(int) throw (...);
  #   ExceptionMember(int) throw (...) {
  #   PureVirtual(int) = 0;
  #
  # These are functions of some sort, where the compiler would be fine
  # if they had named parameters, but people often omit those
  # identifiers to reduce clutter:
  #   (FunctionPointer)(int);
  #   (FunctionPointer)(int) = value;
  #   Function((function_pointer_arg)(int))
  #   Function((function_pointer_arg)(int), int param)
  #   <TemplateArgument(int)>;
  #   <(FunctionPointerTemplateArgument)(int)>;
  remainder = line[match.end(0):]
  if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),])',
           remainder):
    # Looks like an unnamed parameter.
    # Don't warn on any kind of template arguments.
    if Match(r'^\s*>', remainder):
      return False
    # Don't warn on assignments to function pointers, but keep warnings for
    # unnamed parameters to pure virtual functions.  Note that this pattern
    # will also pass on assignments of "0" to function pointers, but the
    # preferred values for those would be "nullptr" or "NULL".
    # NOTE(review): '^\s=' requires exactly one whitespace character before
    # the '=' -- confirm this matches the declarations it is meant to catch.
    matched_zero = Match(r'^\s=\s*(\S+)\s*;', remainder)
    if matched_zero and matched_zero.group(1) != '0':
      return False
    # Don't warn on function pointer declarations.  For this we need
    # to check what came before the "(type)" string.
    if Match(r'.*\)\s*$', line[0:match.start(0)]):
      return False
    # Don't warn if the parameter is named with block comments, e.g.:
    #   Function(int /*unused_param*/);
    raw_line = clean_lines.raw_lines[linenum]
    if '/*' in raw_line:
      return False
    # Passed all filters, issue warning here.
    error(filename, linenum, 'readability/function', 3,
          'All parameters should be named in a function')
    return True
  # At this point, all that should be left is actual casts.
  error(filename, linenum, 'readability/casting', 4,
        'Using C-style cast. Use %s<%s>(...) instead' %
        (cast_type, match.group(1)))
  return True
def ExpectingFunctionArgs(clean_lines, linenum):
  """Checks whether where function type arguments are expected.

  Recognizes MOCK_METHOD-style macros and std::function template arguments,
  where a bare type like 'int(int)' is legitimate.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.

  Returns:
    True if the line at 'linenum' is inside something that expects arguments
    of function types.
  """
  line = clean_lines.elided[linenum]
  return (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
          (linenum >= 2 and
           (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
                  clean_lines.elided[linenum - 1]) or
            Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
                  clean_lines.elided[linenum - 2]) or
            Search(r'\bstd::m?function\s*\<\s*$',
                   clean_lines.elided[linenum - 1]))))
# Mapping of STL header name to the template/class names it declares.  Used
# by CheckForIncludeWhatYouUse to suggest which #include to add when one of
# these names is used.
_HEADERS_CONTAINING_TEMPLATES = (
    ('<deque>', ('deque',)),
    ('<functional>', ('unary_function', 'binary_function',
                      'plus', 'minus', 'multiplies', 'divides', 'modulus',
                      'negate',
                      'equal_to', 'not_equal_to', 'greater', 'less',
                      'greater_equal', 'less_equal',
                      'logical_and', 'logical_or', 'logical_not',
                      'unary_negate', 'not1', 'binary_negate', 'not2',
                      'bind1st', 'bind2nd',
                      'pointer_to_unary_function',
                      'pointer_to_binary_function',
                      'ptr_fun',
                      'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
                      'mem_fun_ref_t',
                      'const_mem_fun_t', 'const_mem_fun1_t',
                      'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
                      'mem_fun_ref',
                     )),
    ('<limits>', ('numeric_limits',)),
    ('<list>', ('list',)),
    ('<map>', ('map', 'multimap',)),
    ('<memory>', ('allocator',)),
    ('<queue>', ('queue', 'priority_queue',)),
    ('<set>', ('set', 'multiset',)),
    ('<stack>', ('stack',)),
    ('<string>', ('char_traits', 'basic_string',)),
    ('<utility>', ('pair',)),
    ('<vector>', ('vector',)),

    # gcc extensions.
    # Note: std::hash is their hash, ::hash is our hash
    ('<hash_map>', ('hash_map', 'hash_multimap',)),
    ('<hash_set>', ('hash_set', 'hash_multiset',)),
    ('<slist>', ('slist',)),
    )
# Matches uses of the identifier 'string' (STL's one non-templatized type
# that CheckForIncludeWhatYouUse cares about).
_RE_PATTERN_STRING = re.compile(r'\bstring\b')

# (use-pattern, template name, header) triples for common <algorithm>
# functions.
_re_pattern_algorithm_header = []
for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
                  'transform'):
  # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
  # type::max().
  _re_pattern_algorithm_header.append(
      (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
       _template,
       '<algorithm>'))

# (use-pattern, 'name<>', header) triples derived from every entry in
# _HEADERS_CONTAINING_TEMPLATES above.
_re_pattern_templates = []
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
  for _template in _templates:
    _re_pattern_templates.append(
        (re.compile(r'(\<|\b)' + _template + r'\s*\<'),
         _template + '<>',
         _header))
def FilesBelongToSameModule(filename_cc, filename_h):
  """Check if these two filenames belong to the same module.

  A .cc file and a .h file belong to the same module when, after stripping
  extensions, a trailing '_unittest'/'_test' suffix from the .cc stem, a
  trailing '-inl' suffix from the .h stem, and any '/public/' or
  '/internal/' path components, the .cc path ends with the .h path.

  Args:
    filename_cc: Path of the .cc file.
    filename_h: Path of the header file.

  Returns:
    Tuple (bool, string).  The bool is True if the files belong to the same
    module; the string is the prefix of the .cc path that precedes the
    common part (e.g. 'a/b/' for 'a/b/foo.cc' vs 'foo.h'), or '' otherwise.
  """
  if not filename_cc.endswith('.cc'):
    return (False, '')
  if not filename_h.endswith('.h'):
    return (False, '')
  stem_cc = filename_cc[:-len('.cc')]
  # A test file pairs with the header of the code under test.
  for test_suffix in ('_unittest', '_test'):
    if stem_cc.endswith(test_suffix):
      stem_cc = stem_cc[:-len(test_suffix)]
      break
  stem_h = filename_h[:-len('.h')]
  if stem_h.endswith('-inl'):
    stem_h = stem_h[:-len('-inl')]
  # 'a/public/foo.cc' and 'a/internal/foo.h' still count as one module.
  for visibility_dir in ('/public/', '/internal/'):
    stem_cc = stem_cc.replace(visibility_dir, '/')
    stem_h = stem_h.replace(visibility_dir, '/')
  if not stem_cc.endswith(stem_h):
    return (False, '')
  return (True, stem_cc[:-len(stem_h)])
def UpdateIncludeState(filename, include_dict, io=codecs):
  """Fill up the include_dict with new includes found from the file.

  Args:
    filename: The name of the header to read.
    include_dict: A dictionary in which the headers are inserted, mapping
        include name to the line number of its first occurrence.
    io: The io factory to use to read the file.  Provided for testability.

  Returns:
    True if a header was successfully added.  False otherwise.
  """
  try:
    headerfile = io.open(filename, 'r', 'utf8', 'replace')
  except IOError:
    return False
  try:
    linenum = 0
    for line in headerfile:
      linenum += 1
      clean_line = CleanseComments(line)
      match = _RE_PATTERN_INCLUDE.search(clean_line)
      if match:
        include = match.group(2)
        # setdefault keeps the line number of the first occurrence only.
        include_dict.setdefault(include, linenum)
  finally:
    # Bug fix: the original never closed the handle, leaking a file
    # descriptor per header scanned.
    headerfile.close()
  return True
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
                              io=codecs):
  """Reports for missing stl includes.

  This function will output warnings to make sure you are including the
  headers necessary for the stl containers and functions that you use.  We
  only give one reason to include a header: if you use both equal_to<> and
  less<> in a .h file, only one (the latter in the file) will be reported
  as a reason to include <functional>.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    include_state: An _IncludeState instance.
    error: The function to call with any errors found.
    io: The IO factory to use to read the header file.  Provided for
        unittest injection.
  """
  required = {}  # A map of header name to linenumber and the template entity.
  # Example of required: { '<functional>': (1219, 'less<>') }
  for linenum in xrange(clean_lines.NumLines()):
    line = clean_lines.elided[linenum]
    if not line or line[0] == '#':
      continue
    # String is special -- it is a non-templatized type in STL.
    matched = _RE_PATTERN_STRING.search(line)
    if matched:
      # Don't warn about strings in non-STL namespaces:
      # (We check only the first match per line; good enough.)
      prefix = line[:matched.start()]
      if prefix.endswith('std::') or not prefix.endswith('::'):
        required['<string>'] = (linenum, 'string')
    for pattern, template, header in _re_pattern_algorithm_header:
      if pattern.search(line):
        required[header] = (linenum, template)
    # The following function is just a speed up, no semantics are changed.
    if not '<' in line:  # Reduces the cpu time usage by skipping lines.
      continue
    for pattern, template, header in _re_pattern_templates:
      if pattern.search(line):
        required[header] = (linenum, template)
  # The policy is that if you #include something in foo.h you don't need to
  # include it again in foo.cc. Here, we will look at possible includes.
  # Let's flatten the include_state include_list and copy it into a dictionary.
  include_dict = dict([item for sublist in include_state.include_list
                       for item in sublist])
  # Did we find the header for this file (if any) and successfully load it?
  header_found = False
  # Use the absolute path so that matching works properly.
  abs_filename = FileInfo(filename).FullName()
  # For Emacs's flymake.
  # If cpplint is invoked from Emacs's flymake, a temporary file is generated
  # by flymake and that file name might end with '_flymake.cc'. In that case,
  # restore original file name here so that the corresponding header file can be
  # found.
  # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
  # instead of 'foo_flymake.h'
  abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)
  # include_dict is modified during iteration, so we iterate over a copy of
  # the keys.
  # NOTE(review): this relies on Python 2, where dict.keys() returns a list
  # copy; under Python 3 this must become list(include_dict.keys()).
  header_keys = include_dict.keys()
  for header in header_keys:
    (same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
    fullpath = common_path + header
    if same_module and UpdateIncludeState(fullpath, include_dict, io):
      header_found = True
  # If we can't find the header file for a .cc, assume it's because we don't
  # know where to look. In that case we'll give up as we're not sure they
  # didn't include it in the .h file.
  # TODO(unknown): Do a better job of finding .h files so we are confident that
  # not having the .h file means there isn't one.
  if filename.endswith('.cc') and not header_found:
    return
  # All the lines have been processed, report the errors found.
  for required_header_unstripped in required:
    template = required[required_header_unstripped][1]
    if required_header_unstripped.strip('<>"') not in include_dict:
      error(filename, required[required_header_unstripped][0],
            'build/include_what_you_use', 4,
            'Add #include ' + required_header_unstripped + ' for ' + template)
# Matches 'make_pair<...>', i.e. make_pair with explicit template arguments.
_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
  """Check that make_pair's template arguments are deduced.

  Explicit template arguments on make_pair are flagged for C++11
  compatibility (see the emitted message for the recommended alternatives).

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)
  if match:
    error(filename, linenum, 'build/explicit_make_pair',
          4,
          'For C++11-compatibility, omit template arguments from make_pair'
          ' OR use pair directly OR if appropriate, construct a pair directly')
def CheckDefaultLambdaCaptures(filename, clean_lines, linenum, error):
  """Check that default lambda captures are not used.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  # A default capture starts the introducer with '[=' or '[&'.
  match = Match(r'^(.*)\[\s*(?:=|&[^\w])', line)
  if match:
    # Found a potential default capture; confirm it is a lambda by checking
    # that the matching ']' is followed by '{' (body) or '(' (parameter
    # list).  If the bracket closes into anything else
    # (for compound-statement), it's not a lambda.
    line, _, pos = CloseExpression(clean_lines, linenum, len(match.group(1)))
    if pos >= 0 and Match(r'^\s*[{(]', line[pos:]):
      error(filename, linenum, 'build/c++11',
            4,
            'Default lambda captures are an unapproved C++ feature.')
def CheckRedundantVirtual(filename, clean_lines, linenum, error):
  """Check if line contains a redundant "virtual" function-specifier.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Look for "virtual" on current line.
  line = clean_lines.elided[linenum]
  virtual = Match(r'^(.*\bvirtual\b)', line)
  if not virtual: return
  # Look for the next opening parenthesis: the start of the parameter list.
  # This fails for declarations that return
  # decltype() or other things that use parentheses, but csearch suggests
  # that this is rare.
  end_col = -1
  end_line = -1
  start_col = len(virtual.group(1))
  for start_line in xrange(linenum, min(linenum + 3, clean_lines.NumLines())):
    line = clean_lines.elided[start_line][start_col:]
    parameter_list = Match(r'^([^(]*)\(', line)
    if parameter_list:
      # Match parentheses to find the end of the parameter list
      (_, end_line, end_col) = CloseExpression(
          clean_lines, start_line, start_col + len(parameter_list.group(1)))
      break
    start_col = 0
  if end_col < 0:
    return  # Couldn't find end of parameter list, give up
  # Look for "override" or "final" within the three lines after the
  # parameter list.
  for i in xrange(end_line, min(end_line + 3, clean_lines.NumLines())):
    line = clean_lines.elided[i][end_col:]
    match = Search(r'\b(override|final)\b', line)
    if match:
      error(filename, linenum, 'readability/inheritance', 4,
            ('"virtual" is redundant since function is '
             'already declared as "%s"' % match.group(1)))
    # After the first line, scan each following line from its beginning.
    end_col = 0
    if Search(r'[^\w]\s*$', line):
      break
def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error):
  """Check if line contains a redundant "override" or "final" virt-specifier.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # "override" adds nothing when "final" is also present (see the message).
  line = clean_lines.elided[linenum]
  if Search(r'\boverride\b', line) and Search(r'\bfinal\b', line):
    error(filename, linenum, 'readability/inheritance', 4,
          ('"override" is redundant since function is '
           'already declared as "final"'))
def IsBlockInNameSpace(nesting_state, is_forward_declaration):
  """Checks that the new block is directly in a namespace.

  Args:
    nesting_state: The NestingState object that contains info about our state.
    is_forward_declaration: Whether the block is a forward-declared class.
  Returns:
    Whether or not the new block is directly in a namespace.
  """
  stack = nesting_state.stack
  if is_forward_declaration:
    # A forward declaration only needs the innermost enclosing block to be
    # a namespace.
    return len(stack) >= 1 and isinstance(stack[-1], _NamespaceInfo)
  # A full block qualifies when its own entry sits directly on top of a
  # namespace entry and has opted into namespace-indentation checking.
  return (len(stack) > 1 and
          stack[-1].check_namespace_indentation and
          isinstance(stack[-2], _NamespaceInfo))
def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
                                    raw_lines_no_comments, linenum):
  """This method determines if we should apply our namespace indentation check.

  Args:
    nesting_state: The current nesting state.
    is_namespace_indent_item: Whether the construct just parsed is one that
      is subject to namespace-indentation checking.
    raw_lines_no_comments: The lines without the comments.
    linenum: The current line number we are processing.

  Returns:
    True if we should apply our namespace indentation check.
  """
  is_forward_declaration = IsForwardClassDeclaration(raw_lines_no_comments,
                                                     linenum)
  if not (is_namespace_indent_item or is_forward_declaration):
    return False
  # If we are in a macro, we do not want to check the namespace indentation.
  if IsMacroDefinition(raw_lines_no_comments, linenum):
    return False
  return IsBlockInNameSpace(nesting_state, is_forward_declaration)
def CheckItemIndentationInNamespace(filename, raw_lines_no_comments, linenum,
                                    error):
  """Flags any indented item directly inside a namespace.

  Args:
    filename: The name of the current file.
    raw_lines_no_comments: The lines without the comments.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = raw_lines_no_comments[linenum]
  # Any leading whitespace at all counts as indentation here.
  if Match(r'^\s+', line):
    error(filename, linenum, 'runtime/indentation_namespace', 4,
          'Do not indent within a namespace')
def ProcessLine(filename, file_extension, clean_lines, line,
                include_state, function_state, nesting_state, error,
                extra_check_functions=[]):
  """Processes a single line in the file.

  Args:
    filename: Filename of the file that is being processed.
    file_extension: The extension (dot not included) of the file.
    clean_lines: An array of strings, each representing a line of the file,
                 with comments stripped.
    line: Number of line being processed.
    include_state: An _IncludeState instance in which the headers are
                   inserted.
    function_state: A _FunctionState instance which counts function lines,
                    etc.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message.
    extra_check_functions: An array of additional check functions that will
                           be run on each source line.  Each function takes
                           4 arguments: filename, clean_lines, line, error.
                           (The mutable [] default is shared across calls,
                           but it is only iterated, never mutated, so this
                           is safe.)
  """
  raw_lines = clean_lines.raw_lines
  ParseNolintSuppressions(filename, raw_lines[line], line, error)
  nesting_state.Update(filename, clean_lines, line, error)
  CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
                               error)
  # Lines inside assembly blocks are skipped entirely.
  if nesting_state.InAsmBlock(): return
  CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
  CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
  CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
  CheckLanguage(filename, clean_lines, line, file_extension, include_state,
                nesting_state, error)
  CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
  CheckForNonStandardConstructs(filename, clean_lines, line,
                                nesting_state, error)
  CheckVlogArguments(filename, clean_lines, line, error)
  CheckPosixThreading(filename, clean_lines, line, error)
  CheckInvalidIncrement(filename, clean_lines, line, error)
  CheckMakePairUsesDeduction(filename, clean_lines, line, error)
  CheckDefaultLambdaCaptures(filename, clean_lines, line, error)
  CheckRedundantVirtual(filename, clean_lines, line, error)
  CheckRedundantOverrideOrFinal(filename, clean_lines, line, error)
  for check_fn in extra_check_functions:
    check_fn(filename, clean_lines, line, error)
def FlagCxx11Features(filename, clean_lines, linenum, error):
  """Flag those c++11 features that we only allow in certain places.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  # Flag #includes of unapproved C++11 headers.
  include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
  if include and include.group(1) in ('cfenv',
                                      'condition_variable',
                                      'fenv.h',
                                      'future',
                                      'mutex',
                                      'thread',
                                      'chrono',
                                      'ratio',
                                      'regex',
                                      'system_error',
                                     ):
    error(filename, linenum, 'build/c++11', 5,
          ('<%s> is an unapproved C++11 header.') % include.group(1))
  # The only place where we need to worry about C++11 keywords and library
  # features in preprocessor directives is in macro definitions.
  if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line): return
  # These are classes and free functions. The classes are always
  # mentioned as std::*, but we only catch the free functions if
  # they're not found by ADL. They're alphabetical by header.
  for top_name in (
      # type_traits
      'alignment_of',
      'aligned_union',
      # utility
      'forward',
      ):
    if Search(r'\bstd::%s\b' % top_name, line):
      error(filename, linenum, 'build/c++11', 5,
            ('std::%s is an unapproved C++11 class or function. Send c-style '
             'an example of where it would make your code more readable, and '
             'they may let you use it.') % top_name)
def ProcessFileData(filename, file_extension, lines, error,
                    extra_check_functions=[]):
  """Performs lint checks and reports any errors to the given error function.

  Args:
    filename: Filename of the file that is being processed.
    file_extension: The extension (dot not included) of the file.
    lines: An array of strings, each representing a line of the file.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message.
    extra_check_functions: An array of additional check functions that will
                           be run on each source line.  Each function takes
                           4 arguments: filename, clean_lines, line, error.
  """
  lines = (['// marker so line numbers and indices both start at 1'] + lines +
           ['// marker so line numbers end in a known way'])
  include_state = _IncludeState()
  function_state = _FunctionState()
  nesting_state = NestingState()
  ResetNolintSuppressions()
  CheckForCopyright(filename, lines, error)
  # Header guards only apply to header files.
  if file_extension == 'h':
    CheckForHeaderGuard(filename, lines, error)
  RemoveMultiLineComments(filename, lines, error)
  clean_lines = CleansedLines(lines)
  for line in xrange(clean_lines.NumLines()):
    ProcessLine(filename, file_extension, clean_lines, line,
                include_state, function_state, nesting_state, error,
                extra_check_functions)
    FlagCxx11Features(filename, clean_lines, line, error)
  nesting_state.CheckCompletedBlocks(filename, error)
  CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
  # We check here rather than inside ProcessLine so that we see raw
  # lines rather than "cleaned" lines.
  CheckForBadCharacters(filename, lines, error)
  CheckForNewlineAtEOF(filename, lines, error)
def ProcessConfigOverrides(filename):
  """Loads CPPLINT.cfg files and processes the config overrides they contain.

  Walks from the file's directory up to the filesystem root, reading each
  CPPLINT.cfg found and applying its 'set noparent', 'filter',
  'exclude_files' and 'linelength' options.

  Args:
    filename: The name of the file being processed by the linter.

  Returns:
    False if the current |filename| should not be processed further
    (i.e. it was excluded by a config file), True otherwise.
  """
  abs_filename = os.path.abspath(filename)
  cfg_filters = []
  keep_looking = True
  while keep_looking:
    abs_path, base_name = os.path.split(abs_filename)
    if not base_name:
      break  # Reached the root directory.
    cfg_file = os.path.join(abs_path, "CPPLINT.cfg")
    abs_filename = abs_path
    if not os.path.isfile(cfg_file):
      continue
    try:
      with open(cfg_file) as file_handle:
        for line in file_handle:
          line, _, _ = line.partition('#')  # Remove comments.
          if not line.strip():
            continue
          name, _, val = line.partition('=')
          name = name.strip()
          val = val.strip()
          if name == 'set noparent':
            # Stop walking up the directory tree after this config file.
            keep_looking = False
          elif name == 'filter':
            cfg_filters.append(val)
          elif name == 'exclude_files':
            # When matching exclude_files pattern, use the base_name of
            # the current file name or the directory name we are processing.
            # For example, if we are checking for lint errors in /foo/bar/baz.cc
            # and we found the .cfg file at /foo/CPPLINT.cfg, then the config
            # file's "exclude_files" filter is meant to be checked against "bar"
            # and not "baz" nor "bar/baz.cc".
            if base_name:
              pattern = re.compile(val)
              if pattern.match(base_name):
                sys.stderr.write('Ignoring "%s": file excluded by "%s". '
                                 'File path component "%s" matches '
                                 'pattern "%s"\n' %
                                 (filename, cfg_file, base_name, val))
                return False
          elif name == 'linelength':
            global _line_length
            try:
              _line_length = int(val)
            except ValueError:
              sys.stderr.write('Line length must be numeric.')
          else:
            sys.stderr.write(
                'Invalid configuration option (%s) in file %s\n' %
                (name, cfg_file))
    except IOError:
      sys.stderr.write(
          "Skipping config file '%s': Can't open for reading\n" % cfg_file)
      keep_looking = False
  # Apply all the accumulated filters in reverse order (top-level directory
  # config options having the least priority).
  # (Renamed the loop variable from 'filter', which shadowed the builtin.)
  for cfg_filter in reversed(cfg_filters):
    _AddFilters(cfg_filter)
  return True
def ProcessFile(filename, vlevel, extra_check_functions=[]):
  """Does google-lint on a single file.

  Args:
    filename: The name of the file to parse.

    vlevel: The level of errors to report.  Every error of confidence
    >= verbose_level will be reported.  0 is a good default.

    extra_check_functions: An array of additional check functions that will
                           be run on each source line.  Each function takes
                           4 arguments: filename, clean_lines, line, error.
  """
  _SetVerboseLevel(vlevel)
  _BackupFilters()
  if not ProcessConfigOverrides(filename):
    _RestoreFilters()
    return
  lf_lines = []
  crlf_lines = []
  try:
    # Support the UNIX convention of using "-" for stdin.  Note that
    # we are not opening the file with universal newline support
    # (which codecs doesn't support anyway), so the resulting lines do
    # contain trailing '\r' characters if we are reading a file that
    # has CRLF endings.
    # If after the split a trailing '\r' is present, it is removed
    # below.
    if filename == '-':
      lines = codecs.StreamReaderWriter(sys.stdin,
                                        codecs.getreader('utf8'),
                                        codecs.getwriter('utf8'),
                                        'replace').read().split('\n')
    else:
      lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
    # Remove trailing '\r'.
    # The -1 accounts for the extra trailing blank line we get from split()
    for linenum in range(len(lines) - 1):
      if lines[linenum].endswith('\r'):
        lines[linenum] = lines[linenum].rstrip('\r')
        crlf_lines.append(linenum + 1)
      else:
        lf_lines.append(linenum + 1)
  except IOError:
    sys.stderr.write(
        "Skipping input '%s': Can't open for reading\n" % filename)
    _RestoreFilters()
    return
  # Note, if no dot is found, this will give the entire filename as the ext.
  file_extension = filename[filename.rfind('.') + 1:]
  # When reading from stdin, the extension is unknown, so no cpplint tests
  # should rely on the extension.
  if filename != '-' and file_extension not in _valid_extensions:
    sys.stderr.write('Ignoring %s; not a valid file name '
                     '(%s)\n' % (filename, ', '.join(_valid_extensions)))
  else:
    ProcessFileData(filename, file_extension, lines, Error,
                    extra_check_functions)
    # If end-of-line sequences are a mix of LF and CR-LF, issue
    # warnings on the lines with CR.
    #
    # Don't issue any warnings if all lines are uniformly LF or CR-LF,
    # since critique can handle these just fine, and the style guide
    # doesn't dictate a particular end of line sequence.
    #
    # We can't depend on os.linesep to determine what the desired
    # end-of-line sequence should be, since that will return the
    # server-side end-of-line sequence.
    if lf_lines and crlf_lines:
      # Warn on every line with CR.  An alternative approach might be to
      # check whether the file is mostly CRLF or just LF, and warn on the
      # minority, we bias toward LF here since most tools prefer LF.
      for linenum in crlf_lines:
        Error(filename, linenum, 'whitespace/newline', 1,
              'Unexpected \\r (^M) found; better to use only \\n')
  sys.stderr.write('Done processing %s\n' % filename)
  _RestoreFilters()
def PrintUsage(message):
  """Prints the usage string and exits, optionally with an error message.

  Args:
    message: The optional error message; when given, the process exits via
      sys.exit() with that message (and thus a non-zero status).
  """
  sys.stderr.write(_USAGE)
  if not message:
    sys.exit(1)
  sys.exit('\nFATAL ERROR: ' + message)
def PrintCategories():
  """Prints a list of all the error-categories used by error messages.

  These are the categories used to filter messages via --filter.
  """
  sys.stderr.write(''.join('  %s\n' % cat for cat in _ERROR_CATEGORIES))
  sys.exit(0)
def ParseArguments(args):
  """Parses the command line arguments.

  This may set the output format and verbosity level as side-effects.

  Args:
    args: The command line arguments.

  Returns:
    The list of filenames to lint.
  """
  try:
    (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
                                                 'counting=',
                                                 'filter=',
                                                 'root=',
                                                 'linelength=',
                                                 'extensions='])
  except getopt.GetoptError:
    PrintUsage('Invalid arguments.')
  verbosity = _VerboseLevel()
  output_format = _OutputFormat()
  filters = ''
  counting_style = ''
  for (opt, val) in opts:
    if opt == '--help':
      PrintUsage(None)
    elif opt == '--output':
      if val not in ('emacs', 'vs7', 'eclipse'):
        PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
      output_format = val
    elif opt == '--verbose':
      verbosity = int(val)
    elif opt == '--filter':
      filters = val
      # '--filter=' with an empty value lists the available categories.
      if not filters:
        PrintCategories()
    elif opt == '--counting':
      if val not in ('total', 'toplevel', 'detailed'):
        PrintUsage('Valid counting options are total, toplevel, and detailed')
      counting_style = val
    elif opt == '--root':
      global _root
      _root = val
    elif opt == '--linelength':
      global _line_length
      try:
        _line_length = int(val)
      except ValueError:
        PrintUsage('Line length must be digits.')
    elif opt == '--extensions':
      global _valid_extensions
      try:
        _valid_extensions = set(val.split(','))
      except ValueError:
        # Fixed typo in the message ("seperated" -> "separated").  Note that
        # str.split never raises ValueError, so this branch is effectively
        # unreachable; kept for backward compatibility.
        PrintUsage('Extensions must be comma separated list.')
  if not filenames:
    PrintUsage('No files were specified.')
  _SetOutputFormat(output_format)
  _SetVerboseLevel(verbosity)
  _SetFilters(filters)
  _SetCountingStyle(counting_style)
  return filenames
def main():
  """Parses arguments, lints every named file, and exits with error status."""
  filenames = ParseArguments(sys.argv[1:])
  # Change stderr to write with replacement characters so we don't die
  # if we try to print something containing non-ASCII characters.
  sys.stderr = codecs.StreamReaderWriter(sys.stderr,
                                         codecs.getreader('utf8'),
                                         codecs.getwriter('utf8'),
                                         'replace')
  _cpplint_state.ResetErrorCounts()
  for filename in filenames:
    ProcessFile(filename, _cpplint_state.verbose_level)
  _cpplint_state.PrintErrorCounts()
  # Exit status is 1 if any lint error was reported, 0 otherwise.
  sys.exit(_cpplint_state.error_count > 0)
# Script entry point.
if __name__ == '__main__':
  main()
| true | true |
f7397f66da06a94adfa8579e5135288d2a19efe2 | 425 | py | Python | src/kol/request/TakeMeatFromClosetRequest.py | danheath/temppykol | 7f9621b44df9f9d2d9fc0a5b2a06db116b9ccfab | [
"BSD-3-Clause"
] | 19 | 2015-02-16T08:30:49.000Z | 2020-05-01T06:06:33.000Z | src/kol/request/TakeMeatFromClosetRequest.py | danheath/temppykol | 7f9621b44df9f9d2d9fc0a5b2a06db116b9ccfab | [
"BSD-3-Clause"
] | 5 | 2015-01-13T23:01:54.000Z | 2016-11-30T15:23:43.000Z | src/kol/request/TakeMeatFromClosetRequest.py | danheath/temppykol | 7f9621b44df9f9d2d9fc0a5b2a06db116b9ccfab | [
"BSD-3-Clause"
] | 19 | 2015-05-28T09:36:19.000Z | 2022-03-15T23:19:29.000Z | from GenericRequest import GenericRequest
class TakeMeatFromClosetRequest(GenericRequest):
    """Takes meat out of the player's closet.

    Bug fix: the previous docstring said "Adds meat to the player's closet",
    but the posted action is "takemeat", which withdraws meat.
    """
    def __init__(self, session, meat=""):
        """Builds the closet-withdrawal request.

        Args:
            session: The active KoL session to issue the request with.
            meat: The amount of meat to take out of the closet.
        """
        super(TakeMeatFromClosetRequest, self).__init__(session)
        self.url = session.serverURL + "closet.php"
        self.requestData["pwd"] = session.pwd
        self.requestData["action"] = "takemeat"  # withdraw, not deposit
        self.requestData["amt"] = meat
| 35.416667 | 64 | 0.691765 | from GenericRequest import GenericRequest
class TakeMeatFromClosetRequest(GenericRequest):
    """Takes meat out of the player's closet (posts action "takemeat")."""
    def __init__(self, session, meat=""):
        super(TakeMeatFromClosetRequest, self).__init__(session)
        self.url = session.serverURL + "closet.php"
        self.requestData["pwd"] = session.pwd
        # "takemeat" withdraws; "amt" is the amount of meat requested.
        self.requestData["action"] = "takemeat"
        self.requestData["amt"] = meat
f7397fbb415af0ad08cbdf9fc88f1effdda80b6b | 765 | py | Python | scripts/init-all-courses.py | osmaelo/university-setup | 16ec634e1c64dac4cebdd5c071b22907ec6f8848 | [
"MIT"
] | 245 | 2019-09-15T19:17:47.000Z | 2022-03-29T06:09:52.000Z | scripts/init-all-courses.py | osmaelo/university-setup | 16ec634e1c64dac4cebdd5c071b22907ec6f8848 | [
"MIT"
] | 9 | 2019-12-24T20:02:52.000Z | 2020-11-24T12:25:02.000Z | scripts/init-all-courses.py | osmaelo/university-setup | 16ec634e1c64dac4cebdd5c071b22907ec6f8848 | [
"MIT"
] | 76 | 2019-09-16T00:10:24.000Z | 2022-02-19T01:27:48.000Z | #!/bin/python3
from courses import Courses
# For every course, (re)write a skeleton master.tex: it inputs the shared
# preamble, sets the course title, and contains the "% start lectures" /
# "% end lectures" marker lines between which individual lecture inputs are
# presumably spliced later by other tooling -- TODO confirm.  Also touches a
# 'master.tex.latexmain' marker file and ensures a figures/ directory exists.
for course in Courses():
    lectures = course.lectures
    course_title = lectures.course.info["title"]
    # NOTE(review): the fr'...' strings below contain no {placeholders}, so
    # the f prefix is redundant (but harmless).
    lines = [r'\documentclass[a4paper]{article}',
             r'\input{../preamble.tex}',
             fr'\title{{{course_title}}}',
             r'\begin{document}',
             r'    \maketitle',
             r'    \tableofcontents',
             fr' % start lectures',
             fr' % end lectures',
             r'\end{document}'
        ]
    lectures.master_file.touch()
    lectures.master_file.write_text('\n'.join(lines))
    (lectures.root / 'master.tex.latexmain').touch()
    (lectures.root / 'figures').mkdir(exist_ok=True)
| 36.428571 | 57 | 0.512418 |
from courses import Courses
# For every course, (re)write a skeleton master.tex: it inputs the shared
# preamble, sets the course title, and contains the "% start lectures" /
# "% end lectures" marker lines between which individual lecture inputs are
# presumably spliced later by other tooling -- TODO confirm.  Also touches a
# 'master.tex.latexmain' marker file and ensures a figures/ directory exists.
for course in Courses():
    lectures = course.lectures
    course_title = lectures.course.info["title"]
    # NOTE(review): the fr'...' strings below contain no {placeholders}, so
    # the f prefix is redundant (but harmless).
    lines = [r'\documentclass[a4paper]{article}',
             r'\input{../preamble.tex}',
             fr'\title{{{course_title}}}',
             r'\begin{document}',
             r'    \maketitle',
             r'    \tableofcontents',
             fr' % start lectures',
             fr' % end lectures',
             r'\end{document}'
             ]
    lectures.master_file.touch()
    lectures.master_file.write_text('\n'.join(lines))
    (lectures.root / 'master.tex.latexmain').touch()
    (lectures.root / 'figures').mkdir(exist_ok=True)
| true | true |
f73980d50f3fd61ff3f286d51df51ca3393b6781 | 1,085 | py | Python | utils/kaggle.py | AdityaSidharta/kaggle_pneumonia | d4c2f257257aff3a30f0742f534fcf8975fc0815 | [
"MIT"
] | null | null | null | utils/kaggle.py | AdityaSidharta/kaggle_pneumonia | d4c2f257257aff3a30f0742f534fcf8975fc0815 | [
"MIT"
] | null | null | null | utils/kaggle.py | AdityaSidharta/kaggle_pneumonia | d4c2f257257aff3a30f0742f534fcf8975fc0815 | [
"MIT"
def to_predictionstring(x):
    """Format one prediction row as the string "prob x y width height".

    Args:
        x: An iterable of (prob, x, y, width, height).

    Returns:
        The five values joined by single spaces.
    """
    confidence, x_min, y_min, box_width, box_height = x
    return "{} {} {} {} {}".format(
        confidence, x_min, y_min, box_width, box_height)
def create_predict_df(label_predict_df, bb_predict_df):
    """Join per-image class probabilities with bounding boxes.

    Args:
        label_predict_df: DataFrame with at least ``name`` and ``prob`` columns.
        bb_predict_df: DataFrame with ``name`` and a ``label`` column holding
            "x y width height" as one space-separated string.

    Returns:
        DataFrame with columns patientId, Target, prob, x, y, width, height
        (box coordinates stay strings, as parsed from ``label``).
    """
    predict_df = label_predict_df.merge(bb_predict_df, how="left", on="name")
    predict_df["patientId"] = predict_df["name"]
    # Split the "x y width height" string once instead of four times.
    box_parts = predict_df["label"].str.split(" ")
    predict_df["x"] = box_parts.str[0]
    predict_df["y"] = box_parts.str[1]
    predict_df["width"] = box_parts.str[2]
    predict_df["height"] = box_parts.str[3]
    # Binarize the classifier probability at the conventional 0.5 threshold.
    predict_df["Target"] = predict_df["prob"].apply(lambda p: 1. if p >= 0.5 else 0.)
    predict_df = predict_df[
        ["patientId", "Target", "prob", "x", "y", "width", "height"]
    ]
    return predict_df
def create_kaggle_df(predict_df):
    """Reduce a prediction frame to the two-column Kaggle submission format."""
    # Render each row's (prob, x, y, width, height) as one space-separated string.
    box_columns = ["prob", "x", "y", "width", "height"]
    predict_df["PredictionString"] = predict_df[box_columns].apply(
        to_predictionstring, axis=1
    )
    return predict_df[["patientId", "PredictionString"]]
| 41.730769 | 85 | 0.635023 | def to_predictionstring(x):
    # Render one (prob, x, y, width, height) record as the space-separated
    # "PredictionString" value that the Kaggle submission format expects.
    prob, x, y, width, height = x
    return "{} {} {} {} {}".format(prob, x, y, width, height)
def create_predict_df(label_predict_df, bb_predict_df):
    """Join per-image class probabilities with bounding boxes.

    Args:
        label_predict_df: DataFrame with at least ``name`` and ``prob`` columns.
        bb_predict_df: DataFrame with ``name`` and a ``label`` column holding
            "x y width height" as one space-separated string.

    Returns:
        DataFrame with columns patientId, Target, prob, x, y, width, height
        (box coordinates stay strings, as parsed from ``label``).
    """
    predict_df = label_predict_df.merge(bb_predict_df, how="left", on="name")
    predict_df["patientId"] = predict_df["name"]
    # Split the "x y width height" string once instead of four times.
    box_parts = predict_df["label"].str.split(" ")
    predict_df["x"] = box_parts.str[0]
    predict_df["y"] = box_parts.str[1]
    predict_df["width"] = box_parts.str[2]
    predict_df["height"] = box_parts.str[3]
    # Binarize the classifier probability at the conventional 0.5 threshold.
    predict_df["Target"] = predict_df["prob"].apply(lambda p: 1. if p >= 0.5 else 0.)
    predict_df = predict_df[
        ["patientId", "Target", "prob", "x", "y", "width", "height"]
    ]
    return predict_df
def create_kaggle_df(predict_df):
    """Reduce a prediction frame to the two-column Kaggle submission format."""
    # Render each row's (prob, x, y, width, height) as one space-separated string.
    box_columns = ["prob", "x", "y", "width", "height"]
    predict_df["PredictionString"] = predict_df[box_columns].apply(
        to_predictionstring, axis=1
    )
    return predict_df[["patientId", "PredictionString"]]
| true | true |
f739810c170f625cf1a359eda8729aeef7377da5 | 2,720 | py | Python | Python Programs & Projects/Queue.py | EdgarCastillo101/Python | 3d47aaaa911def287e3b7a02f8c637e87539ffe7 | [
"MIT"
] | 2 | 2021-05-25T16:17:46.000Z | 2021-05-25T16:17:47.000Z | Python Programs & Projects/Queue.py | EdgarCastillo101/Python | 3d47aaaa911def287e3b7a02f8c637e87539ffe7 | [
"MIT"
] | null | null | null | Python Programs & Projects/Queue.py | EdgarCastillo101/Python | 3d47aaaa911def287e3b7a02f8c637e87539ffe7 | [
"MIT"
] | null | null | null | # Queue
# Queue is a FIFO data structure - - first-in, first-out.
# Deque is a double-ended queue, but we can use it for our queue.
# We use append() to enqueue an item, and popleft() to dequeue an item.
# See Python docs for deque.
"""
from collections import deque
my_queue = deque()
my_queue.append(5)
my_queue.append(10)
print(my_queue)
print(my_queue.popleft())
"""
# Python single-ended queue wrapper class using deque
from collections import deque
class Queue():
    """Single-ended FIFO queue wrapper around collections.deque."""

    def __init__(self):
        self.queue = deque()  # underlying storage
        self.size = 0         # number of items currently queued

    def enqueue(self, item):
        """Add an item to the back of the queue."""
        self.queue.append(item)
        self.size += 1

    def dequeue(self, item=None):
        """Remove and return the front item, or None if the queue is empty.

        The ``item`` argument was never used; it now has a default so it can
        be omitted, while old call sites that passed it keep working.
        """
        if self.size > 0:
            self.size -= 1
            return self.queue.popleft()
        else:
            return None

    def peek(self):
        """Return the front item without removing it, or None if empty."""
        if self.size > 0:
            ret_val = self.queue.popleft()
            # Bug fix: was ``queue.appendleft(...)`` (NameError) -- the item
            # must be put back on *self.queue*.
            self.queue.appendleft(ret_val)
            return ret_val
        else:
            return None

    def get_size(self):
        """Return the number of queued items."""
        return self.size
# Maxheap
class MaxHeap:
    """Array-backed max-heap with a sentinel value at index 0.

    The backing list keeps an unused element at index 0 so the parent/child
    index arithmetic (i // 2, 2 * i, 2 * i + 1) stays simple.
    """

    def __init__(self, items=[]):
        # NOTE(review): the mutable default is harmless here because it is
        # only iterated, never stored or mutated.
        super().__init__()
        self.heap = [0]
        for item in items:
            self.heap.append(item)
            self.__floatUp(len(self.heap) - 1)

    def push(self, data):
        """Insert a value into the heap."""
        self.heap.append(data)
        self.__floatUp(len(self.heap) - 1)

    def peek(self):
        """Return the largest value without removing it, or False if empty.

        Bug fix: the original indexed ``self.heap[1]`` unconditionally, which
        raised IndexError on an empty heap, and a falsy top value such as 0
        was wrongly reported as False.
        """
        if len(self.heap) > 1:
            return self.heap[1]
        return False

    def pop(self):
        """Remove and return the largest value, or False if the heap is empty."""
        if len(self.heap) > 2:
            self.__swap(1, len(self.heap) - 1)
            largest_value = self.heap.pop()
            self.__bubbleDown(1)
        elif len(self.heap) == 2:
            largest_value = self.heap.pop()
        else:
            largest_value = False
        return largest_value

    def __swap(self, i, j):
        # Exchange two positions of the backing list.
        self.heap[i], self.heap[j] = self.heap[j], self.heap[i]

    def __floatUp(self, index):
        # Restore the heap property upwards from index after an insert.
        parent = index // 2
        if index <= 1:
            return
        elif self.heap[index] > self.heap[parent]:
            self.__swap(index, parent)
            self.__floatUp(parent)

    def __bubbleDown(self, index):
        # Restore the heap property downwards from index after a pop.
        left = index * 2
        right = index * 2 + 1
        largest = index
        if len(self.heap) > left and self.heap[largest] < self.heap[left]:
            largest = left
        if len(self.heap) > right and self.heap[largest] < self.heap[right]:
            largest = right
        if largest != index:
            self.__swap(index, largest)
            self.__bubbleDown(largest)

    def __str__(self):
        return str(self.heap)
# Quick demo / smoke test of the MaxHeap implementation.
m = MaxHeap([95, 3, 21])
m.push(10)
print(m)          # full backing list, including the index-0 sentinel
print(m.pop())    # largest element (95)
print(m.peek())   # next largest, left in place
| 24.727273 | 76 | 0.557721 |
from collections import deque
class Queue():
    """Single-ended FIFO queue wrapper around collections.deque."""

    def __init__(self):
        self.queue = deque()  # underlying storage
        self.size = 0         # number of items currently queued

    def enqueue(self, item):
        """Add an item to the back of the queue."""
        self.queue.append(item)
        self.size += 1

    def dequeue(self, item=None):
        """Remove and return the front item, or None if the queue is empty.

        The ``item`` argument was never used; it now has a default so it can
        be omitted, while old call sites that passed it keep working.
        """
        if self.size > 0:
            self.size -= 1
            return self.queue.popleft()
        else:
            return None

    def peek(self):
        """Return the front item without removing it, or None if empty."""
        if self.size > 0:
            ret_val = self.queue.popleft()
            # Bug fix: was ``queue.appendleft(...)`` (NameError) -- the item
            # must be put back on *self.queue*.
            self.queue.appendleft(ret_val)
            return ret_val
        else:
            return None

    def get_size(self):
        """Return the number of queued items."""
        return self.size
class MaxHeap:
    """Array-backed max-heap with a sentinel value at index 0.

    The backing list keeps an unused element at index 0 so the parent/child
    index arithmetic (i // 2, 2 * i, 2 * i + 1) stays simple.
    """

    def __init__(self, items=[]):
        # NOTE(review): the mutable default is harmless here because it is
        # only iterated, never stored or mutated.
        super().__init__()
        self.heap = [0]
        for item in items:
            self.heap.append(item)
            self.__floatUp(len(self.heap) - 1)

    def push(self, data):
        """Insert a value into the heap."""
        self.heap.append(data)
        self.__floatUp(len(self.heap) - 1)

    def peek(self):
        """Return the largest value without removing it, or False if empty.

        Bug fix: the original indexed ``self.heap[1]`` unconditionally, which
        raised IndexError on an empty heap, and a falsy top value such as 0
        was wrongly reported as False.
        """
        if len(self.heap) > 1:
            return self.heap[1]
        return False

    def pop(self):
        """Remove and return the largest value, or False if the heap is empty."""
        if len(self.heap) > 2:
            self.__swap(1, len(self.heap) - 1)
            largest_value = self.heap.pop()
            self.__bubbleDown(1)
        elif len(self.heap) == 2:
            largest_value = self.heap.pop()
        else:
            largest_value = False
        return largest_value

    def __swap(self, i, j):
        # Exchange two positions of the backing list.
        self.heap[i], self.heap[j] = self.heap[j], self.heap[i]

    def __floatUp(self, index):
        # Restore the heap property upwards from index after an insert.
        parent = index // 2
        if index <= 1:
            return
        elif self.heap[index] > self.heap[parent]:
            self.__swap(index, parent)
            self.__floatUp(parent)

    def __bubbleDown(self, index):
        # Restore the heap property downwards from index after a pop.
        left = index * 2
        right = index * 2 + 1
        largest = index
        if len(self.heap) > left and self.heap[largest] < self.heap[left]:
            largest = left
        if len(self.heap) > right and self.heap[largest] < self.heap[right]:
            largest = right
        if largest != index:
            self.__swap(index, largest)
            self.__bubbleDown(largest)

    def __str__(self):
        return str(self.heap)
# Quick demo / smoke test of the MaxHeap implementation.
m = MaxHeap([95, 3, 21])
m.push(10)
print(m)          # full backing list, including the index-0 sentinel
print(m.pop())    # largest element (95)
print(m.peek())   # next largest, left in place
| true | true |
f7398156ae31a52e060987da62538da487c61775 | 3,448 | py | Python | selenium_egencia.py | ghandic/egenica | 000956721115d190d5e2af2ee0a01ac3f84ff55c | [
"MIT"
] | null | null | null | selenium_egencia.py | ghandic/egenica | 000956721115d190d5e2af2ee0a01ac3f84ff55c | [
"MIT"
] | null | null | null | selenium_egencia.py | ghandic/egenica | 000956721115d190d5e2af2ee0a01ac3f84ff55c | [
"MIT"
] | null | null | null | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
class Egencia(object):
    """Selenium wrapper for scraping "2 X Points" hotel offers from Egencia UK.

    NOTE(review): assumes a remote Chrome WebDriver (e.g. Selenium standalone
    server) is already listening on localhost:4444 -- confirm before use.
    Logs in with the given credentials on construction.
    """

    def __init__(self, credentials):
        self.driver = webdriver.Remote('http://localhost:4444/wd/hub', webdriver.DesiredCapabilities.CHROME)
        self.__login(credentials)
        self.standard_wait_time = 10  # seconds to let pages settle between steps

    def close(self):
        """Quit the browser session."""
        self.driver.quit()

    def __login(self, credentials):
        """Log in via the public home page.

        Args:
            credentials: dict with "email" and "password" keys.
        """
        # Go to the home page to log in
        self.driver.get("https://www.egencia.com/public/uk/")
        self.driver.find_element_by_id("login").click()
        self.driver.find_element_by_id("userName").send_keys(credentials["email"])
        self.driver.find_element_by_id("password").send_keys(credentials["password"])
        self.driver.find_element_by_id("authLoginSubmit").click()

    def get_double_points_offers(self, checkin, checkout, brand, lat, lon, maxspend):
        """Search hotels and collect double-points offers within budget.

        Args:
            checkin, checkout: stay dates as strings in the site's URL format.
            brand: hotel name filter.
            lat, lon: search coordinates as strings.
            maxspend: maximum nightly rate (GBP) to accept.

        Returns:
            List of dicts: {"name": ..., "offers": [{"description", "rate"}]}
            for every hotel that has at least one matching offer.
        """
        double_points_offers = []

        self.driver.get("https://www.egencia.co.uk/hotels/search?" + \
                        "lon=" + lon + "&lat=" + lat + \
                        "&start_date=" + checkin + \
                        "&end_date=" + checkout + \
                        "&hotel_name=" + brand)
        time.sleep(self.standard_wait_time)

        # Best-effort dismissal of a possible modal dialog. Bug fix: the
        # original bare "except:" also swallowed KeyboardInterrupt/SystemExit.
        try:
            self.driver.find_elements_by_class_name("modal-close")[0].click()
        except Exception:
            pass
        time.sleep(self.standard_wait_time)

        available_hotels = self.driver.find_elements_by_class_name("hotel-available")

        for hotel in available_hotels:
            # Find the hotel name and make an empty entry
            hotel_name = hotel.find_elements_by_class_name("hotel-name-for-coworker-bookings")[0].get_attribute('innerHTML')
            hotels_dp_offers = {"name": hotel_name, "offers":[]}

            # Click on the hotel
            hotel.click()
            time.sleep(self.standard_wait_time)

            # Show all of the rates
            self.driver.find_element_by_id("hotel-details-view-all-rates-toggle").click()
            time.sleep(self.standard_wait_time)

            # Look through all the rates for a double points offer within budget
            for rate in self.driver.find_elements_by_class_name("rate-tile"):
                dp_offers = rate.find_elements_by_xpath('.//span[contains(text(), "2 X Points")]')
                dp_rate = float(rate.find_elements_by_class_name('price-details')[0].get_attribute('innerHTML').replace('£', ''))
                if len(dp_offers) > 0 and dp_rate <= maxspend:
                    description = dp_offers[0].get_attribute('innerHTML')
                    hotels_dp_offers["offers"].append({"description": description, "rate": dp_rate})

            # If there are offers then add to the payload
            if len(hotels_dp_offers["offers"]) > 0:
                double_points_offers.append(hotels_dp_offers)

            # Return to view all the hotels
            self.driver.find_element_by_id("hotel-details-close-button").click()

        return double_points_offers
def make_pretty_message(offers):
    """Render double-points offers as a Slack-style markdown message.

    Args:
        offers: list of dicts with keys ``name`` and ``offers``; each offer is
            a dict with ``description`` and a numeric ``rate``.

    Returns:
        The formatted message string ("" for an empty list).
    """
    # Collect fragments and join once, instead of quadratic ``+=`` growth.
    fragments = []
    for hotel in offers:
        fragments.append("*{}*\n".format(hotel['name']))
        for offer in hotel["offers"]:
            fragments.append("_{}\n".format(offer["description"]))
            fragments.append("£{0:.2f}_\n".format(offer["rate"]))
        fragments.append("\n")
    return "".join(fragments)
| 37.478261 | 129 | 0.607889 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
class Egencia(object):
    """Selenium wrapper for scraping "2 X Points" hotel offers from Egencia UK.

    NOTE(review): assumes a remote Chrome WebDriver (e.g. Selenium standalone
    server) is already listening on localhost:4444 -- confirm before use.
    Logs in with the given credentials on construction.
    """

    def __init__(self, credentials):
        self.driver = webdriver.Remote('http://localhost:4444/wd/hub', webdriver.DesiredCapabilities.CHROME)
        self.__login(credentials)
        self.standard_wait_time = 10  # seconds to let pages settle between steps

    def close(self):
        """Quit the browser session."""
        self.driver.quit()

    def __login(self, credentials):
        """Log in via the public home page using credentials["email"/"password"]."""
        self.driver.get("https://www.egencia.com/public/uk/")
        self.driver.find_element_by_id("login").click()
        self.driver.find_element_by_id("userName").send_keys(credentials["email"])
        self.driver.find_element_by_id("password").send_keys(credentials["password"])
        self.driver.find_element_by_id("authLoginSubmit").click()

    def get_double_points_offers(self, checkin, checkout, brand, lat, lon, maxspend):
        """Search hotels; return [{"name", "offers": [{"description", "rate"}]}]
        for every hotel that has a "2 X Points" rate within ``maxspend``."""
        double_points_offers = []

        self.driver.get("https://www.egencia.co.uk/hotels/search?" + \
                        "lon=" + lon + "&lat=" + lat + \
                        "&start_date=" + checkin + \
                        "&end_date=" + checkout + \
                        "&hotel_name=" + brand)
        time.sleep(self.standard_wait_time)

        # Best-effort dismissal of a possible modal dialog. Bug fix: the
        # original bare "except:" also swallowed KeyboardInterrupt/SystemExit.
        try:
            self.driver.find_elements_by_class_name("modal-close")[0].click()
        except Exception:
            pass
        time.sleep(self.standard_wait_time)

        available_hotels = self.driver.find_elements_by_class_name("hotel-available")

        for hotel in available_hotels:
            hotel_name = hotel.find_elements_by_class_name("hotel-name-for-coworker-bookings")[0].get_attribute('innerHTML')
            hotels_dp_offers = {"name": hotel_name, "offers":[]}

            hotel.click()
            time.sleep(self.standard_wait_time)

            # Expand the full rate list for this hotel.
            self.driver.find_element_by_id("hotel-details-view-all-rates-toggle").click()
            time.sleep(self.standard_wait_time)

            for rate in self.driver.find_elements_by_class_name("rate-tile"):
                dp_offers = rate.find_elements_by_xpath('.//span[contains(text(), "2 X Points")]')
                dp_rate = float(rate.find_elements_by_class_name('price-details')[0].get_attribute('innerHTML').replace('£', ''))
                if len(dp_offers) > 0 and dp_rate <= maxspend:
                    description = dp_offers[0].get_attribute('innerHTML')
                    hotels_dp_offers["offers"].append({"description": description, "rate": dp_rate})

            if len(hotels_dp_offers["offers"]) > 0:
                double_points_offers.append(hotels_dp_offers)

            # Return to the hotel list view before the next iteration.
            self.driver.find_element_by_id("hotel-details-close-button").click()

        return double_points_offers
html = ""
for hotel in offers:
html += "*{}*\n".format(hotel['name'])
for offer in hotel["offers"]:
html += "_{}\n".format(offer["description"])
html += "£{0:.2f}_\n".format(offer["rate"])
html += "\n"
return html
| true | true |
f739832932d30cf4fcf2d8ee9fb77c4e75b99176 | 5,823 | py | Python | episcanpy/preprocessing/_readimpute.py | mruffalo/epiScanpy | bcb86347d2b8451c384f97162625c8d5efb27ffc | [
"BSD-3-Clause"
] | 96 | 2019-05-25T17:41:13.000Z | 2022-02-28T10:29:23.000Z | episcanpy/preprocessing/_readimpute.py | mruffalo/epiScanpy | bcb86347d2b8451c384f97162625c8d5efb27ffc | [
"BSD-3-Clause"
] | 43 | 2019-07-12T03:12:51.000Z | 2022-03-30T13:07:19.000Z | episcanpy/preprocessing/_readimpute.py | mruffalo/epiScanpy | bcb86347d2b8451c384f97162625c8d5efb27ffc | [
"BSD-3-Clause"
] | 28 | 2019-03-28T16:40:52.000Z | 2022-03-16T16:12:40.000Z | import numpy as np
import anndata as ad
import pandas as pd
def load_met_noimput(matrix_file, save=False, path=''):
    """
    Read a raw methylation count matrix (tab-separated, one cell per row)
    and convert it into an AnnData object.

    If the first line starts with 'sample_name' it is treated as a header
    holding the feature names; otherwise it is parsed as a data row.

    Write the matrix as .h5ad (AnnData object) if save is True.
    Return the AnnData object.
    """
    matrix = []
    cell_names = []
    feature_names = []
    with open(path+matrix_file) as f:
        # NOTE(review): [:-2] drops the last two characters of every line,
        # presumably a trailing '\t\n' -- verify against the file format.
        line = f.readline()[:-2].split('\t')
        if line[0] == 'sample_name':
            feature_names = line[1:]
        else:
            matrix.append(line[1:])
            cell_names.append(line[0])
        if matrix == []:
            # The first line was a header; read the first actual data row.
            line = f.readline()[:-2].split('\t')
            matrix.append(line[1:])
            cell_names.append(line[0])
        for line in f:
            line = line[:-2].split('\t')
            matrix.append(line[1:])
            cell_names.append(line[0])

    # NOTE(review): values stay strings here; np.array keeps a string dtype.
    matrix = np.array(matrix)
    if feature_names != []:
        adata = ad.AnnData(matrix, obs=pd.DataFrame(index=cell_names), var=pd.DataFrame(index=feature_names))
    else:
        adata = ad.AnnData(matrix, obs=pd.DataFrame(index=cell_names))

    adata.uns['omic'] = 'methylation'
    adata.uns['imputation'] = 'no_imputation'

    if save:
        # Bug fix: the original used ".".split(matrix_file)[0], which splits
        # the literal string "." and always produced the name "..h5ad".
        adata.write(matrix_file.split(".")[0] + '.h5ad')
    return(adata)
def imputation_met(adata, number_cell_covered=10, imputation_value='mean', save=None, copy=False):
    """
    Impute missing values in methylation level matrices. The imputation is
    based on the mean (or median) methylation value of the given variable.

    It also filters out variables that are covered in an insufficient number
    of cells, in order to reduce the feature space to meaningful variables
    and discard potential coverage biases.

    Parameters
    ----------
    adata: AnnData object containing 'nan'
    number_cell_covered: minimum number of cells to be covered in order to retain a variable
    imputation_value: impute missing values with the 'mean' or the 'median' of the feature
    save: if not None, write the result to this .h5ad file name
    copy: pass copy=True to get the new AnnData object returned

    Return
    ------
    Return a new AnnData object when copy is True; otherwise returns None.
    """
    length1 = len(adata.X[0, :])
    length2 = len(adata.X[:, 0])

    # Per-cell and per-feature coverage / methylation summaries. Note that
    # nansum/length treats missing entries as zero; this is kept from the
    # original implementation for backward compatibility.
    adata.obs['coverage_cells'] = [length1 - np.isnan(line).sum() for line in adata.X]
    adata.obs['mean_cell_methylation'] = [np.nansum(line)/length1 for line in adata.X]
    adata.var['coverage_feature'] = [length2 - np.isnan(line).sum() for line in adata.X.T]
    adata.var['mean_feature_methylation'] = [np.nansum(line)/length2 for line in adata.X.T]

    # Keep only the features covered in enough cells.
    adata2 = adata[:, adata.var['coverage_feature'] >= number_cell_covered].copy()

    for index in range(len(adata2.var_names.tolist())):
        # Bug fix: imputation_value used to be ignored and the mean was
        # always used; 'median' is now supported as documented.
        if imputation_value == 'median':
            fill_value = np.nanmedian(adata2.X[:, index])
        else:
            fill_value = adata2.var['mean_feature_methylation'][index]
        adata2.X[:, index] = np.nan_to_num(adata2.X[:, index], nan=fill_value)

    if save is not None:
        # Bug fix: save.rstrip('.h5ad') stripped any trailing characters from
        # the set {., h, 5, a, d} and could mangle names such as "data5".
        base_name = save[:-len('.h5ad')] if save.endswith('.h5ad') else save
        adata2.write(base_name + '.h5ad')

    if copy == False:
        # NOTE(review): this only rebinds the local name; the caller's object
        # is NOT modified in place. Kept as-is for backward compatibility.
        adata = adata2.copy()
    else:
        return(adata2)
def readandimputematrix(file_name, min_coverage=1):
    """
    Temporary function to load and impute a methylation count matrix into an
    AnnData object.

    The input is tab-separated, with a header line of feature names (first
    column 'sample_name') followed by one line per cell. Missing values are
    the literal string "nan" and are imputed with the feature mean.

    Parameters
    ----------
    file_name : file name to read and load
    min_coverage : minimum number of distinct values for which we keep and impute a variable

    Returns
    -------
    adata : :class:`~anndata.AnnData`
        Annotated data matrix.
    """
    with open(file_name) as f:
        file = f.readlines()

    # Separate the annotation (header) from the data.
    head_var = file[0]
    head_var = head_var.split('\t')

    # Then, extract the sample names (first field of every data line).
    sample_names = []
    data_raw = []
    for l in file[1:]:
        l = l.split('\t')
        sample_names.append(l[0])
        data_raw.append(l[1:])

    # clear memory of useless variables
    del file
    ##########################################
    # Now, classifying columns by how informative they are.
    # NOTE(review): the range starts at 1, so data column 0 is never
    # classified -- confirm whether that is intentional.
    empties = []
    partial = []
    full = []
    for index in range(1, len(data_raw[0])):
        column = [element[index] for element in data_raw]
        if len(list(set(column))) == 1:
            empties.append(index)
        elif len(list(set(column))) <= min_coverage:
            partial.append(index)
        else:
            full.append(index)

    ##########################################
    intermed_matrix = []
    name_windows_covered = []
    # Remove the completely uninformative columns, keeping only "full" ones.
    # NOTE(review): data_raw columns come from l[1:], so data column j maps
    # to head_var[j + 1]; using head_var[index] here looks off by one, and
    # range(1, len(head_var[1:])) skips the last column -- verify.
    for index in range(1, len(head_var[1:])):
        if index in full:
            intermed_matrix.append([element[index] for element in data_raw])
            name_windows_covered.append(head_var[index])

    ########################################
    # Imputing values: replace "nan" entries with the feature mean.
    imputed_matrix = []
    for row in intermed_matrix:
        imputed_row = []
        if "nan" in row:
            mean = np.mean([float(e) for e in row if e != "nan"])
            for element in row:
                if element == "nan":
                    imputed_row.append(str(mean))
                else:
                    imputed_row.append(element)
            imputed_matrix.append(imputed_row)
        else:
            imputed_matrix.append(row)

    # Rows were collected per feature; transpose back to cells x features.
    imputed_matrix = np.matrix(imputed_matrix).transpose()
    return(ad.AnnData(imputed_matrix, obs=pd.DataFrame(index=sample_names), var=pd.DataFrame(index=name_windows_covered)))
    #return(imputed_matrix, sample_names, name_windows_covered)
| 32.713483 | 122 | 0.604499 | import numpy as np
import anndata as ad
import pandas as pd
def load_met_noimput(matrix_file, path='', save=False):
    """
    Read a raw methylation count matrix (tab-separated, one cell per row)
    and convert it into an AnnData object.

    If the first line starts with 'sample_name' it is treated as a header
    holding the feature names; otherwise it is parsed as a data row.

    Write the matrix as .h5ad (AnnData object) if save is True.
    Return the AnnData object.
    """
    matrix = []
    cell_names = []
    feature_names = []
    with open(path+matrix_file) as f:
        # NOTE(review): [:-2] drops the last two characters of every line,
        # presumably a trailing '\t\n' -- verify against the file format.
        line = f.readline()[:-2].split('\t')
        if line[0] == 'sample_name':
            feature_names = line[1:]
        else:
            matrix.append(line[1:])
            cell_names.append(line[0])
        if matrix == []:
            # The first line was a header; read the first actual data row.
            line = f.readline()[:-2].split('\t')
            matrix.append(line[1:])
            cell_names.append(line[0])
        for line in f:
            line = line[:-2].split('\t')
            matrix.append(line[1:])
            cell_names.append(line[0])

    matrix = np.array(matrix)
    if feature_names != []:
        adata = ad.AnnData(matrix, obs=pd.DataFrame(index=cell_names), var=pd.DataFrame(index=feature_names))
    else:
        adata = ad.AnnData(matrix, obs=pd.DataFrame(index=cell_names))

    adata.uns['omic'] = 'methylation'
    adata.uns['imputation'] = 'no_imputation'

    if save:
        # Bug fix: the original used ".".split(matrix_file)[0], which splits
        # the literal string "." and always produced the name "..h5ad".
        adata.write(matrix_file.split(".")[0] + '.h5ad')
    return(adata)
def imputation_met(adata, number_cell_covered=10, imputation_value='mean', save=None, copy=False):
    """
    Impute missing values in methylation level matrices with the per-feature
    mean or median, and drop features covered in too few cells.

    Parameters
    ----------
    adata: AnnData object containing 'nan'
    number_cell_covered: minimum number of cells covered to retain a variable
    imputation_value: impute missing values with the 'mean' or the 'median'
    save: if not None, write the result to this .h5ad file name
    copy: pass copy=True to get the new AnnData object returned

    Return
    ------
    Return a new AnnData object when copy is True; otherwise returns None.
    """
    length1 = len(adata.X[0, :])
    length2 = len(adata.X[:, 0])

    # nansum/length treats missing entries as zero; kept from the original
    # implementation for backward compatibility.
    adata.obs['coverage_cells'] = [length1 - np.isnan(line).sum() for line in adata.X]
    adata.obs['mean_cell_methylation'] = [np.nansum(line)/length1 for line in adata.X]
    adata.var['coverage_feature'] = [length2 - np.isnan(line).sum() for line in adata.X.T]
    adata.var['mean_feature_methylation'] = [np.nansum(line)/length2 for line in adata.X.T]

    # Keep only the features covered in enough cells.
    adata2 = adata[:, adata.var['coverage_feature'] >= number_cell_covered].copy()

    for index in range(len(adata2.var_names.tolist())):
        # Bug fix: imputation_value used to be ignored and the mean was
        # always used; 'median' is now supported as documented.
        if imputation_value == 'median':
            fill_value = np.nanmedian(adata2.X[:, index])
        else:
            fill_value = adata2.var['mean_feature_methylation'][index]
        adata2.X[:, index] = np.nan_to_num(adata2.X[:, index], nan=fill_value)

    if save is not None:
        # Bug fix: save.rstrip('.h5ad') stripped any trailing characters from
        # the set {., h, 5, a, d} and could mangle names such as "data5".
        base_name = save[:-len('.h5ad')] if save.endswith('.h5ad') else save
        adata2.write(base_name + '.h5ad')

    if copy == False:
        # NOTE(review): this only rebinds the local name; the caller's object
        # is NOT modified in place. Kept as-is for backward compatibility.
        adata = adata2.copy()
    else:
        return(adata2)
def readandimputematrix(file_name, min_coverage=1):
with open(file_name) as f:
file = f.readlines()
head_var = file[0]
head_var = head_var.split('\t')
sample_names = []
data_raw = []
for l in file[1:]:
l = l.split('\t')
sample_names.append(l[0])
data_raw.append(l[1:])
del file
| true | true |
f73984c433d98d58cae5e8d8f3b8afeceb366e7d | 18,890 | py | Python | nuitka/tools/quality/autoformat/Autoformat.py | lermana/Nuitka | 6e31e323278fff9fe6c1a99e52d1017129e4120a | [
"Apache-2.0"
] | null | null | null | nuitka/tools/quality/autoformat/Autoformat.py | lermana/Nuitka | 6e31e323278fff9fe6c1a99e52d1017129e4120a | [
"Apache-2.0"
] | null | null | null | nuitka/tools/quality/autoformat/Autoformat.py | lermana/Nuitka | 6e31e323278fff9fe6c1a99e52d1017129e4120a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Tool to automatically format source code in Nuitka style.
"""
import os
import re
import subprocess
import sys
from nuitka.tools.quality.Git import (
getFileHashContent,
putFileHashContent,
updateFileIndex,
updateWorkingFile,
)
from nuitka.tools.quality.ScanSources import isPythonFile
from nuitka.Tracing import general, my_print
from nuitka.utils.Execution import (
NuitkaCalledProcessError,
check_call,
check_output,
getExecutablePath,
getNullOutput,
withEnvironmentPathAdded,
)
from nuitka.utils.FileOperations import (
getFileContentByLine,
getFileContents,
putTextFileContents,
renameFile,
withPreserveFileMode,
)
from nuitka.utils.Utils import getOS
def cleanupWindowsNewlines(filename):
    """Remove Windows new-lines from a file.

    Simple enough to not depend on external binary and used by
    the doctest extractions of the CPython test suites.
    """
    with open(filename, "rb") as f:
        original = f.read()

    cleaned = original.replace(b"\r\n", b"\n").replace(b"\n\r", b"\n")

    # Smuggle consistency replacement in here.
    if "Autoformat.py" not in filename:
        cleaned = cleaned.replace(b'.decode("utf-8")', b'.decode("utf8")')
        cleaned = cleaned.replace(b'.encode("utf-8")', b'.encode("utf8")')

    # Only touch the file when something actually changed.
    if cleaned != original:
        with open(filename, "wb") as out_file:
            out_file.write(cleaned)
def _cleanupTrailingWhitespace(filename):
    """Remove trailing white spaces from a file."""
    source_lines = list(getFileContentByLine(filename))

    # Normalize every line: drop trailing whitespace, replace tabs.
    clean_lines = [line.rstrip().replace("\t", " ") for line in source_lines]

    # Drop blank lines at the end of the file.
    while clean_lines and clean_lines[-1] == "":
        clean_lines.pop()

    if clean_lines != source_lines:
        putTextFileContents(filename, contents=clean_lines)
def _getRequirementsContentsByLine():
    """Yield the lines of the top level requirements-devel.txt file."""
    requirements_path = os.path.join(
        os.path.dirname(__file__), "..", "..", "..", "..", "requirements-devel.txt"
    )

    return getFileContentByLine(requirements_path)
def _getRequiredVersion(tool):
    """Look up the pinned version of a tool in requirements-devel.txt.

    Exits with an error message if the tool is not listed there.
    """
    for requirement in _getRequirementsContentsByLine():
        if requirement.startswith(tool + " =="):
            return requirement.split()[2]

    sys.exit("Error, cannot find %r in requirements-devel.txt" % tool)
def _checkRequiredVersion(tool, tool_call):
    """Check that a tool invocation reports the pinned version.

    Args:
        tool: tool name as listed in requirements-devel.txt
        tool_call: command list used to invoke the tool

    Returns:
        (bool, str) tuple: whether the version matches, and a message.
    """
    # Redundancy fix: the original fetched the required version and then
    # scanned requirements-devel.txt a second time for the same value.
    # _getRequiredVersion already exits when the tool is unknown.
    required_version = _getRequiredVersion(tool)

    tool_call = list(tool_call) + ["--version"]

    try:
        version_output = check_output(tool_call)
    except NuitkaCalledProcessError:
        return False, "failed to execute"

    if str is not bytes:
        version_output = version_output.decode("utf8")

    # Scan the output for the known version line formats of the tools.
    for line in version_output.splitlines():
        line = line.strip()

        if line.startswith(
            ("black, version", "python -m black, version", "__main__.py, version ")
        ):
            actual_version = line.split()[-1]
            break
        if line.startswith("VERSION "):
            actual_version = line.split()[-1]
            break
        if line.startswith("rstfmt "):
            actual_version = line.split()[-1]
            break
    else:
        sys.exit(
            "Error, couldn't determine version output of %r (%r)"
            % (tool, " ".join(tool_call))
        )

    message = "Version of %r via %r is required to be %r and not %r." % (
        tool,
        " ".join(tool_call),
        required_version,
        actual_version,
    )

    return required_version == actual_version, message
def _updateCommentNode(comment_node):
if "pylint:" in str(comment_node.value):
def replacer(part):
def renamer(pylint_token):
# pylint: disable=too-many-branches,too-many-return-statements
if pylint_token == "E0602":
return "undefined-variable"
elif pylint_token in ("E0401", "F0401"):
return "import-error"
elif pylint_token == "E1102":
return "not-callable"
elif pylint_token == "E1133":
return " not-an-iterable"
elif pylint_token == "E1128":
return "assignment-from-none"
# Save line length for this until isort is better at long lines.
elif pylint_token == "useless-suppression":
return "I0021"
# elif pylint_token == "I0021":
# return "useless-suppression"
elif pylint_token == "R0911":
return "too-many-return-statements"
elif pylint_token == "R0201":
return "no-self-use"
elif pylint_token == "R0902":
return "too-many-instance-attributes"
elif pylint_token == "R0912":
return "too-many-branches"
elif pylint_token == "R0914":
return "too-many-locals"
elif pylint_token == "R0915":
return "too-many-statements"
elif pylint_token == "W0123":
return "eval-used"
elif pylint_token == "W0603":
return "global-statement"
elif pylint_token == "W0613":
return "unused-argument"
elif pylint_token == "W0622":
return "redefined-builtin"
elif pylint_token == "W0703":
return "broad-except"
else:
return pylint_token
return part.group(1) + ",".join(
sorted(renamer(token) for token in part.group(2).split(",") if token)
)
new_value = str(comment_node.value).replace("pylint:disable", "pylint: disable")
new_value = re.sub(r"(pylint\: disable=)(.*)", replacer, new_value, flags=re.M)
comment_node.value = new_value
def _cleanupPyLintComments(filename, abort):
    """Rewrite the pylint disable comments of a file through RedBaron."""
    from redbaron import (  # pylint: disable=I0021,import-error,no-name-in-module
        RedBaron,
    )

    old_code = getFileContents(filename)

    # RedBaron does assertions too, and all kinds of strange errors while
    # parsing, therefore be very defensive, pylint: disable=broad-except
    try:
        red = RedBaron(old_code)
    except Exception:
        if abort:
            raise

        return

    for node in red.find_all("CommentNode"):
        try:
            _updateCommentNode(node)
        except Exception:
            my_print("Problem with", node)
            node.help(deep=True, with_formatting=True)
            raise

    new_code = red.dumps()

    if new_code != old_code:
        putTextFileContents(filename, contents=new_code)
def _cleanupImportRelative(filename):
    """Make imports of Nuitka package relative when possible."""

    # Avoid doing it for "__main__" packages, because for those the Visual Code
    # IDE doesn't like it and it may not run.
    if os.path.basename(filename) == "__main__.py.tmp":
        return

    package_name = os.path.dirname(filename).replace(os.path.sep, ".")

    # Make imports local if possible.
    if not package_name.startswith("nuitka."):
        return

    source_code = getFileContents(filename)

    # Bug fix: the second substitution previously started from the unmodified
    # "source_code" again, silently discarding the "from package import"
    # rewrite done by the first one.
    updated_code = re.sub(
        r"from %s import" % package_name, "from . import", source_code
    )
    updated_code = re.sub(r"from %s\." % package_name, "from .", updated_code)

    if source_code != updated_code:
        putTextFileContents(filename, contents=updated_code)
_binary_calls = {}
def _getPythonBinaryCall(binary_name):
    """Resolve and cache the command used to invoke a formatting tool.

    First tries the module form with the running interpreter, then searches
    the PATH (extended by the interpreter's script directories). Exits with
    an error if no installation with the pinned version is found.
    """
    if binary_name not in _binary_calls:
        # Collect mismatch messages from failed candidates for the error output.
        messages = []

        # Try running Python installation.
        try:
            __import__(binary_name)
        except ImportError:
            pass
        else:
            call = [sys.executable, "-m", binary_name]

            ok, message = _checkRequiredVersion(binary_name, call)

            if ok:
                _binary_calls[binary_name] = call
                return _binary_calls[binary_name]
            else:
                messages.append(message)

        # Fall back to a PATH search, including the interpreter's script dirs.
        with withEnvironmentPathAdded(
            "PATH", os.path.join(sys.prefix, "Scripts"), os.path.join(sys.prefix, "bin")
        ):
            binary_path = getExecutablePath(binary_name)

        if binary_path:
            call = [binary_path]

            ok, message = _checkRequiredVersion(binary_name, call)

            if ok:
                _binary_calls[binary_name] = call
                return _binary_calls[binary_name]
            else:
                messages.append(message)

        if messages:
            my_print("ERROR")
        for message in messages:
            my_print(message, style="red")

        # Reaching this point means no acceptable installation was found.
        sys.exit(
            "Error, cannot find %r version %r, not installed or wrong version for this Python?"
            % (binary_name, _getRequiredVersion(binary_name))
        )

    return _binary_calls[binary_name]
def _cleanupImportSortOrder(filename):
    """Sort the imports of a file with isort, honoring "# isort:start" markers.

    Everything above a "# isort:start" marker line is temporarily removed
    before running isort and spliced back in afterwards.
    """
    _cleanupImportRelative(filename)

    isort_call = _getPythonBinaryCall("isort")

    contents = getFileContents(filename)

    start_index = None
    if "\n# isort:start" in contents:
        parts = contents.splitlines()

        start_index = parts.index("# isort:start")

        # Only hand the part below the marker to isort.
        contents = "\n".join(parts[start_index + 1 :]) + "\n"

        putTextFileContents(filename, contents=contents)

    check_call(
        isort_call
        + [
            "-q",  # quiet, but stdout is still garbage
            "--overwrite-in-place",  # avoid using another temp file, this is already on one.
            "-ot",  # Order imports by type in addition to alphabetically
            "-m3",  # "vert-hanging"
            "-tc",  # Trailing commas
            "-p",  # make sure nuitka is first party package in import sorting.
            "nuitka",
            "-o",
            "SCons",
            filename,
        ],
        stdout=getNullOutput(),
    )

    if start_index is not None:
        # Restore the part above the marker in front of the sorted result.
        contents = getFileContents(filename)

        contents = "\n".join(parts[: start_index + 1]) + "\n" + contents

        putTextFileContents(filename, contents=contents)
def _cleanupRstFmt(filename):
    """Format a REST document with rstfmt and post-process the result.

    After running rstfmt, Windows newlines are removed, expanded literal
    block markup is compacted, and blank lines are enforced between bullet
    list items.
    """
    rstfmt_call = _getPythonBinaryCall("rstfmt")

    check_call(
        rstfmt_call
        + [
            filename,
        ],
        # stdout=devnull,
    )

    cleanupWindowsNewlines(filename)

    with open(filename, "rb") as f:
        contents = f.read()

    # Compact the "code" directive blocks that rstfmt expands back into "::".
    updated_contents = contents.replace(b":\n\n.. code::\n", b"::\n")

    # Small state machine: while "inside" a bullet list, make sure every new
    # "-" item is preceded by exactly one blank line.
    lines = []
    inside = False
    needs_empty = False

    for line in updated_contents.splitlines():
        if line.startswith(b"-"):
            if inside and needs_empty:
                lines.append(b"")

            inside = True
            needs_empty = True
            lines.append(line)
        elif inside and line == b"":
            needs_empty = False
            lines.append(line)
        elif inside and line.startswith(b" "):
            # Indented continuation line of the current bullet item.
            needs_empty = True
            lines.append(line)
        else:
            inside = False
            lines.append(line)

    updated_contents = b"\n".join(lines)

    if updated_contents != contents:
        with open(filename, "wb") as out_file:
            out_file.write(updated_contents)
warned_clang_format = False
def _cleanupClangFormat(filename):
    """Call clang-format on a given filename to format C code.

    Args:
        filename: What file to re-format.
    """

    # Using global here, as this is really a singleton, in
    # the form of a module, pylint: disable=global-statement
    global warned_clang_format

    # Probe the known clang-format versions, newest first.
    clang_format_path = None
    for candidate in (
        "clang-format-10",
        "clang-format-9",
        "clang-format-8",
        "clang-format-7",
    ):
        clang_format_path = getExecutablePath(candidate)

        if clang_format_path:
            break

    # Extra ball on Windows, check default installations paths in MSVC and LLVM too.
    if not clang_format_path and getOS() == "Windows":
        with withEnvironmentPathAdded(
            "PATH",
            r"C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\Llvm\bin",
            r"C:\Program Files\LLVM\bin",
        ):
            clang_format_path = getExecutablePath("clang-format")

    if clang_format_path:
        subprocess.call(
            [
                clang_format_path,
                "-i",
                "-style={BasedOnStyle: llvm, IndentWidth: 4, ColumnLimit: 120}",
                filename,
            ]
        )
    else:
        if not warned_clang_format:
            general.warning("Need to install LLVM for C files format.")
            warned_clang_format = True
def _shouldNotFormatCode(filename):
parts = os.path.abspath(filename).split(os.path.sep)
if "inline_copy" in parts:
return True
elif (
"tests" in parts
and not "basics" in parts
and "programs" not in parts
and "commercial" not in parts
):
return parts[-1] not in (
"run_all.py",
"compile_itself.py",
"update_doctest_generated.py",
"compile_itself.py",
"compile_python_modules.py",
"compile_extension_modules.py",
)
elif parts[-1] in ("incbin.h", "hedley.h"):
return True
else:
return False
def _transferBOM(source_filename, target_filename):
with open(source_filename, "rb") as f:
source_code = f.read()
if source_code.startswith(b"\xef\xbb\xbf"):
with open(target_filename, "rb") as f:
source_code = f.read()
if not source_code.startswith(b"\xef\xbb\xbf"):
with open(target_filename, "wb") as f:
f.write(b"\xef\xbb\xbf")
f.write(source_code)
def autoformat(filename, git_stage, abort, effective_filename=None, trace=True):
    """Format source code with external tools

    Args:
        filename: filename to work on
        git_stage: indicate if this is to be done on staged content
        abort: error exit in case a tool shows a problem
        effective_filename: derive type of file from this name
        trace: print per-file progress and result
    Notes:
        The effective filename can be used in case this is already a
        temporary filename intended to replace another.
    Returns:
        bool or None - True if the file content was changed, False if it
        was already formatted, None if the file was skipped entirely.
    """
    # This does a lot of distinctions, pylint: disable=too-many-branches,too-many-locals,too-many-statements
    if effective_filename is None:
        effective_filename = filename
    # Directories are not formatted, only files.
    if os.path.isdir(effective_filename):
        return
    filename = os.path.normpath(filename)
    effective_filename = os.path.normpath(effective_filename)
    if trace:
        my_print("Consider", filename, end=": ")
    # Classify the file: the set of tools applied depends on its type.
    is_python = isPythonFile(filename, effective_filename)
    is_c = effective_filename.endswith((".c", ".h"))
    is_cpp = effective_filename.endswith((".cpp", ".h"))
    # Plain text types get whitespace/newline cleanup only.
    is_txt = effective_filename.endswith(
        (
            ".patch",
            ".txt",
            ".qml",
            ".rst",
            ".sh",
            ".in",
            ".md",
            ".asciidoc",
            ".nuspec",
            ".yml",
            ".stylesheet",
            ".j2",
            ".gitignore",
            ".json",
            ".spec",
            "-rpmlintrc",
            "Containerfile",
        )
    ) or os.path.basename(filename) in (
        "changelog",
        "compat",
        "control",
        "copyright",
        "lintian-overrides",
    )
    is_rst = effective_filename.endswith(".rst")
    # Some parts of Nuitka must not be re-formatted with black or clang-format
    # as they have different intentions.
    if not (is_python or is_c or is_cpp or is_txt or is_rst):
        my_print("Ignored file type.")
        return
    # Work on a temporary copy
    tmp_filename = filename + ".tmp"
    # For staged content, the original bytes come from the git object store,
    # otherwise from the working tree file itself.
    if git_stage:
        old_code = getFileHashContent(git_stage["dst_hash"])
    else:
        old_code = getFileContents(filename, "rb")
    with open(tmp_filename, "wb") as output_file:
        output_file.write(old_code)
    try:
        if is_python:
            # Newline normalization runs before and after the Python tools.
            cleanupWindowsNewlines(tmp_filename)
            if not _shouldNotFormatCode(effective_filename):
                _cleanupImportSortOrder(tmp_filename)
                _cleanupPyLintComments(tmp_filename, abort)
                black_call = _getPythonBinaryCall("black")
                subprocess.call(black_call + ["-q", "--fast", tmp_filename])
                cleanupWindowsNewlines(tmp_filename)
        elif is_c or is_cpp:
            cleanupWindowsNewlines(tmp_filename)
            if not _shouldNotFormatCode(effective_filename):
                _cleanupClangFormat(tmp_filename)
                cleanupWindowsNewlines(tmp_filename)
        elif is_txt:
            cleanupWindowsNewlines(tmp_filename)
            _cleanupTrailingWhitespace(tmp_filename)
            cleanupWindowsNewlines(tmp_filename)
            if is_rst:
                _cleanupRstFmt(tmp_filename)
        # Keep a UTF-8 BOM of the original file, if any.
        _transferBOM(filename, tmp_filename)
        changed = False
        if old_code != getFileContents(tmp_filename, "rb"):
            if trace:
                my_print("Updated.")
            # Preserve permission bits while replacing the content.
            with withPreserveFileMode(filename):
                if git_stage:
                    # Update the git index entry and the working copy.
                    new_hash_value = putFileHashContent(tmp_filename)
                    updateFileIndex(git_stage, new_hash_value)
                    updateWorkingFile(filename, git_stage["dst_hash"], new_hash_value)
                else:
                    renameFile(tmp_filename, filename)
            changed = True
        else:
            if trace:
                my_print("OK.")
        return changed
    finally:
        # The temporary file may already be renamed away; remove leftovers.
        if os.path.exists(tmp_filename):
            os.unlink(tmp_filename)
| 30.175719 | 108 | 0.591424 |
import os
import re
import subprocess
import sys
from nuitka.tools.quality.Git import (
getFileHashContent,
putFileHashContent,
updateFileIndex,
updateWorkingFile,
)
from nuitka.tools.quality.ScanSources import isPythonFile
from nuitka.Tracing import general, my_print
from nuitka.utils.Execution import (
NuitkaCalledProcessError,
check_call,
check_output,
getExecutablePath,
getNullOutput,
withEnvironmentPathAdded,
)
from nuitka.utils.FileOperations import (
getFileContentByLine,
getFileContents,
putTextFileContents,
renameFile,
withPreserveFileMode,
)
from nuitka.utils.Utils import getOS
def cleanupWindowsNewlines(filename):
    """Normalize Windows style newlines of a file to plain "\\n" in-place.

    Also unifies the spelling of the "utf-8" codec name to "utf8" in code,
    except inside the Autoformat module itself, which mentions both forms.
    """
    with open(filename, "rb") as in_file:
        source_code = in_file.read()

    updated_code = source_code.replace(b"\r\n", b"\n").replace(b"\n\r", b"\n")

    if "Autoformat.py" not in filename:
        updated_code = updated_code.replace(b'.decode("utf-8")', b'.decode("utf8")')
        updated_code = updated_code.replace(b'.encode("utf-8")', b'.encode("utf8")')

    # Only rewrite the file when something actually changed.
    if updated_code != source_code:
        with open(filename, "wb") as out_file:
            out_file.write(updated_code)
def _cleanupTrailingWhitespace(filename):
    """Remove trailing white spaces, tabs, and trailing empty lines of a file."""
    source_lines = list(getFileContentByLine(filename))

    # Normalize every line: no trailing whitespace, tabs become a space.
    clean_lines = [line.rstrip().replace("\t", " ") for line in source_lines]

    # Drop empty lines at the end of the file.
    while clean_lines and not clean_lines[-1]:
        clean_lines.pop()

    # Avoid touching the file when nothing changed.
    if clean_lines != source_lines:
        putTextFileContents(filename, contents=clean_lines)
def _getRequirementsContentsByLine():
    """Yield the lines of the "requirements-devel.txt" file of the repository."""
    # The file lives at the repository root, four levels above this module.
    return getFileContentByLine(
        os.path.join(
            os.path.dirname(__file__), "..", "..", "..", "..", "requirements-devel.txt"
        )
    )
def _getRequiredVersion(tool):
    """Return the pinned version of a tool from "requirements-devel.txt".

    Exits the process with an error if the tool is not pinned there.
    """
    for line in _getRequirementsContentsByLine():
        if line.startswith(tool + " =="):
            # Line format is "<tool> == <version>".
            return line.split()[2]
    sys.exit("Error, cannot find %r in requirements-devel.txt" % tool)
def _checkRequiredVersion(tool, tool_call):
    """Check if a tool reports the version pinned in "requirements-devel.txt".

    Args:
        tool: Name of the tool as listed in the requirements file.
        tool_call: Command list used to execute the tool.
    Returns:
        (bool, str) tuple - whether versions match, and a diagnostic message.
    """
    # This already exits with an error if the tool is not pinned at all, so
    # the previous duplicated lookup loop here was redundant and got removed.
    required_version = _getRequiredVersion(tool)
    # Ask the tool itself what version it is.
    tool_call = list(tool_call) + ["--version"]
    try:
        version_output = check_output(tool_call)
    except NuitkaCalledProcessError:
        return False, "failed to execute"
    # On Python3 the subprocess output is bytes and needs decoding.
    if str is not bytes:
        version_output = version_output.decode("utf8")
    # Different tools report their version in different formats.
    for line in version_output.splitlines():
        line = line.strip()
        if line.startswith(
            ("black, version", "python -m black, version", "__main__.py, version ")
        ):
            actual_version = line.split()[-1]
            break
        if line.startswith("VERSION "):
            actual_version = line.split()[-1]
            break
        if line.startswith("rstfmt "):
            actual_version = line.split()[-1]
            break
    else:
        sys.exit(
            "Error, couldn't determine version output of %r (%r)"
            % (tool, " ".join(tool_call))
        )
    message = "Version of %r via %r is required to be %r and not %r." % (
        tool,
        " ".join(tool_call),
        required_version,
        actual_version,
    )
    return required_version == actual_version, message
def _updateCommentNode(comment_node):
    """Normalize a redbaron comment node containing PyLint annotations.

    Rewrites "pylint:disable" to "pylint: disable", renames numeric PyLint
    message codes to their symbolic names, and sorts the listed tokens.
    """
    if "pylint:" in str(comment_node.value):
        def replacer(part):
            def renamer(pylint_token):
                # pylint: disable=too-many-branches,too-many-return-statements
                if pylint_token == "E0602":
                    return "undefined-variable"
                elif pylint_token in ("E0401", "F0401"):
                    return "import-error"
                elif pylint_token == "E1102":
                    return "not-callable"
                elif pylint_token == "E1133":
                    # NOTE(review): the leading space looks accidental and
                    # affects the sorted order below - TODO confirm intent.
                    return " not-an-iterable"
                elif pylint_token == "E1128":
                    return "assignment-from-none"
                # Save line length for this until isort is better at long lines.
                elif pylint_token == "useless-suppression":
                    return "I0021"
                # elif pylint_token == "I0021":
                #    return "useless-suppression"
                elif pylint_token == "R0911":
                    return "too-many-return-statements"
                elif pylint_token == "R0201":
                    return "no-self-use"
                elif pylint_token == "R0902":
                    return "too-many-instance-attributes"
                elif pylint_token == "R0912":
                    return "too-many-branches"
                elif pylint_token == "R0914":
                    return "too-many-locals"
                elif pylint_token == "R0915":
                    return "too-many-statements"
                elif pylint_token == "W0123":
                    return "eval-used"
                elif pylint_token == "W0603":
                    return "global-statement"
                elif pylint_token == "W0613":
                    return "unused-argument"
                elif pylint_token == "W0622":
                    return "redefined-builtin"
                elif pylint_token == "W0703":
                    return "broad-except"
                else:
                    return pylint_token
            # Rebuild "pylint: disable=" followed by the sorted token list.
            return part.group(1) + ",".join(
                sorted(renamer(token) for token in part.group(2).split(",") if token)
            )
        new_value = str(comment_node.value).replace("pylint:disable", "pylint: disable")
        new_value = re.sub(r"(pylint\: disable=)(.*)", replacer, new_value, flags=re.M)
        comment_node.value = new_value
def _cleanupPyLintComments(filename, abort):
    """Normalize all PyLint annotation comments of a Python file in-place.

    Args:
        filename: Python source file to rewrite.
        abort: If True, re-raise parsing errors instead of skipping the file.
    """
    from redbaron import (  # pylint: disable=I0021,import-error,no-name-in-module
        RedBaron,
    )
    old_code = getFileContents(filename)
    # Baron does assertions too, and all kinds of strange errors, pylint: disable=broad-except
    try:
        red = RedBaron(old_code)
    except Exception:
        if abort:
            raise
        # Unparsable files are silently left alone unless aborting.
        return
    for node in red.find_all("CommentNode"):
        try:
            _updateCommentNode(node)
        except Exception:
            # Dump the offending node before re-raising for diagnosis.
            my_print("Problem with", node)
            node.help(deep=True, with_formatting=True)
            raise
    new_code = red.dumps()
    if new_code != old_code:
        putTextFileContents(filename, contents=red.dumps())
def _cleanupImportRelative(filename):
    """Rewrite absolute imports of the own package into relative form.

    Args:
        filename: Temporary copy of a Python source file to rewrite.
    """
    # Avoid doing it for "__main__" packages, because for those the Visual Code
    # IDE doesn't like it and it may not run
    if os.path.basename(filename) == "__main__.py.tmp":
        return
    package_name = os.path.dirname(filename).replace(os.path.sep, ".")
    # Make imports local if possible.
    if not package_name.startswith("nuitka."):
        return
    source_code = getFileContents(filename)
    updated_code = re.sub(
        r"from %s import" % package_name, "from . import", source_code
    )
    # Bug fix: this substitution previously started from "source_code" again,
    # silently discarding the result of the first substitution above.
    updated_code = re.sub(r"from %s\." % package_name, "from .", updated_code)
    if source_code != updated_code:
        putTextFileContents(filename, contents=updated_code)
# Cache of resolved tool invocations, keyed by tool name.
_binary_calls = {}
def _getPythonBinaryCall(binary_name):
    """Resolve how to invoke a Python based tool, with version checking.

    Tries "python -m <tool>" first, then a plain executable on PATH. The
    result is cached. Exits the process if no matching version is found.

    Args:
        binary_name: Name of the tool, e.g. "black" or "isort".
    Returns:
        list - command prefix to execute the tool with.
    """
    if binary_name not in _binary_calls:
        # Collect version-mismatch messages for the final error report.
        messages = []
        # Try running it as a module of the current Python first.
        try:
            __import__(binary_name)
        except ImportError:
            pass
        else:
            call = [sys.executable, "-m", binary_name]
            ok, message = _checkRequiredVersion(binary_name, call)
            if ok:
                _binary_calls[binary_name] = call
                return _binary_calls[binary_name]
            else:
                messages.append(message)
        # Fall back to a standalone executable near the Python installation.
        with withEnvironmentPathAdded(
            "PATH", os.path.join(sys.prefix, "Scripts"), os.path.join(sys.prefix, "bin")
        ):
            binary_path = getExecutablePath(binary_name)
        if binary_path:
            call = [binary_path]
            ok, message = _checkRequiredVersion(binary_name, call)
            if ok:
                _binary_calls[binary_name] = call
                return _binary_calls[binary_name]
            else:
                messages.append(message)
        # Nothing matched: report all collected problems and exit.
        if messages:
            my_print("ERROR")
        for message in messages:
            my_print(message, style="red")
        sys.exit(
            "Error, cannot find %r version %r, not installed or wrong version for this Python?"
            % (binary_name, _getRequiredVersion(binary_name))
        )
    return _binary_calls[binary_name]
def _cleanupImportSortOrder(filename):
    """Sort the imports of a Python file with isort, respecting markers.

    A "# isort:start" marker line limits sorting to the part of the file
    after it; the part before the marker is restored afterwards.
    """
    _cleanupImportRelative(filename)
    isort_call = _getPythonBinaryCall("isort")
    contents = getFileContents(filename)
    start_index = None
    if "\n# isort:start" in contents:
        # Temporarily strip everything up to and including the marker.
        parts = contents.splitlines()
        start_index = parts.index("# isort:start")
        contents = "\n".join(parts[start_index + 1 :]) + "\n"
        putTextFileContents(filename, contents=contents)
    # Run isort in-place with the project's ordering options.
    check_call(
        isort_call
        + [
            "-q",
            "--overwrite-in-place",
            "-ot",
            "-m3",
            "-tc",
            "-p",
            "nuitka",
            "-o",
            "SCons",
            filename,
        ],
        stdout=getNullOutput(),
    )
    if start_index is not None:
        # Re-attach the untouched header part before the marker.
        contents = getFileContents(filename)
        contents = "\n".join(parts[: start_index + 1]) + "\n" + contents
        putTextFileContents(filename, contents=contents)
def _cleanupRstFmt(filename):
    """Format a reST file with rstfmt, then post-process its output.

    The post-processing collapses ":\\n\\n.. code::" constructs back into
    "::" and normalizes blank lines between "-" list items.
    """
    rstfmt_call = _getPythonBinaryCall("rstfmt")
    check_call(
        rstfmt_call
        + [
            filename,
        ],
    )
    cleanupWindowsNewlines(filename)
    with open(filename, "rb") as f:
        contents = f.read()
    # Undo rstfmt expanding "::" literal blocks into explicit code directives.
    updated_contents = contents.replace(b":\n\n.. code::\n", b"::\n")
    lines = []
    # Track being inside a "-" bullet list and whether a separating blank
    # line is still owed before the next bullet.
    inside = False
    needs_empty = False
    for line in updated_contents.splitlines():
        if line.startswith(b"-"):
            if inside and needs_empty:
                lines.append(b"")
            inside = True
            needs_empty = True
            lines.append(line)
        elif inside and line == b"":
            needs_empty = False
            lines.append(line)
        elif inside and line.startswith(b" "):
            # Continuation line of the current bullet item.
            needs_empty = True
            lines.append(line)
        else:
            inside = False
            lines.append(line)
    # NOTE(review): splitlines/join drops a trailing newline, so a file
    # ending in "\n" always compares unequal here - TODO confirm intent.
    updated_contents = b"\n".join(lines)
    if updated_contents != contents:
        with open(filename, "wb") as out_file:
            out_file.write(updated_contents)
# One-shot flag so the missing-LLVM warning is only printed once.
warned_clang_format = False
def _cleanupClangFormat(filename):
    """Call clang-format on a given filename to format C code in-place."""
    # Module level singleton flag, pylint: disable=global-statement
    global warned_clang_format
    # Prefer the newest clang-format release available on PATH.
    clang_format_path = (
        getExecutablePath("clang-format-10")
        or getExecutablePath("clang-format-9")
        or getExecutablePath("clang-format-8")
        or getExecutablePath("clang-format-7")
    )
    # On Windows, also check default MSVC and LLVM installation paths.
    if not clang_format_path and getOS() == "Windows":
        with withEnvironmentPathAdded(
            "PATH",
            r"C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\Llvm\bin",
            r"C:\Program Files\LLVM\bin",
        ):
            clang_format_path = getExecutablePath("clang-format")
    if clang_format_path:
        # Re-format in-place with the project style.
        subprocess.call(
            [
                clang_format_path,
                "-i",
                "-style={BasedOnStyle: llvm, IndentWidth: 4, ColumnLimit: 120}",
                filename,
            ]
        )
    else:
        if not warned_clang_format:
            general.warning("Need to install LLVM for C files format.")
            warned_clang_format = True
def _shouldNotFormatCode(filename):
parts = os.path.abspath(filename).split(os.path.sep)
if "inline_copy" in parts:
return True
elif (
"tests" in parts
and not "basics" in parts
and "programs" not in parts
and "commercial" not in parts
):
return parts[-1] not in (
"run_all.py",
"compile_itself.py",
"update_doctest_generated.py",
"compile_itself.py",
"compile_python_modules.py",
"compile_extension_modules.py",
)
elif parts[-1] in ("incbin.h", "hedley.h"):
return True
else:
return False
def _transferBOM(source_filename, target_filename):
with open(source_filename, "rb") as f:
source_code = f.read()
if source_code.startswith(b"\xef\xbb\xbf"):
with open(target_filename, "rb") as f:
source_code = f.read()
if not source_code.startswith(b"\xef\xbb\xbf"):
with open(target_filename, "wb") as f:
f.write(b"\xef\xbb\xbf")
f.write(source_code)
def autoformat(filename, git_stage, abort, effective_filename=None, trace=True):
    """Format a source file with the external tools matching its type.

    Args:
        filename: filename to work on
        git_stage: indicate if this is to be done on staged content
        abort: error exit in case a tool shows a problem
        effective_filename: derive type of file from this name
        trace: print per-file progress and result
    Returns:
        bool or None - True if changed, False if already clean, None if skipped.
    """
    if effective_filename is None:
        effective_filename = filename
    # Directories are not formatted, only files.
    if os.path.isdir(effective_filename):
        return
    filename = os.path.normpath(filename)
    effective_filename = os.path.normpath(effective_filename)
    if trace:
        my_print("Consider", filename, end=": ")
    # Classify the file; the tool chain applied depends on its type.
    is_python = isPythonFile(filename, effective_filename)
    is_c = effective_filename.endswith((".c", ".h"))
    is_cpp = effective_filename.endswith((".cpp", ".h"))
    is_txt = effective_filename.endswith(
        (
            ".patch",
            ".txt",
            ".qml",
            ".rst",
            ".sh",
            ".in",
            ".md",
            ".asciidoc",
            ".nuspec",
            ".yml",
            ".stylesheet",
            ".j2",
            ".gitignore",
            ".json",
            ".spec",
            "-rpmlintrc",
            "Containerfile",
        )
    ) or os.path.basename(filename) in (
        "changelog",
        "compat",
        "control",
        "copyright",
        "lintian-overrides",
    )
    is_rst = effective_filename.endswith(".rst")
    # Unknown file types are skipped entirely.
    if not (is_python or is_c or is_cpp or is_txt or is_rst):
        my_print("Ignored file type.")
        return
    # Work on a temporary copy next to the original.
    tmp_filename = filename + ".tmp"
    # Staged content comes from the git object store, otherwise from disk.
    if git_stage:
        old_code = getFileHashContent(git_stage["dst_hash"])
    else:
        old_code = getFileContents(filename, "rb")
    with open(tmp_filename, "wb") as output_file:
        output_file.write(old_code)
    try:
        if is_python:
            cleanupWindowsNewlines(tmp_filename)
            if not _shouldNotFormatCode(effective_filename):
                _cleanupImportSortOrder(tmp_filename)
                _cleanupPyLintComments(tmp_filename, abort)
                black_call = _getPythonBinaryCall("black")
                subprocess.call(black_call + ["-q", "--fast", tmp_filename])
                cleanupWindowsNewlines(tmp_filename)
        elif is_c or is_cpp:
            cleanupWindowsNewlines(tmp_filename)
            if not _shouldNotFormatCode(effective_filename):
                _cleanupClangFormat(tmp_filename)
                cleanupWindowsNewlines(tmp_filename)
        elif is_txt:
            cleanupWindowsNewlines(tmp_filename)
            _cleanupTrailingWhitespace(tmp_filename)
            cleanupWindowsNewlines(tmp_filename)
            if is_rst:
                _cleanupRstFmt(tmp_filename)
        # Keep a UTF-8 BOM of the original file, if any.
        _transferBOM(filename, tmp_filename)
        changed = False
        if old_code != getFileContents(tmp_filename, "rb"):
            if trace:
                my_print("Updated.")
            # Preserve permission bits while replacing the content.
            with withPreserveFileMode(filename):
                if git_stage:
                    new_hash_value = putFileHashContent(tmp_filename)
                    updateFileIndex(git_stage, new_hash_value)
                    updateWorkingFile(filename, git_stage["dst_hash"], new_hash_value)
                else:
                    renameFile(tmp_filename, filename)
            changed = True
        else:
            if trace:
                my_print("OK.")
        return changed
    finally:
        # The temporary file may already be renamed away; remove leftovers.
        if os.path.exists(tmp_filename):
            os.unlink(tmp_filename)
| true | true |
f7398579137bd1f5161e88e5dafc93be49c62d75 | 2,054 | py | Python | training_curves.py | yourwanghao/Ultrasound_Nerve_Segmentation | 9a73cdb9a97b27c375a1023f4426d7e5a89b6a4d | [
"MIT"
] | null | null | null | training_curves.py | yourwanghao/Ultrasound_Nerve_Segmentation | 9a73cdb9a97b27c375a1023f4426d7e5a89b6a4d | [
"MIT"
] | null | null | null | training_curves.py | yourwanghao/Ultrasound_Nerve_Segmentation | 9a73cdb9a97b27c375a1023f4426d7e5a89b6a4d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
## based on https://github.com/dmlc/mxnet/issues/1302
## Parses the model fit log file and generates a train/val vs epoch plot
import matplotlib.pyplot as plt
import numpy as np
import re
import argparse
def log_train_metric(period, auto_reset=False):
    """Callback to log the training evaluation result every period.

    Parameters
    ----------
    period : int
        The number of batches between logging the training evaluation metric.
    auto_reset : bool
        Reset the metric after each log.

    Returns
    -------
    callback : function
        The callback function that can be passed as iter_epoch_callback to fit.
    """
    # Imported locally: this script never imports "logging" at module level,
    # so the callback previously raised NameError as soon as it logged.
    import logging

    def _callback(param):
        """Log the training metric of the current batch, if it is due."""
        if param.nbatch % period == 0 and param.eval_metric is not None:
            name_value = param.eval_metric.get_name_value()
            for name, value in name_value:
                logging.info('Iter[%d] Batch[%d] Train-%s=%f',
                             param.epoch, param.nbatch, name, value)
            if auto_reset:
                param.eval_metric.reset()

    return _callback
# Command line handling: only the path of the training log file is needed.
parser = argparse.ArgumentParser(description='Parses log file and generates train/val curves')
parser.add_argument('--log-file', type=str, default="log_tr_va",
                    help='the path of log file')
args = parser.parse_args()
print('ok')
# Raw strings avoid invalid "\s" / "\d" escape-sequence warnings.
TR_RE = re.compile(r'\s+Train-dicecoef=([\d\.]+)')
VA_RE = re.compile(r'.*?]\sValidation-dicecoef=([\d\.]+)')
# Read the whole log once, closing the file handle properly.
with open(args.log_file) as log_file:
    log = log_file.read()
# Extract the per-epoch train/validation dice coefficients.
log_tr = [float(x) for x in TR_RE.findall(log)]
log_va = [float(x) for x in VA_RE.findall(log)]
idx = np.arange(len(log_tr))
print(len(log_tr), len(log_va))
# Plot both curves against the epoch index.
plt.figure(figsize=(8, 6))
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
plt.plot(idx, log_tr, 'o', linestyle='-', color="r",
         label="Train dicecoef")
plt.plot(idx, log_va, 'o', linestyle='-', color="b",
         label="Validation dicecoef")
plt.legend(loc="best")
plt.xticks(np.arange(min(idx), max(idx)+1, 5))
plt.yticks(np.arange(0, 1, 0.2))
plt.ylim([0, 1])
plt.show()
| 29.342857 | 94 | 0.642162 |
se):
def _callback(param):
if param.nbatch % period == 0 and param.eval_metric is not None:
name_value = param.eval_metric.get_name_value()
for name, value in name_value:
logging.info('Iter[%d] Batch[%d] Train-%s=%f',
param.epoch, param.nbatch, name, value)
if auto_reset:
param.eval_metric.reset()
return _callback
# Command line handling: only the path of the training log file is needed.
parser = argparse.ArgumentParser(description='Parses log file and generates train/val curves')
parser.add_argument('--log-file', type=str, default="log_tr_va",
                    help='the path of log file')
args = parser.parse_args()
print('ok')
# Raw strings avoid invalid "\s" / "\d" escape-sequence warnings.
TR_RE = re.compile(r'\s+Train-dicecoef=([\d\.]+)')
VA_RE = re.compile(r'.*?]\sValidation-dicecoef=([\d\.]+)')
# Read the whole log once, closing the file handle properly.
with open(args.log_file) as log_file:
    log = log_file.read()
# Extract the per-epoch train/validation dice coefficients.
log_tr = [float(x) for x in TR_RE.findall(log)]
log_va = [float(x) for x in VA_RE.findall(log)]
idx = np.arange(len(log_tr))
print(len(log_tr), len(log_va))
# Plot both curves against the epoch index.
plt.figure(figsize=(8, 6))
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
plt.plot(idx, log_tr, 'o', linestyle='-', color="r",
         label="Train dicecoef")
plt.plot(idx, log_va, 'o', linestyle='-', color="b",
         label="Validation dicecoef")
plt.legend(loc="best")
plt.xticks(np.arange(min(idx), max(idx)+1, 5))
plt.yticks(np.arange(0, 1, 0.2))
plt.ylim([0, 1])
plt.show()
| true | true |
f739857b3a1066812a6cd3d08249b014803a9745 | 364 | py | Python | tests/test_ghebbrish_converter.py | ctrl-escp/gibberish2utf8 | 052cad946e8fa847dfbc984ee02adbb6d8c07a65 | [
"MIT"
] | 3 | 2020-11-14T17:11:52.000Z | 2020-11-30T08:34:06.000Z | tests/test_ghebbrish_converter.py | ctrl-escp/gibberish2utf8 | 052cad946e8fa847dfbc984ee02adbb6d8c07a65 | [
"MIT"
] | null | null | null | tests/test_ghebbrish_converter.py | ctrl-escp/gibberish2utf8 | 052cad946e8fa847dfbc984ee02adbb6d8c07a65 | [
"MIT"
] | 1 | 2021-03-29T19:11:11.000Z | 2021-03-29T19:11:11.000Z | from unittest import TestCase
from ..gib2u import GhebbrishConverter
class TestGhebbrishConverter(TestCase):
	"""Tests for converting mis-decoded Hebrew ("ghebbrish") back to UTF-8."""
	# Converter instance shared by the tests, constructed with empty input.
	gib = GhebbrishConverter('')
	def test_gibberish_to_utf8_converter(self):
		# The input is Hebrew text that was decoded with the wrong codec;
		# the converter should recover the proper Hebrew string.
		original_string = "ùìåí çðåê"
		expected_output = "שלום חנוך"
		self.assertEqual(self.gib.convert_gibberish_to_utf8(original_string), expected_output)
| 30.333333 | 94 | 0.760989 | from unittest import TestCase
from ..gib2u import GhebbrishConverter
class TestGhebbrishConverter(TestCase):
	"""Tests for converting mis-decoded Hebrew ("ghebbrish") back to UTF-8."""
	# Converter instance shared by the tests, constructed with empty input.
	gib = GhebbrishConverter('')
	def test_gibberish_to_utf8_converter(self):
		# Wrongly decoded Hebrew should round-trip to the proper string.
		original_string = "ùìåí çðåê"
		expected_output = "שלום חנוך"
		self.assertEqual(self.gib.convert_gibberish_to_utf8(original_string), expected_output)
| true | true |
f739872425e13a4775572df191a1cc5a921c1485 | 4,855 | py | Python | src/selectedtests/test_mappings/update_test_mappings.py | isabella232/selected-tests | 890cd5f39f5571d50f0406b4c25a1a2eef1006a3 | [
"Apache-2.0"
] | 2 | 2020-04-13T11:26:57.000Z | 2022-01-21T00:03:52.000Z | src/selectedtests/test_mappings/update_test_mappings.py | mongodb/selected-tests | 467f71f1d45b06ac3cc5db252f18658f8cd93083 | [
"Apache-2.0"
] | 54 | 2019-09-26T18:56:34.000Z | 2022-03-12T01:07:00.000Z | src/selectedtests/test_mappings/update_test_mappings.py | isabella232/selected-tests | 890cd5f39f5571d50f0406b4c25a1a2eef1006a3 | [
"Apache-2.0"
] | 6 | 2019-10-01T14:24:27.000Z | 2020-02-13T15:53:47.000Z | """Methods to update test mappings for a project."""
from typing import Any, Dict, List
import structlog
from evergreen.api import EvergreenApi
from pymongo import ReturnDocument, UpdateOne
from pymongo.errors import BulkWriteError
from selectedtests.datasource.mongo_wrapper import MongoWrapper
from selectedtests.helpers import create_query
from selectedtests.project_config import ProjectConfig
from selectedtests.test_mappings.commit_limit import CommitLimit
from selectedtests.test_mappings.create_test_mappings import generate_test_mappings
LOGGER = structlog.get_logger()
def update_test_mappings_test_files(
    test_files: List[Dict[str, Any]], test_mapping_id: Dict[str, Any], mongo: MongoWrapper
) -> None:
    """
    Update test_files in the test mappings test_files project config collection.

    :param test_files: A list of test mappings.
    :param test_mapping_id: The containing test_mappings identifier.
    :param mongo: An instance of MongoWrapper.
    """
    operations = []
    for test_file in test_files:
        # Everything but the seen count identifies the document; the count
        # itself is what gets incremented on each upsert.
        file_query = dict(
            create_query(test_file, mutable=["test_file_seen_count"]), **test_mapping_id
        )
        operations.append(
            UpdateOne(
                file_query,
                {"$inc": {"test_file_seen_count": test_file["test_file_seen_count"]}},
                upsert=True,
            )
        )

    try:
        result = mongo.test_mappings_test_files().bulk_write(operations)
        LOGGER.debug("bulk_write", result=result.bulk_api_result, parent=test_mapping_id)
    except BulkWriteError as bwe:
        # The default bulk write error message is not always helpful, so dump
        # the details here before re-raising.
        LOGGER.exception(
            "bulk_write error", parent=test_mapping_id, operations=operations, details=bwe.details
        )
        raise
def update_test_mappings(test_mappings: List[Dict[str, Any]], mongo: MongoWrapper) -> None:
    """
    Update test mappings in the test mappings collection.

    :param test_mappings: A list of test mappings.
    :param mongo: An instance of MongoWrapper.
    """
    for mapping in test_mappings:
        seen_count = mapping["source_file_seen_count"]
        query = create_query(mapping, joined=["test_files"], mutable=["source_file_seen_count"])

        # Upsert the mapping document, bumping how often the source file was
        # seen, and fetch just its id for the child documents below.
        test_mapping = mongo.test_mappings().find_one_and_update(
            query,
            {"$inc": {"source_file_seen_count": seen_count}},
            projection={"_id": 1},
            upsert=True,
            return_document=ReturnDocument.AFTER,
        )
        LOGGER.debug(
            "update_one test_mappings",
            test_mapping=test_mapping,
            query=query,
            inc=seen_count,
        )

        # Associated test files live in their own collection, keyed by the
        # mapping document id.
        test_files = mapping.get("test_files", [])
        if test_files:
            update_test_mappings_test_files(
                test_files, {"test_mapping_id": test_mapping["_id"]}, mongo
            )
def update_test_mappings_since_last_commit(evg_api: EvergreenApi, mongo: MongoWrapper) -> None:
    """
    Update test mappings that are being tracked in the test mappings project config collection.

    For every tracked project, analyze commits made since the last analyzed
    commit, persist the new high-water marks, and store generated mappings.

    :param evg_api: An instance of the evg_api client
    :param mongo: An instance of MongoWrapper.
    """
    LOGGER.info("Updating test mappings")
    project_cursor = mongo.project_config().find({})
    for project_config in project_cursor:
        LOGGER.info("Updating test mappings for project", project_config=project_config)
        test_config = project_config["test_config"]
        # Analyze only commits newer than the most recently analyzed ones,
        # both for the main project and its optional module.
        test_mappings_result = generate_test_mappings(
            evg_api,
            project_config["project"],
            CommitLimit(stop_at_commit_sha=test_config["most_recent_project_commit_analyzed"]),
            test_config["source_file_regex"],
            test_config["test_file_regex"],
            module_name=test_config["module"],
            module_commit_limit=CommitLimit(
                stop_at_commit_sha=test_config["most_recent_module_commit_analyzed"]
            ),
            # NOTE(review): "module_source_file_regex" is passed for both the
            # module source and module test file patterns - TODO confirm.
            module_source_file_pattern=test_config["module_source_file_regex"],
            module_test_file_pattern=test_config["module_source_file_regex"],
        )
        # Persist the new high-water mark commits so the next run resumes
        # from where this one stopped.
        project_config = ProjectConfig.get(mongo.project_config(), project_config["project"])
        project_config.test_config.update_most_recent_commits_analyzed(
            test_mappings_result.most_recent_project_commit_analyzed,
            test_mappings_result.most_recent_module_commit_analyzed,
        )
        project_config.save(mongo.project_config())
        if test_mappings_result.test_mappings_list:
            update_test_mappings(test_mappings_result.test_mappings_list, mongo)
        else:
            LOGGER.info("No test mappings generated")
    LOGGER.info("Finished test mapping updating")
| 39.471545 | 98 | 0.702987 | from typing import Any, Dict, List
import structlog
from evergreen.api import EvergreenApi
from pymongo import ReturnDocument, UpdateOne
from pymongo.errors import BulkWriteError
from selectedtests.datasource.mongo_wrapper import MongoWrapper
from selectedtests.helpers import create_query
from selectedtests.project_config import ProjectConfig
from selectedtests.test_mappings.commit_limit import CommitLimit
from selectedtests.test_mappings.create_test_mappings import generate_test_mappings
LOGGER = structlog.get_logger()
def update_test_mappings_test_files(
    test_files: List[Dict[str, Any]], test_mapping_id: Dict[str, Any], mongo: MongoWrapper
) -> None:
    """
    Update test_files in the test mappings test_files collection.

    :param test_files: A list of test file mappings to upsert.
    :param test_mapping_id: The containing test_mappings identifier.
    :param mongo: An instance of MongoWrapper.
    """
    operations = []
    for test_file in test_files:
        # Everything but the seen count identifies the document; the count
        # itself is incremented on each upsert.
        query = create_query(test_file, mutable=["test_file_seen_count"])
        query = dict(**query, **test_mapping_id)
        update_test_file = UpdateOne(
            query,
            {"$inc": {"test_file_seen_count": test_file["test_file_seen_count"]}},
            upsert=True,
        )
        operations.append(update_test_file)
    try:
        result = mongo.test_mappings_test_files().bulk_write(operations)
        LOGGER.debug("bulk_write", result=result.bulk_api_result, parent=test_mapping_id)
    except BulkWriteError as bwe:
        # The default bulk write error message is not always helpful, so dump
        # the details here before re-raising.
        LOGGER.exception(
            "bulk_write error", parent=test_mapping_id, operations=operations, details=bwe.details
        )
        raise
def update_test_mappings(test_mappings: List[Dict[str, Any]], mongo: MongoWrapper) -> None:
    """
    Update test mappings in the test mappings collection.

    :param test_mappings: A list of test mappings to upsert.
    :param mongo: An instance of MongoWrapper.
    """
    for mapping in test_mappings:
        source_file_seen_count = mapping["source_file_seen_count"]
        query = create_query(mapping, joined=["test_files"], mutable=["source_file_seen_count"])
        # Upsert the mapping document, bumping how often the source file was
        # seen, and fetch just its id for the child documents below.
        test_mapping = mongo.test_mappings().find_one_and_update(
            query,
            {"$inc": {"source_file_seen_count": source_file_seen_count}},
            projection={"_id": 1},
            upsert=True,
            return_document=ReturnDocument.AFTER,
        )
        LOGGER.debug(
            "update_one test_mappings",
            test_mapping=test_mapping,
            query=query,
            inc=source_file_seen_count,
        )
        # Associated test files live in their own collection, keyed by the
        # mapping document id.
        test_files = mapping.get("test_files", [])
        if test_files:
            test_mapping_id = {"test_mapping_id": test_mapping["_id"]}
            update_test_mappings_test_files(test_files, test_mapping_id, mongo)
def update_test_mappings_since_last_commit(evg_api: EvergreenApi, mongo: MongoWrapper) -> None:
    """
    Update test mappings for every project tracked in the project config collection.

    :param evg_api: An instance of the evg_api client.
    :param mongo: An instance of MongoWrapper.
    """
    LOGGER.info("Updating test mappings")
    project_cursor = mongo.project_config().find({})
    for project_config in project_cursor:
        LOGGER.info("Updating test mappings for project", project_config=project_config)
        test_config = project_config["test_config"]
        # Analyze only commits newer than the most recently analyzed ones,
        # both for the main project and its optional module.
        test_mappings_result = generate_test_mappings(
            evg_api,
            project_config["project"],
            CommitLimit(stop_at_commit_sha=test_config["most_recent_project_commit_analyzed"]),
            test_config["source_file_regex"],
            test_config["test_file_regex"],
            module_name=test_config["module"],
            module_commit_limit=CommitLimit(
                stop_at_commit_sha=test_config["most_recent_module_commit_analyzed"]
            ),
            # NOTE(review): "module_source_file_regex" is passed for both the
            # module source and module test file patterns - TODO confirm.
            module_source_file_pattern=test_config["module_source_file_regex"],
            module_test_file_pattern=test_config["module_source_file_regex"],
        )
        # Persist the new high-water mark commits so the next run resumes
        # from where this one stopped.
        project_config = ProjectConfig.get(mongo.project_config(), project_config["project"])
        project_config.test_config.update_most_recent_commits_analyzed(
            test_mappings_result.most_recent_project_commit_analyzed,
            test_mappings_result.most_recent_module_commit_analyzed,
        )
        project_config.save(mongo.project_config())
        if test_mappings_result.test_mappings_list:
            update_test_mappings(test_mappings_result.test_mappings_list, mongo)
        else:
            LOGGER.info("No test mappings generated")
    LOGGER.info("Finished test mapping updating")
| true | true |
f739880c7d893e39c63d767d3c9cfaafd4237b81 | 416 | py | Python | dashboard/migrations/0006_auto_20181212_0011.py | HemanthJella/ewallet | cc7f9b5abb7aa552b8769b9324c3d79630e5ea6a | [
"Apache-2.0"
] | null | null | null | dashboard/migrations/0006_auto_20181212_0011.py | HemanthJella/ewallet | cc7f9b5abb7aa552b8769b9324c3d79630e5ea6a | [
"Apache-2.0"
] | null | null | null | dashboard/migrations/0006_auto_20181212_0011.py | HemanthJella/ewallet | cc7f9b5abb7aa552b8769b9324c3d79630e5ea6a | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.0.9 on 2018-12-11 18:41
from django.db import migrations, models
class Migration(migrations.Migration):
    """Allow the profile "card_id" field to be NULL in the database."""

    dependencies = [
        ('dashboard', '0005_profile_card_id'),
    ]

    operations = [
        migrations.AlterField(
            model_name='profile',
            name='card_id',
            # null=True lets rows without an assigned card id remain valid.
            field=models.CharField(max_length=30, null=True),
        ),
    ]
| 21.894737 | 62 | 0.576923 |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Allow the profile "card_id" field to be NULL in the database."""

    dependencies = [
        ('dashboard', '0005_profile_card_id'),
    ]

    operations = [
        migrations.AlterField(
            model_name='profile',
            name='card_id',
            # null=True lets rows without an assigned card id remain valid.
            field=models.CharField(max_length=30, null=True),
        ),
    ]
| true | true |
f7398845d400b1fd4fedd532eb1520dff30d47a0 | 6,990 | py | Python | tensorflow/python/keras/_impl/keras/layers/embeddings.py | rxbtz/tensorflow | 499f7ed810928e29986453c83778f71e2b351eb5 | [
"Apache-2.0"
] | 14 | 2018-12-06T06:51:33.000Z | 2021-03-23T11:29:24.000Z | tensorflow/python/keras/_impl/keras/layers/embeddings.py | rxbtz/tensorflow | 499f7ed810928e29986453c83778f71e2b351eb5 | [
"Apache-2.0"
] | 1 | 2018-05-11T18:18:05.000Z | 2018-05-11T18:18:05.000Z | tensorflow/python/keras/_impl/keras/layers/embeddings.py | rxbtz/tensorflow | 499f7ed810928e29986453c83778f71e2b351eb5 | [
"Apache-2.0"
] | 2 | 2021-03-25T13:15:18.000Z | 2022-03-31T13:12:24.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Embedding layer.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras import constraints
from tensorflow.python.keras._impl.keras import initializers
from tensorflow.python.keras._impl.keras import regularizers
from tensorflow.python.keras._impl.keras.engine import Layer
from tensorflow.python.keras._impl.keras.utils import tf_utils
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.layers.Embedding')
class Embedding(Layer):
  """Turns positive integers (indexes) into dense vectors of fixed size.

  eg. `[[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]`

  This layer can only be used as the first layer in a model.

  Example:

  ```python
  model = Sequential()
  model.add(Embedding(1000, 64, input_length=10))
  # The model takes an integer matrix of size (batch, input_length) as
  # input; the largest integer (i.e. word index) should be no larger
  # than 999 (vocabulary size).
  # Now model.output_shape == (None, 10, 64), where None is the batch
  # dimension.

  input_array = np.random.randint(1000, size=(32, 10))

  model.compile('rmsprop', 'mse')
  output_array = model.predict(input_array)
  assert output_array.shape == (32, 10, 64)
  ```

  Arguments:
    input_dim: int > 0. Size of the vocabulary,
      i.e. maximum integer index + 1.
    output_dim: int >= 0. Dimension of the dense embedding.
    embeddings_initializer: Initializer for the `embeddings` matrix.
    embeddings_regularizer: Regularizer function applied to
      the `embeddings` matrix.
    embeddings_constraint: Constraint function applied to
      the `embeddings` matrix.
    mask_zero: Whether or not the input value 0 is a special "padding"
      value that should be masked out.  Useful with recurrent layers
      which may take variable length input.  If True, all subsequent
      layers must support masking, and index 0 cannot be used in the
      vocabulary (input_dim should equal vocabulary size + 1).
    input_length: Length of input sequences, when it is constant.
      This argument is required if you are going to connect
      `Flatten` then `Dense` layers upstream
      (without it, the shape of the dense outputs cannot be computed).

  Input shape:
    2D tensor with shape: `(batch_size, sequence_length)`.

  Output shape:
    3D tensor with shape: `(batch_size, sequence_length, output_dim)`.
  """

  def __init__(self,
               input_dim,
               output_dim,
               embeddings_initializer='uniform',
               embeddings_regularizer=None,
               activity_regularizer=None,
               embeddings_constraint=None,
               mask_zero=False,
               input_length=None,
               **kwargs):
    if 'input_shape' not in kwargs:
      # Expose a static input shape when input_length is known.
      if input_length:
        kwargs['input_shape'] = (input_length,)
      else:
        kwargs['input_shape'] = (None,)
    dtype = kwargs.pop('dtype', K.floatx())
    super(Embedding, self).__init__(dtype=dtype, **kwargs)

    self.input_dim = input_dim
    self.output_dim = output_dim
    self.embeddings_initializer = initializers.get(embeddings_initializer)
    self.embeddings_regularizer = regularizers.get(embeddings_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.embeddings_constraint = constraints.get(embeddings_constraint)
    self.mask_zero = mask_zero
    self.input_length = input_length

  @tf_utils.shape_type_conversion
  def build(self, input_shape):
    # One (input_dim, output_dim) lookup table shared by every timestep.
    self.embeddings = self.add_weight(
        shape=(self.input_dim, self.output_dim),
        initializer=self.embeddings_initializer,
        name='embeddings',
        regularizer=self.embeddings_regularizer,
        constraint=self.embeddings_constraint)
    self.built = True

  def compute_mask(self, inputs, mask=None):
    # When mask_zero is set, index 0 marks padding positions.
    if not self.mask_zero:
      return None
    return math_ops.not_equal(inputs, 0)

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    if self.input_length is None:
      return input_shape + (self.output_dim,)
    # input_length can be a tuple if the input is 3D or higher.
    if isinstance(self.input_length, (list, tuple)):
      in_lens = list(self.input_length)
    else:
      in_lens = [self.input_length]
    if len(in_lens) != len(input_shape) - 1:
      # BUGFIX: this ValueError used to be constructed but never raised,
      # so inconsistent shapes were silently accepted.
      raise ValueError('"input_length" is %s, but received input has shape %s' %
                       (str(self.input_length), str(input_shape)))
    for i, (s1, s2) in enumerate(zip(in_lens, input_shape[1:])):
      if s1 is not None and s2 is not None and s1 != s2:
        # BUGFIX: likewise raised (previously a no-op expression).
        raise ValueError('"input_length" is %s, but received input has shape %s'
                         % (str(self.input_length), str(input_shape)))
      elif s1 is None:
        # Fill in unknown lengths from the observed input shape.
        in_lens[i] = s2
    return (input_shape[0],) + tuple(in_lens) + (self.output_dim,)

  def call(self, inputs):
    dtype = K.dtype(inputs)
    if dtype != 'int32' and dtype != 'int64':
      # embedding_lookup requires integer indices.
      inputs = math_ops.cast(inputs, 'int32')
    out = embedding_ops.embedding_lookup(self.embeddings, inputs)
    return out

  def get_config(self):
    config = {
        'input_dim': self.input_dim,
        'output_dim': self.output_dim,
        'embeddings_initializer':
            initializers.serialize(self.embeddings_initializer),
        'embeddings_regularizer':
            regularizers.serialize(self.embeddings_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'embeddings_constraint':
            constraints.serialize(self.embeddings_constraint),
        'mask_zero': self.mask_zero,
        'input_length': self.input_length
    }
    base_config = super(Embedding, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
| 38.196721 | 80 | 0.67568 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras import constraints
from tensorflow.python.keras._impl.keras import initializers
from tensorflow.python.keras._impl.keras import regularizers
from tensorflow.python.keras._impl.keras.engine import Layer
from tensorflow.python.keras._impl.keras.utils import tf_utils
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.layers.Embedding')
class Embedding(Layer):
  """Maps positive integer indices to dense vectors of size output_dim.

  Must be the first layer of a model.  Input: 2D int tensor
  (batch_size, sequence_length).  Output: 3D tensor
  (batch_size, sequence_length, output_dim).
  """

  def __init__(self,
               input_dim,
               output_dim,
               embeddings_initializer='uniform',
               embeddings_regularizer=None,
               activity_regularizer=None,
               embeddings_constraint=None,
               mask_zero=False,
               input_length=None,
               **kwargs):
    if 'input_shape' not in kwargs:
      # Expose a static input shape when input_length is known.
      if input_length:
        kwargs['input_shape'] = (input_length,)
      else:
        kwargs['input_shape'] = (None,)
    dtype = kwargs.pop('dtype', K.floatx())
    super(Embedding, self).__init__(dtype=dtype, **kwargs)
    self.input_dim = input_dim
    self.output_dim = output_dim
    self.embeddings_initializer = initializers.get(embeddings_initializer)
    self.embeddings_regularizer = regularizers.get(embeddings_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.embeddings_constraint = constraints.get(embeddings_constraint)
    self.mask_zero = mask_zero
    self.input_length = input_length

  @tf_utils.shape_type_conversion
  def build(self, input_shape):
    # Single (input_dim, output_dim) lookup table.
    self.embeddings = self.add_weight(
        shape=(self.input_dim, self.output_dim),
        initializer=self.embeddings_initializer,
        name='embeddings',
        regularizer=self.embeddings_regularizer,
        constraint=self.embeddings_constraint)
    self.built = True

  def compute_mask(self, inputs, mask=None):
    # Index 0 is padding when mask_zero is set.
    if not self.mask_zero:
      return None
    return math_ops.not_equal(inputs, 0)

  @tf_utils.shape_type_conversion
  def compute_output_shape(self, input_shape):
    if self.input_length is None:
      return input_shape + (self.output_dim,)
    # input_length can be a tuple if the input is 3D or higher.
    if isinstance(self.input_length, (list, tuple)):
      in_lens = list(self.input_length)
    else:
      in_lens = [self.input_length]
    if len(in_lens) != len(input_shape) - 1:
      # BUGFIX: the ValueError was previously constructed but never raised.
      raise ValueError('"input_length" is %s, but received input has shape %s'
                       % (str(self.input_length), str(input_shape)))
    for i, (s1, s2) in enumerate(zip(in_lens, input_shape[1:])):
      if s1 is not None and s2 is not None and s1 != s2:
        # BUGFIX: likewise raised (previously a no-op expression).
        raise ValueError('"input_length" is %s, but received input has shape %s'
                         % (str(self.input_length), str(input_shape)))
      elif s1 is None:
        in_lens[i] = s2
    return (input_shape[0],) + tuple(in_lens) + (self.output_dim,)

  def call(self, inputs):
    dtype = K.dtype(inputs)
    if dtype != 'int32' and dtype != 'int64':
      # embedding_lookup requires integer indices.
      inputs = math_ops.cast(inputs, 'int32')
    out = embedding_ops.embedding_lookup(self.embeddings, inputs)
    return out

  def get_config(self):
    config = {
        'input_dim': self.input_dim,
        'output_dim': self.output_dim,
        'embeddings_initializer':
            initializers.serialize(self.embeddings_initializer),
        'embeddings_regularizer':
            regularizers.serialize(self.embeddings_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'embeddings_constraint':
            constraints.serialize(self.embeddings_constraint),
        'mask_zero': self.mask_zero,
        'input_length': self.input_length
    }
    base_config = super(Embedding, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
| true | true |
f73988562b7b327b20555dd522e268f576e4a131 | 568 | py | Python | salvia/protocols/introducer_protocol.py | mikando/salvia-blockchain | 02181d0b5a063374f01eea951570dbc661bddc34 | [
"Apache-2.0"
] | 6 | 2021-09-13T17:20:49.000Z | 2022-02-09T04:31:47.000Z | salvia/protocols/introducer_protocol.py | mikando/salvia-blockchain | 02181d0b5a063374f01eea951570dbc661bddc34 | [
"Apache-2.0"
] | 21 | 2021-09-20T00:56:54.000Z | 2022-03-22T01:12:12.000Z | salvia/protocols/introducer_protocol.py | mikando/salvia-blockchain | 02181d0b5a063374f01eea951570dbc661bddc34 | [
"Apache-2.0"
] | 9 | 2021-09-13T17:54:04.000Z | 2022-03-15T08:38:35.000Z | from dataclasses import dataclass
from typing import List
from salvia.types.peer_info import TimestampedPeerInfo
from salvia.util.streamable import Streamable, streamable
"""
Protocol to introducer
Note: When changing this file, also change protocol_message_types.py, and the protocol version in shared_protocol.py
"""
@dataclass(frozen=True)
@streamable
class RequestPeersIntroducer(Streamable):
    """
    Request message (no payload) asking the introducer for its full list of peers.
    """
@dataclass(frozen=True)
@streamable
class RespondPeersIntroducer(Streamable):
    """Introducer's reply: the peers it knows, each as a TimestampedPeerInfo."""
    peer_list: List[TimestampedPeerInfo]
| 22.72 | 116 | 0.795775 | from dataclasses import dataclass
from typing import List
from salvia.types.peer_info import TimestampedPeerInfo
from salvia.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class RequestPeersIntroducer(Streamable):
    """Empty request asking the introducer for its full peer list."""
@dataclass(frozen=True)
@streamable
class RespondPeersIntroducer(Streamable):
    """Reply carrying the introducer's known peers, each with a timestamp."""
    peer_list: List[TimestampedPeerInfo]
| true | true |
f73988ca2c8902727c2fd83b0ac9135c87264288 | 2,810 | py | Python | chaco/tests/test_grid_data_source.py | martinRenou/chaco | 1888da3ecee89f9b2d11900cda9333b32fc5e89a | [
"BSD-3-Clause"
] | null | null | null | chaco/tests/test_grid_data_source.py | martinRenou/chaco | 1888da3ecee89f9b2d11900cda9333b32fc5e89a | [
"BSD-3-Clause"
] | null | null | null | chaco/tests/test_grid_data_source.py | martinRenou/chaco | 1888da3ecee89f9b2d11900cda9333b32fc5e89a | [
"BSD-3-Clause"
] | null | null | null | """
Tests of GridDataSource behavior.
"""
import unittest
from numpy import array
from numpy.testing import assert_array_equal
from chaco.api import GridDataSource
from traits.testing.unittest_tools import UnittestTools
class GridDataSourceTestCase(UnittestTools, unittest.TestCase):
    """Behavioural tests for GridDataSource: construction, set_data, metadata."""

    def setUp(self):
        # Shared fixture: ascending x grid, descending y grid.
        self.source = GridDataSource(
            xdata=array([1, 2, 3]),
            ydata=array([1.5, 0.5, -0.5, -1.5]),
            sort_order=('ascending', 'descending'))

    def test_empty(self):
        # A default-constructed source exposes empty grids and zero bounds.
        empty = GridDataSource()
        self.assertEqual(empty.sort_order, ('none', 'none'))
        self.assertEqual(empty.index_dimension, 'image')
        self.assertEqual(empty.value_dimension, 'scalar')
        self.assertEqual(empty.metadata,
                         {"selections": [], "annotations": []})
        xd, yd = empty.get_data()
        assert_array_equal(xd.get_data(), array([]))
        assert_array_equal(yd.get_data(), array([]))
        self.assertEqual(empty.get_bounds(), ((0, 0), (0, 0)))

    def test_init(self):
        # The fixture's data, sort order and bounds survive construction.
        expected_x = array([1, 2, 3])
        expected_y = array([1.5, 0.5, -0.5, -1.5])
        self.assertEqual(self.source.sort_order, ('ascending', 'descending'))
        xd, yd = self.source.get_data()
        assert_array_equal(xd.get_data(), expected_x)
        assert_array_equal(yd.get_data(), expected_y)
        lower = (min(expected_x), min(expected_y))
        upper = (max(expected_x), max(expected_y))
        self.assertEqual(self.source.get_bounds(), (lower, upper))

    def test_set_data(self):
        # set_data replaces both grids and the sort order in one call.
        new_x = array([0, 2, 4])
        new_y = array([0, 1, 2, 3, 4, 5])
        self.source.set_data(xdata=new_x, ydata=new_y,
                             sort_order=('none', 'none'))
        self.assertEqual(self.source.sort_order, ('none', 'none'))
        xd, yd = self.source.get_data()
        assert_array_equal(xd.get_data(), new_x)
        assert_array_equal(yd.get_data(), new_y)
        self.assertEqual(self.source.get_bounds(),
                         ((min(new_x), min(new_y)),
                          (max(new_x), max(new_y))))

    def test_metadata(self):
        # Default metadata dict is created with empty selection/annotation lists.
        self.assertEqual(self.source.metadata,
                         {'annotations': [], 'selections': []})

    def test_metadata_changed(self):
        # Reassigning .metadata fires metadata_changed exactly once.
        with self.assertTraitChanges(self.source, 'metadata_changed', count=1):
            self.source.metadata = {'new_metadata': True}

    def test_metadata_items_changed(self):
        # In-place mutation of .metadata also fires metadata_changed once.
        with self.assertTraitChanges(self.source, 'metadata_changed', count=1):
            self.source.metadata['new_metadata'] = True
| 37.466667 | 84 | 0.623488 |
import unittest
from numpy import array
from numpy.testing import assert_array_equal
from chaco.api import GridDataSource
from traits.testing.unittest_tools import UnittestTools
class GridDataSourceTestCase(UnittestTools, unittest.TestCase):
    """GridDataSource unit tests (comment-stripped duplicate of the suite above)."""

    def setUp(self):
        # Fixture grid: x ascending, y descending.
        self.grid = GridDataSource(
            xdata=array([1, 2, 3]),
            ydata=array([1.5, 0.5, -0.5, -1.5]),
            sort_order=('ascending', 'descending'))

    def test_empty(self):
        # Defaults: no sorting, image/scalar dimensions, empty data, zero bounds.
        blank = GridDataSource()
        self.assertEqual(blank.sort_order, ('none', 'none'))
        self.assertEqual(blank.index_dimension, 'image')
        self.assertEqual(blank.value_dimension, 'scalar')
        self.assertEqual(blank.metadata,
                         {"selections": [], "annotations": []})
        xs, ys = blank.get_data()
        assert_array_equal(xs.get_data(), array([]))
        assert_array_equal(ys.get_data(), array([]))
        self.assertEqual(blank.get_bounds(), ((0, 0), (0, 0)))

    def test_init(self):
        # Constructor arguments round-trip through get_data/get_bounds.
        want_x = array([1, 2, 3])
        want_y = array([1.5, 0.5, -0.5, -1.5])
        self.assertEqual(self.grid.sort_order, ('ascending', 'descending'))
        xs, ys = self.grid.get_data()
        assert_array_equal(xs.get_data(), want_x)
        assert_array_equal(ys.get_data(), want_y)
        self.assertEqual(self.grid.get_bounds(),
                         ((min(want_x), min(want_y)),
                          (max(want_x), max(want_y))))

    def test_set_data(self):
        # Replacing the data also replaces the sort order and bounds.
        want_x = array([0, 2, 4])
        want_y = array([0, 1, 2, 3, 4, 5])
        self.grid.set_data(xdata=want_x, ydata=want_y,
                           sort_order=('none', 'none'))
        self.assertEqual(self.grid.sort_order, ('none', 'none'))
        xs, ys = self.grid.get_data()
        assert_array_equal(xs.get_data(), want_x)
        assert_array_equal(ys.get_data(), want_y)
        self.assertEqual(self.grid.get_bounds(),
                         ((min(want_x), min(want_y)),
                          (max(want_x), max(want_y))))

    def test_metadata(self):
        self.assertEqual(self.grid.metadata,
                         {'annotations': [], 'selections': []})

    def test_metadata_changed(self):
        # Rebinding .metadata triggers one metadata_changed event.
        with self.assertTraitChanges(self.grid, 'metadata_changed', count=1):
            self.grid.metadata = {'new_metadata': True}

    def test_metadata_items_changed(self):
        # Mutating the dict in place triggers one metadata_changed event.
        with self.assertTraitChanges(self.grid, 'metadata_changed', count=1):
            self.grid.metadata['new_metadata'] = True
| true | true |
f73988e65ec63a80b874f6af00124bc0d252e6d3 | 751 | py | Python | app/pizzarest/urls.py | LuisQBlanco/django-pizza-rest | f0ce591e58dc988375b8a79443293e1517fbc2b0 | [
"MIT"
] | null | null | null | app/pizzarest/urls.py | LuisQBlanco/django-pizza-rest | f0ce591e58dc988375b8a79443293e1517fbc2b0 | [
"MIT"
] | null | null | null | app/pizzarest/urls.py | LuisQBlanco/django-pizza-rest | f0ce591e58dc988375b8a79443293e1517fbc2b0 | [
"MIT"
] | null | null | null | """pizzarest URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
# Only the Django admin is routed; include app URLConfs in this list as needed.
urlpatterns = [
    path('admin/', admin.site.urls),
]
| 34.136364 | 77 | 0.70972 | from django.contrib import admin
from django.urls import path
# Route table: the Django admin site only.
urlpatterns = [
    path('admin/', admin.site.urls),
]
| true | true |
f73989fd13fd3fa05e6c36bc3e9eee18c7a46981 | 106 | py | Python | lab01/client/client_app/urls.py | Boris-Barboris/rsoi | 30b03f50549f7977d5ecb7788b8e22b789f8859f | [
"MIT"
] | null | null | null | lab01/client/client_app/urls.py | Boris-Barboris/rsoi | 30b03f50549f7977d5ecb7788b8e22b789f8859f | [
"MIT"
] | null | null | null | lab01/client/client_app/urls.py | Boris-Barboris/rsoi | 30b03f50549f7977d5ecb7788b8e22b789f8859f | [
"MIT"
] | null | null | null | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^', views.index),
] | 15.142857 | 33 | 0.632075 | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^', views.index),
] | true | true |
f7398a8a479eccf26faa8f62e461a5f188216d6f | 1,562 | py | Python | test/2020/test_2020_day7.py | TedCassirer/advent-of-code | fb87dfdbb48b44f864337750aa58a809dcf72392 | [
"MIT"
] | 1 | 2020-11-30T19:17:50.000Z | 2020-11-30T19:17:50.000Z | test/2020/test_2020_day7.py | TedCassirer/advent-of-code | fb87dfdbb48b44f864337750aa58a809dcf72392 | [
"MIT"
] | null | null | null | test/2020/test_2020_day7.py | TedCassirer/advent-of-code | fb87dfdbb48b44f864337750aa58a809dcf72392 | [
"MIT"
] | null | null | null | from aoc_cas.aoc2020 import day7 as aoc
def testPart1():
    # Worked example from the AoC 2020 day-7 statement: 4 outer bag
    # colours can eventually contain at least one shiny gold bag.
    # Note the rule lines keep a 4-space indent inside the literal;
    # presumably aoc.part1 strips per line — confirm against day7.
    data = """
    light red bags contain 1 bright white bag, 2 muted yellow bags.
    dark orange bags contain 3 bright white bags, 4 muted yellow bags.
    bright white bags contain 1 shiny gold bag.
    muted yellow bags contain 2 shiny gold bags, 9 faded blue bags.
    shiny gold bags contain 1 dark olive bag, 2 vibrant plum bags.
    dark olive bags contain 3 faded blue bags, 4 dotted black bags.
    vibrant plum bags contain 5 faded blue bags, 6 dotted black bags.
    faded blue bags contain no other bags.
    dotted black bags contain no other bags.
    """.strip()
    assert aoc.part1(data) == 4
def testPart2():
    # Same example rules: a shiny gold bag must contain 32 bags in total.
    data = """light red bags contain 1 bright white bag, 2 muted yellow bags.
dark orange bags contain 3 bright white bags, 4 muted yellow bags.
bright white bags contain 1 shiny gold bag.
muted yellow bags contain 2 shiny gold bags, 9 faded blue bags.
shiny gold bags contain 1 dark olive bag, 2 vibrant plum bags.
dark olive bags contain 3 faded blue bags, 4 dotted black bags.
vibrant plum bags contain 5 faded blue bags, 6 dotted black bags.
faded blue bags contain no other bags.
dotted black bags contain no other bags.
    """
    assert aoc.part2(data) == 32
    # Second example: a deeply nested chain holding 126 bags in total.
    data = """shiny gold bags contain 2 dark red bags.
dark red bags contain 2 dark orange bags.
dark orange bags contain 2 dark yellow bags.
dark yellow bags contain 2 dark green bags.
dark green bags contain 2 dark blue bags.
dark blue bags contain 2 dark violet bags.
dark violet bags contain no other bags.
    """
    assert aoc.part2(data) == 126
| 38.097561 | 77 | 0.752241 | from aoc_cas.aoc2020 import day7 as aoc
def testPart1():
    # Puzzle-statement example: 4 bag colours can hold a shiny gold bag.
    data = """
    light red bags contain 1 bright white bag, 2 muted yellow bags.
    dark orange bags contain 3 bright white bags, 4 muted yellow bags.
    bright white bags contain 1 shiny gold bag.
    muted yellow bags contain 2 shiny gold bags, 9 faded blue bags.
    shiny gold bags contain 1 dark olive bag, 2 vibrant plum bags.
    dark olive bags contain 3 faded blue bags, 4 dotted black bags.
    vibrant plum bags contain 5 faded blue bags, 6 dotted black bags.
    faded blue bags contain no other bags.
    dotted black bags contain no other bags.
    """.strip()
    assert aoc.part1(data) == 4
def testPart2():
    # Same rules: a shiny gold bag must hold 32 bags in total.
    data = """light red bags contain 1 bright white bag, 2 muted yellow bags.
dark orange bags contain 3 bright white bags, 4 muted yellow bags.
bright white bags contain 1 shiny gold bag.
muted yellow bags contain 2 shiny gold bags, 9 faded blue bags.
shiny gold bags contain 1 dark olive bag, 2 vibrant plum bags.
dark olive bags contain 3 faded blue bags, 4 dotted black bags.
vibrant plum bags contain 5 faded blue bags, 6 dotted black bags.
faded blue bags contain no other bags.
dotted black bags contain no other bags.
"""
    assert aoc.part2(data) == 32
    # Nested-chain example: 126 bags inside one shiny gold bag.
    data = """shiny gold bags contain 2 dark red bags.
dark red bags contain 2 dark orange bags.
dark orange bags contain 2 dark yellow bags.
dark yellow bags contain 2 dark green bags.
dark green bags contain 2 dark blue bags.
dark blue bags contain 2 dark violet bags.
dark violet bags contain no other bags.
"""
    assert aoc.part2(data) == 126
| true | true |
f7398b5c58c4df1dde303c88c55a586a55e6fd7f | 729 | py | Python | server/utils.py | shambu09/chat-bot | b84e74535091d71c9f26f3ab99829aad217cb18b | [
"MIT"
] | 1 | 2021-12-21T15:56:33.000Z | 2021-12-21T15:56:33.000Z | server/utils.py | shambu09/chat-bot-model | b84e74535091d71c9f26f3ab99829aad217cb18b | [
"MIT"
] | 1 | 2021-06-20T18:14:39.000Z | 2021-06-21T16:54:55.000Z | server/utils.py | shambu09/chat-bot-model | b84e74535091d71c9f26f3ab99829aad217cb18b | [
"MIT"
] | 1 | 2021-06-22T22:26:37.000Z | 2021-06-22T22:26:37.000Z | def write_qs(qs):
with open("qs.txt", "w") as f:
for i in qs:
f.write(i + "\n")
def write_ans(ans):
    # Persist the answers to ans.txt, one entry per line.
    with open("ans.txt", "w") as out:
        out.writelines(entry + "\n" for entry in ans)
def read_qs():
    # Load the questions from qs.txt; each stored line is one question.
    with open("qs.txt", "r") as src:
        return [line.rstrip("\n") for line in src]
def read_ans():
    # Load the answers from ans.txt; each stored line is one answer.
    with open("ans.txt", "r") as src:
        return [line.rstrip("\n") for line in src]
if __name__ == "__main__":
    # Round-trip smoke test: persist the corpus from the data module,
    # read it back, and verify lengths and contents match.
    from data import training_qs, answers
    write_qs(training_qs)
    qs = read_qs()
    write_ans(answers)
    ans = read_ans()
    assert(len(qs) == len(ans))
    assert(qs == training_qs)
assert(ans == answers) | 21.441176 | 41 | 0.533608 | def write_qs(qs):
with open("qs.txt", "w") as f:
for i in qs:
f.write(i + "\n")
def write_ans(ans):
    # One answer per line in ans.txt.
    with open("ans.txt", "w") as fh:
        fh.write("".join(a + "\n" for a in ans))
def read_qs():
    # Questions from qs.txt, trailing newlines removed.
    with open("qs.txt", "r") as fh:
        raw = fh.readlines()
    return [line.rstrip("\n") for line in raw]
def read_ans():
    # Answers from ans.txt, trailing newlines removed.
    with open("ans.txt", "r") as fh:
        raw = fh.readlines()
    return [line.rstrip("\n") for line in raw]
if __name__ == "__main__":
    # Round-trip check against the data module's corpus.
    from data import training_qs, answers
    write_qs(training_qs)
    qs = read_qs()
    write_ans(answers)
    ans = read_ans()
    assert(len(qs) == len(ans))
    assert(qs == training_qs)
assert(ans == answers) | true | true |
f7398b8ee2735d8542260e92ce02ce683faeb4b4 | 577 | py | Python | pipeline/table_figure_scripts/rename_abundance_file_datasets.py | dewyman/TALON-paper-2019 | 8644b34573d6a5924e8d84a234fd0fcbf010c233 | [
"MIT"
] | 4 | 2019-08-23T20:59:26.000Z | 2020-05-07T02:32:35.000Z | pipeline/table_figure_scripts/rename_abundance_file_datasets.py | dewyman/TALON-paper-2019 | 8644b34573d6a5924e8d84a234fd0fcbf010c233 | [
"MIT"
] | null | null | null | pipeline/table_figure_scripts/rename_abundance_file_datasets.py | dewyman/TALON-paper-2019 | 8644b34573d6a5924e8d84a234fd0fcbf010c233 | [
"MIT"
] | 1 | 2020-04-28T07:34:09.000Z | 2020-04-28T07:34:09.000Z | import pandas as pd
from collections import defaultdict
import argparse
parser = argparse.ArgumentParser(description=\
'Renames PacBio and ONT datasets with more\
intelligent names')
parser.add_argument('--f', help='file to swap dataset col names in')
args = parser.parse_args()
f = args.f
# read in mapping file
map_df = pd.read_csv('dataset_id_name_map.tsv', sep='\t')
map_df.set_index('dataset_id', inplace=True)
map_dict = map_df.to_dict()
df = pd.read_csv(f, sep='\t')
df.rename(columns=map_dict['dataset_name'], inplace=True)
df.to_csv(f, sep='\t', index=False)
| 26.227273 | 68 | 0.745234 | import pandas as pd
from collections import defaultdict
import argparse
parser = argparse.ArgumentParser(description=\
'Renames PacBio and ONT datasets with more\
intelligent names')
parser.add_argument('--f', help='file to swap dataset col names in')
args = parser.parse_args()
f = args.f
map_df = pd.read_csv('dataset_id_name_map.tsv', sep='\t')
map_df.set_index('dataset_id', inplace=True)
map_dict = map_df.to_dict()
df = pd.read_csv(f, sep='\t')
df.rename(columns=map_dict['dataset_name'], inplace=True)
df.to_csv(f, sep='\t', index=False)
| true | true |
f7398bf1eff15fbb3d9d6bd1f250fd98451b79cc | 20,748 | py | Python | scf/_vhf.py | gmwang18/pyscf | fcd6877751661c8a9743c1c872a4a2b65f6dd7ac | [
"BSD-2-Clause"
] | null | null | null | scf/_vhf.py | gmwang18/pyscf | fcd6877751661c8a9743c1c872a4a2b65f6dd7ac | [
"BSD-2-Clause"
] | null | null | null | scf/_vhf.py | gmwang18/pyscf | fcd6877751661c8a9743c1c872a4a2b65f6dd7ac | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
import sys
import ctypes
import _ctypes
import numpy
import pyscf.lib
from pyscf import gto
from pyscf.gto.moleintor import make_cintopt
libcvhf = pyscf.lib.load_library('libcvhf')
def _fpointer(name):
    """Resolve symbol ``name`` in libcvhf and wrap its address in a c_void_p."""
    return ctypes.c_void_p(_ctypes.dlsym(libcvhf._handle, name))
class VHFOpt(object):
    """Handle on the CVHFOpt struct used by the direct-SCF C drivers.

    Bundles the integral function pointer, the cint optimizer, the
    prescreening function and (optionally) a density-matrix screening
    condition named by ``dmcondname``.
    """
    def __init__(self, mol, intor,
                 prescreen='CVHFnoscreen', qcondname=None, dmcondname=None):
        # NULL struct pointer; CVHFinit_optimizer receives it by reference
        # and fills it in on the C side.
        self._this = ctypes.POINTER(_CVHFOpt)()
        #print self._this.contents, expect ValueError: NULL pointer access
        self._intor = _fpointer(intor)
        self._cintopt = pyscf.lib.c_null_ptr()
        self._dmcondname = dmcondname
        self.init_cvhf_direct(mol, intor, prescreen, qcondname)
    def init_cvhf_direct(self, mol, intor, prescreen, qcondname):
        """Create the cint optimizer and install prescreen/q_cond on the struct."""
        # C-contiguous copies with the exact dtypes the C library expects.
        c_atm = numpy.asarray(mol._atm, dtype=numpy.int32, order='C')
        c_bas = numpy.asarray(mol._bas, dtype=numpy.int32, order='C')
        c_env = numpy.asarray(mol._env, dtype=numpy.double, order='C')
        natm = ctypes.c_int(c_atm.shape[0])
        nbas = ctypes.c_int(c_bas.shape[0])
        self._cintopt = make_cintopt(c_atm, c_bas, c_env, intor)
#        libcvhf.CVHFnr_optimizer(ctypes.byref(self._this),
#                                 c_atm.ctypes.data_as(ctypes.c_void_p), natm,
#                                 c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
#                                 c_env.ctypes.data_as(ctypes.c_void_p))
        libcvhf.CVHFinit_optimizer(ctypes.byref(self._this),
                                   c_atm.ctypes.data_as(ctypes.c_void_p), natm,
                                   c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
                                   c_env.ctypes.data_as(ctypes.c_void_p))
        self._this.contents.fprescreen = _fpointer(prescreen)
        # q_cond is only precomputed when screening is actually enabled.
        if prescreen != 'CVHFnoscreen':
            fsetqcond = getattr(libcvhf, qcondname)
            fsetqcond(self._this,
                      c_atm.ctypes.data_as(ctypes.c_void_p), natm,
                      c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
                      c_env.ctypes.data_as(ctypes.c_void_p))
    @property
    def direct_scf_tol(self):
        # Screening threshold is stored directly on the C struct.
        return self._this.contents.direct_scf_cutoff
    @direct_scf_tol.setter
    def direct_scf_tol(self, v):
        self._this.contents.direct_scf_cutoff = v
    def set_dm(self, dm, atm, bas, env):
        """Refresh the dm_cond screening table for density matrix (or list) dm."""
        if self._dmcondname is not None:
            c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')
            c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')
            c_env = numpy.asarray(env, dtype=numpy.double, order='C')
            natm = ctypes.c_int(c_atm.shape[0])
            nbas = ctypes.c_int(c_bas.shape[0])
            # A single 2D ndarray counts as one DM; anything else is a sequence.
            if isinstance(dm, numpy.ndarray) and dm.ndim == 2:
                n_dm = 1
            else:
                n_dm = len(dm)
            dm = numpy.asarray(dm, order='C')
            fsetdm = getattr(libcvhf, self._dmcondname)
            fsetdm(self._this,
                   dm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(n_dm),
                   c_atm.ctypes.data_as(ctypes.c_void_p), natm,
                   c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
                   c_env.ctypes.data_as(ctypes.c_void_p))
class _CVHFOpt(ctypes.Structure):
    """Python mirror of the C struct behind VHFOpt.

    Field order and types must match the CVHFOpt definition in libcvhf;
    do not reorder or retype them.
    """
    _fields_ = [('nbas', ctypes.c_int),
                ('_padding', ctypes.c_int),
                ('direct_scf_cutoff', ctypes.c_double),
                ('q_cond', ctypes.c_void_p),
                ('dm_cond', ctypes.c_void_p),
                ('fprescreen', ctypes.c_void_p),
                ('r_vkscreen', ctypes.c_void_p)]
################################################
# for general DM
# hermi = 0 : arbitrary
# hermi = 1 : hermitian
# hermi = 2 : anti-hermitian
################################################
def incore(eri, dm, hermi=0):
    """Build J/K matrices from an in-core real ERI array.

    ``eri`` may carry 4-fold symmetry (2D, npair x npair) or 8-fold
    symmetry (1D, packed); the layout is detected from its size.
    ``hermi`` follows the module convention above.  Returns (vj, vk)
    as (nao, nao) arrays.
    """
    assert(not numpy.iscomplexobj(eri))
    eri = numpy.ascontiguousarray(eri)
    dm = numpy.ascontiguousarray(dm)
    nao = dm.shape[0]
    vj = numpy.empty((nao,nao))
    vk = numpy.empty((nao,nao))
    npair = nao*(nao+1)//2
    if eri.ndim == 2 and npair*npair == eri.size: # 4-fold symmetry eri
        fdrv = getattr(libcvhf, 'CVHFnrs4_incore_drv')
        # 'ijkl,kl->ij'
        fvj = _fpointer('CVHFics4_kl_s2ij')
        # 'ijkl,il->jk'
        fvk = _fpointer('CVHFics4_il_s1jk')
        # or
        ## 'ijkl,ij->kl'
        #fvj = _fpointer('CVHFics4_ij_s2kl')
        ## 'ijkl,jk->il'
        #fvk = _fpointer('CVHFics4_jk_s1il')
        tridm = dm
    elif eri.ndim == 1 and npair*(npair+1)//2 == eri.size: # 8-fold symmetry eri
        fdrv = getattr(libcvhf, 'CVHFnrs8_incore_drv')
        fvj = _fpointer('CVHFics8_tridm_vj')
        # Hermitian DM allows the s2il kernel which fills only a triangle.
        if hermi == 1:
            fvk = _fpointer('CVHFics8_jk_s2il')
        else:
            fvk = _fpointer('CVHFics8_jk_s1il')
        tridm = pyscf.lib.pack_tril(pyscf.lib.transpose_sum(dm))
        # transpose_sum doubled the diagonal; scale it back so the packed
        # lower-triangle contraction counts diagonal elements once.
        i = numpy.arange(nao)
        tridm[i*(i+1)//2+i] *= .5
    else:
        raise RuntimeError('Array shape not consistent: DM %s, eri %s'
                           % (dm.shape, eri.shape))
    fdrv(eri.ctypes.data_as(ctypes.c_void_p),
         tridm.ctypes.data_as(ctypes.c_void_p),
         vj.ctypes.data_as(ctypes.c_void_p),
         dm.ctypes.data_as(ctypes.c_void_p),
         vk.ctypes.data_as(ctypes.c_void_p),
         ctypes.c_int(nao), fvj, fvk)
    # The kernels fill triangles; symmetrize according to hermi.
    if hermi != 0:
        vj = pyscf.lib.hermi_triu(vj, hermi)
        vk = pyscf.lib.hermi_triu(vk, hermi)
    else:
        vj = pyscf.lib.hermi_triu(vj, 1)
    return vj, vk
# use cint2e_sph as cintor, CVHFnrs8_ij_s2kl, CVHFnrs8_jk_s2il as fjk to call
# direct_mapdm
def direct(dms, atm, bas, env, vhfopt=None, hermi=0):
    """Compute J/K matrices with the direct (integral-driven) nrs8 driver.

    ``dms`` is one (nao, nao) density matrix or a sequence of them.
    Returns an array of shape (2, nao, nao) for a single DM, otherwise
    (2, n_dm, nao, nao), with vj in [0] and vk in [1].
    """
    c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')
    c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')
    c_env = numpy.asarray(env, dtype=numpy.double, order='C')
    natm = ctypes.c_int(c_atm.shape[0])
    nbas = ctypes.c_int(c_bas.shape[0])
    # Normalize to a C-contiguous sequence of DMs.
    if isinstance(dms, numpy.ndarray) and dms.ndim == 2:
        n_dm = 1
        nao = dms.shape[0]
        dms = (numpy.asarray(dms, order='C'),)
    else:
        n_dm = len(dms)
        nao = dms[0].shape[0]
        dms = numpy.asarray(dms, order='C')
    # Take integral/optimizer handles from vhfopt when supplied, otherwise
    # build a fresh cint optimizer with no prescreening.
    if vhfopt is None:
        cintor = _fpointer('cint2e_sph')
        cintopt = make_cintopt(c_atm, c_bas, c_env, 'cint2e_sph')
        cvhfopt = pyscf.lib.c_null_ptr()
    else:
        vhfopt.set_dm(dms, atm, bas, env)
        cvhfopt = vhfopt._this
        cintopt = vhfopt._cintopt
        cintor = vhfopt._intor
    fdrv = getattr(libcvhf, 'CVHFnr_direct_drv')
    fdot = _fpointer('CVHFdot_nrs8')
    fvj = _fpointer('CVHFnrs8_ji_s2kl')
    # Hermitian DM permits the s2kj kernel (triangular fill) for vk.
    if hermi == 1:
        fvk = _fpointer('CVHFnrs8_li_s2kj')
    else:
        fvk = _fpointer('CVHFnrs8_li_s1kj')
    vjk = numpy.empty((2,n_dm,nao,nao))
    # Parallel arrays of (dm pointer, output pointer, kernel) consumed by the
    # C driver: first n_dm entries build vj, the next n_dm build vk.
    fjk = (ctypes.c_void_p*(2*n_dm))()
    dmsptr = (ctypes.c_void_p*(2*n_dm))()
    vjkptr = (ctypes.c_void_p*(2*n_dm))()
    for i in range(n_dm):
        dmsptr[i] = dms[i].ctypes.data_as(ctypes.c_void_p)
        vjkptr[i] = vjk[0,i].ctypes.data_as(ctypes.c_void_p)
        fjk[i] = fvj
    for i in range(n_dm):
        dmsptr[n_dm+i] = dms[i].ctypes.data_as(ctypes.c_void_p)
        vjkptr[n_dm+i] = vjk[1,i].ctypes.data_as(ctypes.c_void_p)
        fjk[n_dm+i] = fvk
    # Full shell range for all four ERI indices.
    shls_slice = (ctypes.c_int*8)(*([0, c_bas.shape[0]]*4))
    ao_loc = numpy.asarray(make_ao_loc(bas), dtype=numpy.int32)
    fdrv(cintor, fdot, fjk, dmsptr, vjkptr,
         ctypes.c_int(n_dm*2), ctypes.c_int(1),
         shls_slice, ao_loc.ctypes.data_as(ctypes.c_void_p), cintopt, cvhfopt,
         c_atm.ctypes.data_as(ctypes.c_void_p), natm,
         c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
         c_env.ctypes.data_as(ctypes.c_void_p))
    # vj must be symmetric
    for idm in range(n_dm):
        vjk[0,idm] = pyscf.lib.hermi_triu(vjk[0,idm], 1)
    if hermi != 0: # vk depends
        for idm in range(n_dm):
            vjk[1,idm] = pyscf.lib.hermi_triu(vjk[1,idm], hermi)
    # Drop the n_dm axis for the single-DM case.
    if n_dm == 1:
        vjk = vjk.reshape(2,nao,nao)
    return vjk
# call all fjk for each dm, the return array has len(dms)*len(jkdescript)*ncomp components
# jkdescript: 'ij->s1kl', 'kl->s2ij', ...
def direct_mapdm(intor, aosym, jkdescript,
                 dms, ncomp, atm, bas, env, vhfopt=None, shls_slice=None):
    """Contract 2e integrals with density matrices, applying every descriptor
    in ``jkdescript`` to every matrix in ``dms`` (njk * n_dm contractions).
    Returns one array per descriptor (a single array when njk == 1), with
    singleton n_dm/ncomp axes dropped.
    """
    assert(aosym in ('s8', 's4', 's2ij', 's2kl', 's1',
                     'a4ij', 'a4kl', 'a2ij', 'a2kl'))
    c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')
    c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')
    c_env = numpy.asarray(env, dtype=numpy.double, order='C')
    natm = ctypes.c_int(c_atm.shape[0])
    nbas = ctypes.c_int(c_bas.shape[0])
    # normalize dms to a sequence of C-contiguous matrices
    if isinstance(dms, numpy.ndarray) and dms.ndim == 2:
        n_dm = 1
        nao = dms.shape[0]
        dms = (numpy.asarray(dms, order='C'),)
    else:
        n_dm = len(dms)
        nao = dms[0].shape[0]
        dms = [numpy.asarray(dm, order='C') for dm in dms]
    if isinstance(jkdescript, str):
        njk = 1
        jkdescript = (jkdescript,)
    else:
        njk = len(jkdescript)
    if vhfopt is None:
        cintor = _fpointer(intor)
        cintopt = make_cintopt(c_atm, c_bas, c_env, intor)
        cvhfopt = pyscf.lib.c_null_ptr()
    else:
        # reuse the optimizer's screening data; refresh its dm bounds first
        vhfopt.set_dm(dms, atm, bas, env)
        cvhfopt = vhfopt._this
        cintopt = vhfopt._cintopt
        cintor = vhfopt._intor
    fdrv = getattr(libcvhf, 'CVHFnr_direct_drv')
    dotsym = _INTSYMAP[aosym]
    fdot = _fpointer('CVHFdot_nr'+dotsym)
    if shls_slice is None:
        shls_slice = (0, c_bas.shape[0])*4  # all shells on all four indices
    ao_loc = numpy.asarray(make_ao_loc(bas), dtype=numpy.int32)
    vjk = []
    descr_sym = [x.split('->') for x in jkdescript]
    # parallel C arrays of (dm pointer, output pointer, jk kernel) triples
    fjk = (ctypes.c_void_p*(njk*n_dm))()
    dmsptr = (ctypes.c_void_p*(njk*n_dm))()
    vjkptr = (ctypes.c_void_p*(njk*n_dm))()
    for i, (dmsym, vsym) in enumerate(descr_sym):
        if dmsym in ('ij', 'kl', 'il', 'kj'):
            # only the transposed index orders are implemented in libcvhf
            sys.stderr.write('not support DM description %s, transpose to %s\n' %
                             (dmsym, dmsym[::-1]))
            dmsym = dmsym[::-1]
        f1 = _fpointer('CVHFnr%s_%s_%s'%(aosym, dmsym, vsym))
        vshape = (n_dm,ncomp) + get_dims(vsym[-2:], shls_slice, ao_loc)
        vjk.append(numpy.empty(vshape))
        for j in range(n_dm):
            assert(dms[j].shape == get_dims(dmsym, shls_slice, ao_loc))
            dmsptr[i*n_dm+j] = dms[j].ctypes.data_as(ctypes.c_void_p)
            vjkptr[i*n_dm+j] = vjk[i][j].ctypes.data_as(ctypes.c_void_p)
            fjk[i*n_dm+j] = f1
    shls_slice = (ctypes.c_int*8)(*shls_slice)
    fdrv(cintor, fdot, fjk, dmsptr, vjkptr,
         ctypes.c_int(njk*n_dm), ctypes.c_int(ncomp),
         shls_slice, ao_loc.ctypes.data_as(ctypes.c_void_p), cintopt, cvhfopt,
         c_atm.ctypes.data_as(ctypes.c_void_p), natm,
         c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
         c_env.ctypes.data_as(ctypes.c_void_p))
    # drop singleton leading axes so callers get the natural shape
    if n_dm * ncomp == 1:
        vjk = [v.reshape(v.shape[2:]) for v in vjk]
    elif n_dm == 1:
        vjk = [v.reshape((ncomp,)+v.shape[2:]) for v in vjk]
    elif ncomp == 1:
        vjk = [v.reshape((n_dm,)+v.shape[2:]) for v in vjk]
    if njk == 1:
        vjk = vjk[0]
    return vjk
# for density matrices in dms, bind each dm to a jk operator
# jkdescript: 'ij->s1kl', 'kl->s2ij', ...
def direct_bindm(intor, aosym, jkdescript,
                 dms, ncomp, atm, bas, env, vhfopt=None, shls_slice=None):
    """Like direct_mapdm, but binds each density matrix to its own JK
    descriptor one-to-one: dms[i] is contracted with jkdescript[i]
    (requires len(jkdescript) == len(dms)).
    """
    assert(aosym in ('s8', 's4', 's2ij', 's2kl', 's1',
                     'a4ij', 'a4kl', 'a2ij', 'a2kl'))
    c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')
    c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')
    c_env = numpy.asarray(env, dtype=numpy.double, order='C')
    natm = ctypes.c_int(c_atm.shape[0])
    nbas = ctypes.c_int(c_bas.shape[0])
    # normalize dms to a sequence of C-contiguous matrices
    if isinstance(dms, numpy.ndarray) and dms.ndim == 2:
        n_dm = 1
        nao = dms.shape[0]
        dms = (numpy.asarray(dms, order='C'),)
    else:
        n_dm = len(dms)
        nao = dms[0].shape[0]
        dms = [numpy.asarray(dm, order='C') for dm in dms]
    if isinstance(jkdescript, str):
        njk = 1
        jkdescript = (jkdescript,)
    else:
        njk = len(jkdescript)
    assert(njk == n_dm)  # one descriptor per density matrix
    if vhfopt is None:
        cintor = _fpointer(intor)
        cintopt = make_cintopt(c_atm, c_bas, c_env, intor)
        cvhfopt = pyscf.lib.c_null_ptr()
    else:
        # reuse the optimizer's screening data; refresh its dm bounds first
        vhfopt.set_dm(dms, atm, bas, env)
        cvhfopt = vhfopt._this
        cintopt = vhfopt._cintopt
        cintor = vhfopt._intor
    fdrv = getattr(libcvhf, 'CVHFnr_direct_drv')
    dotsym = _INTSYMAP[aosym]
    fdot = _fpointer('CVHFdot_nr'+dotsym)
    if shls_slice is None:
        shls_slice = (0, c_bas.shape[0])*4  # all shells on all four indices
    ao_loc = numpy.asarray(make_ao_loc(bas), dtype=numpy.int32)
    vjk = []
    descr_sym = [x.split('->') for x in jkdescript]
    # parallel C arrays of (dm pointer, output pointer, jk kernel) triples
    fjk = (ctypes.c_void_p*(n_dm))()
    dmsptr = (ctypes.c_void_p*(n_dm))()
    vjkptr = (ctypes.c_void_p*(n_dm))()
    for i, (dmsym, vsym) in enumerate(descr_sym):
        if dmsym in ('ij', 'kl', 'il', 'kj'):
            # only the transposed index orders are implemented in libcvhf
            sys.stderr.write('not support DM description %s, transpose to %s\n' %
                             (dmsym, dmsym[::-1]))
            dmsym = dmsym[::-1]
        f1 = _fpointer('CVHFnr%s_%s_%s'%(aosym, dmsym, vsym))
        assert(dms[i].shape == get_dims(dmsym, shls_slice, ao_loc))
        vshape = (ncomp,) + get_dims(vsym[-2:], shls_slice, ao_loc)
        vjk.append(numpy.empty(vshape))
        dmsptr[i] = dms[i].ctypes.data_as(ctypes.c_void_p)
        vjkptr[i] = vjk[i].ctypes.data_as(ctypes.c_void_p)
        fjk[i] = f1
    shls_slice = (ctypes.c_int*8)(*shls_slice)
    fdrv(cintor, fdot, fjk, dmsptr, vjkptr,
         ctypes.c_int(n_dm), ctypes.c_int(ncomp),
         shls_slice, ao_loc.ctypes.data_as(ctypes.c_void_p), cintopt, cvhfopt,
         c_atm.ctypes.data_as(ctypes.c_void_p), natm,
         c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
         c_env.ctypes.data_as(ctypes.c_void_p))
    # drop singleton component axis so callers get the natural shape
    if ncomp == 1:
        vjk = [v.reshape(v.shape[1:]) for v in vjk]
    if njk == 1:
        vjk = vjk[0]
    return vjk
# 8-fold permutation symmetry
def int2e_sph(atm, bas, env):
    """Compute all spherical two-electron integrals, packed with 8-fold
    permutation symmetry into a 1D array of length npair*(npair+1)/2
    where npair = nao*(nao+1)/2.
    """
    c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')
    c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')
    c_env = numpy.asarray(env, dtype=numpy.double, order='C')
    natm = ctypes.c_int(c_atm.shape[0])
    nbas = ctypes.c_int(c_bas.shape[0])
    libcvhf.CINTtot_cgto_spheric.restype = ctypes.c_int
    nao = libcvhf.CINTtot_cgto_spheric(c_bas.ctypes.data_as(ctypes.c_void_p), nbas)
    nao_pair = nao*(nao+1)//2  # unique (i,j) pairs with i >= j
    eri = numpy.empty((nao_pair*(nao_pair+1)//2))
    libcvhf.int2e_sph(eri.ctypes.data_as(ctypes.c_void_p),
                      c_atm.ctypes.data_as(ctypes.c_void_p), natm,
                      c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
                      c_env.ctypes.data_as(ctypes.c_void_p))
    return eri
################################################################
# relativistic
def rdirect_mapdm(intor, aosym, jkdescript,
                  dms, ncomp, atm, bas, env, vhfopt=None):
    """Relativistic direct J/K driver: apply every descriptor in
    ``jkdescript`` to every density matrix in ``dms`` (njk * n_dm
    contractions).  Returns complex matrices, one entry per descriptor.
    """
    assert(aosym in ('s8', 's4', 's2ij', 's2kl', 's1',
                     'a4ij', 'a4kl', 'a2ij', 'a2kl'))
    c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')
    c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')
    c_env = numpy.asarray(env, dtype=numpy.double, order='C')
    natm = ctypes.c_int(c_atm.shape[0])
    nbas = ctypes.c_int(c_bas.shape[0])
    # normalize dms to C-contiguous complex matrices
    if isinstance(dms, numpy.ndarray) and dms.ndim == 2:
        n_dm = 1
        nao = dms.shape[0]
        dms = (numpy.asarray(dms, order='C', dtype=numpy.complex128),)
    else:
        n_dm = len(dms)
        nao = dms[0].shape[0]
        dms = numpy.asarray(dms, order='C', dtype=numpy.complex128)
    if isinstance(jkdescript, str):
        njk = 1
        jkdescript = (jkdescript,)
    else:
        njk = len(jkdescript)
    if vhfopt is None:
        cintor = _fpointer(intor)
        cintopt = make_cintopt(c_atm, c_bas, c_env, intor)
        cvhfopt = pyscf.lib.c_null_ptr()
    else:
        # reuse the optimizer's screening data; refresh its dm bounds first
        vhfopt.set_dm(dms, atm, bas, env)
        cvhfopt = vhfopt._this
        cintopt = vhfopt._cintopt
        cintor = vhfopt._intor
    fdrv = getattr(libcvhf, 'CVHFr_direct_drv')
    dotsym = _INTSYMAP[aosym]
    fdot = _fpointer('CVHFdot_r'+dotsym)
    unpackas = _INTUNPACKMAP_R[aosym]
    descr_sym = [x.split('->') for x in jkdescript]
    fjk = (ctypes.c_void_p*(njk*n_dm))()
    dm1 = (ctypes.c_void_p*(njk*n_dm))()
    for i, (dmsym, vsym) in enumerate(descr_sym):
        f1 = _fpointer('CVHFr%s_%s_%s'%(unpackas, dmsym, vsym))
        for j in range(n_dm):
            dm1[i*n_dm+j] = dms[j].ctypes.data_as(ctypes.c_void_p)
            fjk[i*n_dm+j] = f1
    # BUGFIX: numpy.complex was a deprecated alias of the builtin `complex`
    # and was removed in NumPy 1.24; use the explicit complex128 dtype.
    vjk = numpy.empty((njk,n_dm*ncomp,nao,nao), dtype=numpy.complex128)
    fdrv(cintor, fdot, fjk, dm1,
         vjk.ctypes.data_as(ctypes.c_void_p),
         ctypes.c_int(njk*n_dm), ctypes.c_int(ncomp),
         cintopt, cvhfopt,
         c_atm.ctypes.data_as(ctypes.c_void_p), natm,
         c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
         c_env.ctypes.data_as(ctypes.c_void_p))
    # drop singleton axes so callers get the natural shape
    if n_dm * ncomp == 1:
        vjk = vjk.reshape(njk,nao,nao)
    if njk == 1:
        vjk = vjk.reshape(vjk.shape[1:])
    return vjk
# for density matrices in dms, bind each dm to a jk operator
def rdirect_bindm(intor, aosym, jkdescript,
                  dms, ncomp, atm, bas, env, vhfopt=None):
    """Relativistic direct J/K driver with one-to-one binding: dms[i] is
    contracted with jkdescript[i].  Returns complex matrices.
    """
    assert(aosym in ('s8', 's4', 's2ij', 's2kl', 's1',
                     'a4ij', 'a4kl', 'a2ij', 'a2kl'))
    c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')
    c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')
    c_env = numpy.asarray(env, dtype=numpy.double, order='C')
    natm = ctypes.c_int(c_atm.shape[0])
    nbas = ctypes.c_int(c_bas.shape[0])
    # normalize dms to C-contiguous complex matrices
    if isinstance(dms, numpy.ndarray) and dms.ndim == 2:
        n_dm = 1
        nao = dms.shape[0]
        dms = (numpy.asarray(dms, order='C', dtype=numpy.complex128),)
    else:
        n_dm = len(dms)
        nao = dms[0].shape[0]
        dms = numpy.asarray(dms, order='C', dtype=numpy.complex128)
    if isinstance(jkdescript, str):
        njk = 1
        jkdescript = (jkdescript,)
    else:
        njk = len(jkdescript)
    assert(njk == n_dm)  # one descriptor per density matrix
    if vhfopt is None:
        cintor = _fpointer(intor)
        cintopt = make_cintopt(c_atm, c_bas, c_env, intor)
        cvhfopt = pyscf.lib.c_null_ptr()
    else:
        # reuse the optimizer's screening data; refresh its dm bounds first
        vhfopt.set_dm(dms, atm, bas, env)
        cvhfopt = vhfopt._this
        cintopt = vhfopt._cintopt
        cintor = vhfopt._intor
    fdrv = getattr(libcvhf, 'CVHFr_direct_drv')
    dotsym = _INTSYMAP[aosym]
    fdot = _fpointer('CVHFdot_r'+dotsym)
    unpackas = _INTUNPACKMAP_R[aosym]
    descr_sym = [x.split('->') for x in jkdescript]
    fjk = (ctypes.c_void_p*(n_dm))()
    dm1 = (ctypes.c_void_p*(n_dm))()
    for i, (dmsym, vsym) in enumerate(descr_sym):
        f1 = _fpointer('CVHFr%s_%s_%s'%(unpackas, dmsym, vsym))
        dm1[i] = dms[i].ctypes.data_as(ctypes.c_void_p)
        fjk[i] = f1
    # BUGFIX: numpy.complex was a deprecated alias of the builtin `complex`
    # and was removed in NumPy 1.24; use the explicit complex128 dtype.
    vjk = numpy.empty((njk,ncomp,nao,nao), dtype=numpy.complex128)
    fdrv(cintor, fdot, fjk, dm1,
         vjk.ctypes.data_as(ctypes.c_void_p),
         ctypes.c_int(n_dm), ctypes.c_int(ncomp),
         cintopt, cvhfopt,
         c_atm.ctypes.data_as(ctypes.c_void_p), natm,
         c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
         c_env.ctypes.data_as(ctypes.c_void_p))
    # drop singleton axes so callers get the natural shape
    if ncomp == 1:
        vjk = vjk.reshape(njk,nao,nao)
    if njk == 1:
        vjk = vjk.reshape(vjk.shape[1:])
    return vjk
# 'a4ij': anti-symm between ij, symm between kl
# 'a4kl': anti-symm between kl, symm between ij
# 'a2ij': anti-symm between ij,
# 'a2kl': anti-symm between kl,
_INTSYMAP= {
's8' : 's8' ,
's4' : 's4' ,
's2ij': 's2ij',
's2kl': 's2kl',
's1' : 's1' ,
'a4ij': 's4' ,
'a4kl': 's4' ,
'a2ij': 's2ij',
'a2kl': 's2kl',
}
_INTUNPACKMAP_R = {
's8' : 's8' ,
's4' : 's4' ,
's2ij': 's2ij',
's2kl': 's2kl',
's1' : 's1' ,
'a4ij': 'ah4' ,
'a4kl': 'ha4' ,
'a2ij': 'ah2ij',
'a2kl': 'ha2kl',
}
def make_ao_loc(bas, cart=False):
    """Return int32 offsets (length nbas+1) of each shell's first AO:
    ao_loc[i] is the starting AO index of shell i, ao_loc[-1] the total."""
    ang = bas[:, gto.ANG_OF]
    nctr = bas[:, gto.NCTR_OF]
    if cart:
        # cartesian shell: (l+1)(l+2)/2 components per contraction
        per_shell = (ang + 1) * (ang + 2) // 2 * nctr
    else:
        # spherical shell: 2l+1 components per contraction
        per_shell = (2 * ang + 1) * nctr
    offsets = numpy.empty(len(bas) + 1, dtype=numpy.int32)
    offsets[0] = 0
    offsets[1:] = numpy.cumsum(per_shell)
    return offsets
_SHLINDEX = {'i': 0, 'j': 2, 'k': 4, 'l': 6}
def get_dims(descr_sym, shls_slice, ao_loc):
i = _SHLINDEX[descr_sym[0]]
j = _SHLINDEX[descr_sym[1]]
di = ao_loc[shls_slice[i+1]] - ao_loc[shls_slice[i]]
dj = ao_loc[shls_slice[j+1]] - ao_loc[shls_slice[j]]
return (di,dj)
| 37.05 | 90 | 0.587623 |
import sys
import ctypes
import _ctypes
import numpy
import pyscf.lib
from pyscf import gto
from pyscf.gto.moleintor import make_cintopt
libcvhf = pyscf.lib.load_library('libcvhf')
def _fpointer(name):
    # Resolve symbol `name` inside libcvhf and wrap it as a raw C pointer so
    # it can be passed directly into the ctypes driver calls below.
    return ctypes.c_void_p(_ctypes.dlsym(libcvhf._handle, name))
class VHFOpt(object):
    """Python handle for the C-level CVHFOpt structure: owns the integral
    screening data (q_cond / dm_cond) and the function pointers that the
    direct J/K drivers pass into libcvhf.
    """
    def __init__(self, mol, intor,
                 prescreen='CVHFnoscreen', qcondname=None, dmcondname=None):
        self._this = ctypes.POINTER(_CVHFOpt)()  # filled by CVHFinit_optimizer
        self._intor = _fpointer(intor)
        self._cintopt = pyscf.lib.c_null_ptr()
        self._dmcondname = dmcondname  # C routine updating dm_cond, if any
        self.init_cvhf_direct(mol, intor, prescreen, qcondname)
    def init_cvhf_direct(self, mol, intor, prescreen, qcondname):
        # Allocate the C optimizer, install the prescreen function and, when
        # screening is enabled, precompute the q_cond shell-pair bounds.
        c_atm = numpy.asarray(mol._atm, dtype=numpy.int32, order='C')
        c_bas = numpy.asarray(mol._bas, dtype=numpy.int32, order='C')
        c_env = numpy.asarray(mol._env, dtype=numpy.double, order='C')
        natm = ctypes.c_int(c_atm.shape[0])
        nbas = ctypes.c_int(c_bas.shape[0])
        self._cintopt = make_cintopt(c_atm, c_bas, c_env, intor)
        libcvhf.CVHFinit_optimizer(ctypes.byref(self._this),
                                   c_atm.ctypes.data_as(ctypes.c_void_p), natm,
                                   c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
                                   c_env.ctypes.data_as(ctypes.c_void_p))
        self._this.contents.fprescreen = _fpointer(prescreen)
        if prescreen != 'CVHFnoscreen':
            # qcondname is required whenever a real prescreen is requested
            fsetqcond = getattr(libcvhf, qcondname)
            fsetqcond(self._this,
                      c_atm.ctypes.data_as(ctypes.c_void_p), natm,
                      c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
                      c_env.ctypes.data_as(ctypes.c_void_p))
    @property
    def direct_scf_tol(self):
        # Screening threshold read/written directly on the C structure.
        return self._this.contents.direct_scf_cutoff
    @direct_scf_tol.setter
    def direct_scf_tol(self, v):
        self._this.contents.direct_scf_cutoff = v
    def set_dm(self, dm, atm, bas, env):
        # Refresh the density-based screening bounds (dm_cond) via the
        # registered C routine; no-op when no dmcondname was configured.
        if self._dmcondname is not None:
            c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')
            c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')
            c_env = numpy.asarray(env, dtype=numpy.double, order='C')
            natm = ctypes.c_int(c_atm.shape[0])
            nbas = ctypes.c_int(c_bas.shape[0])
            if isinstance(dm, numpy.ndarray) and dm.ndim == 2:
                n_dm = 1
            else:
                n_dm = len(dm)
            dm = numpy.asarray(dm, order='C')
            fsetdm = getattr(libcvhf, self._dmcondname)
            fsetdm(self._this,
                   dm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(n_dm),
                   c_atm.ctypes.data_as(ctypes.c_void_p), natm,
                   c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
                   c_env.ctypes.data_as(ctypes.c_void_p))
class _CVHFOpt(ctypes.Structure):
    # Mirror of the C struct CVHFOpt in libcvhf; field order and types must
    # match the C layout exactly.
    _fields_ = [('nbas', ctypes.c_int),
                ('_padding', ctypes.c_int),
                ('direct_scf_cutoff', ctypes.c_double),
                ('q_cond', ctypes.c_void_p),    # integral upper-bound table
                ('dm_cond', ctypes.c_void_p),   # density screening bounds
                ('fprescreen', ctypes.c_void_p),
                ('r_vkscreen', ctypes.c_void_p)]
ptr()
else:
vhfopt.set_dm(dms, atm, bas, env)
cvhfopt = vhfopt._this
cintopt = vhfopt._cintopt
cintor = vhfopt._intor
fdrv = getattr(libcvhf, 'CVHFnr_direct_drv')
fdot = _fpointer('CVHFdot_nrs8')
fvj = _fpointer('CVHFnrs8_ji_s2kl')
if hermi == 1:
fvk = _fpointer('CVHFnrs8_li_s2kj')
else:
fvk = _fpointer('CVHFnrs8_li_s1kj')
vjk = numpy.empty((2,n_dm,nao,nao))
fjk = (ctypes.c_void_p*(2*n_dm))()
dmsptr = (ctypes.c_void_p*(2*n_dm))()
vjkptr = (ctypes.c_void_p*(2*n_dm))()
for i in range(n_dm):
dmsptr[i] = dms[i].ctypes.data_as(ctypes.c_void_p)
vjkptr[i] = vjk[0,i].ctypes.data_as(ctypes.c_void_p)
fjk[i] = fvj
for i in range(n_dm):
dmsptr[n_dm+i] = dms[i].ctypes.data_as(ctypes.c_void_p)
vjkptr[n_dm+i] = vjk[1,i].ctypes.data_as(ctypes.c_void_p)
fjk[n_dm+i] = fvk
shls_slice = (ctypes.c_int*8)(*([0, c_bas.shape[0]]*4))
ao_loc = numpy.asarray(make_ao_loc(bas), dtype=numpy.int32)
fdrv(cintor, fdot, fjk, dmsptr, vjkptr,
ctypes.c_int(n_dm*2), ctypes.c_int(1),
shls_slice, ao_loc.ctypes.data_as(ctypes.c_void_p), cintopt, cvhfopt,
c_atm.ctypes.data_as(ctypes.c_void_p), natm,
c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
c_env.ctypes.data_as(ctypes.c_void_p))
for idm in range(n_dm):
vjk[0,idm] = pyscf.lib.hermi_triu(vjk[0,idm], 1)
if hermi != 0:
for idm in range(n_dm):
vjk[1,idm] = pyscf.lib.hermi_triu(vjk[1,idm], hermi)
if n_dm == 1:
vjk = vjk.reshape(2,nao,nao)
return vjk
def direct_mapdm(intor, aosym, jkdescript,
                 dms, ncomp, atm, bas, env, vhfopt=None, shls_slice=None):
    """Contract 2e integrals with density matrices, applying every descriptor
    in ``jkdescript`` to every matrix in ``dms`` (njk * n_dm contractions).
    Returns one array per descriptor (a single array when njk == 1), with
    singleton n_dm/ncomp axes dropped.
    """
    assert(aosym in ('s8', 's4', 's2ij', 's2kl', 's1',
                     'a4ij', 'a4kl', 'a2ij', 'a2kl'))
    c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')
    c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')
    c_env = numpy.asarray(env, dtype=numpy.double, order='C')
    natm = ctypes.c_int(c_atm.shape[0])
    nbas = ctypes.c_int(c_bas.shape[0])
    # normalize dms to a sequence of C-contiguous matrices
    if isinstance(dms, numpy.ndarray) and dms.ndim == 2:
        n_dm = 1
        nao = dms.shape[0]
        dms = (numpy.asarray(dms, order='C'),)
    else:
        n_dm = len(dms)
        nao = dms[0].shape[0]
        dms = [numpy.asarray(dm, order='C') for dm in dms]
    if isinstance(jkdescript, str):
        njk = 1
        jkdescript = (jkdescript,)
    else:
        njk = len(jkdescript)
    if vhfopt is None:
        cintor = _fpointer(intor)
        cintopt = make_cintopt(c_atm, c_bas, c_env, intor)
        cvhfopt = pyscf.lib.c_null_ptr()
    else:
        # reuse the optimizer's screening data; refresh its dm bounds first
        vhfopt.set_dm(dms, atm, bas, env)
        cvhfopt = vhfopt._this
        cintopt = vhfopt._cintopt
        cintor = vhfopt._intor
    fdrv = getattr(libcvhf, 'CVHFnr_direct_drv')
    dotsym = _INTSYMAP[aosym]
    fdot = _fpointer('CVHFdot_nr'+dotsym)
    if shls_slice is None:
        shls_slice = (0, c_bas.shape[0])*4  # all shells on all four indices
    ao_loc = numpy.asarray(make_ao_loc(bas), dtype=numpy.int32)
    vjk = []
    descr_sym = [x.split('->') for x in jkdescript]
    # parallel C arrays of (dm pointer, output pointer, jk kernel) triples
    fjk = (ctypes.c_void_p*(njk*n_dm))()
    dmsptr = (ctypes.c_void_p*(njk*n_dm))()
    vjkptr = (ctypes.c_void_p*(njk*n_dm))()
    for i, (dmsym, vsym) in enumerate(descr_sym):
        if dmsym in ('ij', 'kl', 'il', 'kj'):
            # only the transposed index orders are implemented in libcvhf
            sys.stderr.write('not support DM description %s, transpose to %s\n' %
                             (dmsym, dmsym[::-1]))
            dmsym = dmsym[::-1]
        f1 = _fpointer('CVHFnr%s_%s_%s'%(aosym, dmsym, vsym))
        vshape = (n_dm,ncomp) + get_dims(vsym[-2:], shls_slice, ao_loc)
        vjk.append(numpy.empty(vshape))
        for j in range(n_dm):
            assert(dms[j].shape == get_dims(dmsym, shls_slice, ao_loc))
            dmsptr[i*n_dm+j] = dms[j].ctypes.data_as(ctypes.c_void_p)
            vjkptr[i*n_dm+j] = vjk[i][j].ctypes.data_as(ctypes.c_void_p)
            fjk[i*n_dm+j] = f1
    shls_slice = (ctypes.c_int*8)(*shls_slice)
    fdrv(cintor, fdot, fjk, dmsptr, vjkptr,
         ctypes.c_int(njk*n_dm), ctypes.c_int(ncomp),
         shls_slice, ao_loc.ctypes.data_as(ctypes.c_void_p), cintopt, cvhfopt,
         c_atm.ctypes.data_as(ctypes.c_void_p), natm,
         c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
         c_env.ctypes.data_as(ctypes.c_void_p))
    # drop singleton leading axes so callers get the natural shape
    if n_dm * ncomp == 1:
        vjk = [v.reshape(v.shape[2:]) for v in vjk]
    elif n_dm == 1:
        vjk = [v.reshape((ncomp,)+v.shape[2:]) for v in vjk]
    elif ncomp == 1:
        vjk = [v.reshape((n_dm,)+v.shape[2:]) for v in vjk]
    if njk == 1:
        vjk = vjk[0]
    return vjk
def direct_bindm(intor, aosym, jkdescript,
                 dms, ncomp, atm, bas, env, vhfopt=None, shls_slice=None):
    """Like direct_mapdm, but binds each density matrix to its own JK
    descriptor one-to-one: dms[i] is contracted with jkdescript[i]
    (requires len(jkdescript) == len(dms)).
    """
    assert(aosym in ('s8', 's4', 's2ij', 's2kl', 's1',
                     'a4ij', 'a4kl', 'a2ij', 'a2kl'))
    c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')
    c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')
    c_env = numpy.asarray(env, dtype=numpy.double, order='C')
    natm = ctypes.c_int(c_atm.shape[0])
    nbas = ctypes.c_int(c_bas.shape[0])
    # normalize dms to a sequence of C-contiguous matrices
    if isinstance(dms, numpy.ndarray) and dms.ndim == 2:
        n_dm = 1
        nao = dms.shape[0]
        dms = (numpy.asarray(dms, order='C'),)
    else:
        n_dm = len(dms)
        nao = dms[0].shape[0]
        dms = [numpy.asarray(dm, order='C') for dm in dms]
    if isinstance(jkdescript, str):
        njk = 1
        jkdescript = (jkdescript,)
    else:
        njk = len(jkdescript)
    assert(njk == n_dm)  # one descriptor per density matrix
    if vhfopt is None:
        cintor = _fpointer(intor)
        cintopt = make_cintopt(c_atm, c_bas, c_env, intor)
        cvhfopt = pyscf.lib.c_null_ptr()
    else:
        # reuse the optimizer's screening data; refresh its dm bounds first
        vhfopt.set_dm(dms, atm, bas, env)
        cvhfopt = vhfopt._this
        cintopt = vhfopt._cintopt
        cintor = vhfopt._intor
    fdrv = getattr(libcvhf, 'CVHFnr_direct_drv')
    dotsym = _INTSYMAP[aosym]
    fdot = _fpointer('CVHFdot_nr'+dotsym)
    if shls_slice is None:
        shls_slice = (0, c_bas.shape[0])*4  # all shells on all four indices
    ao_loc = numpy.asarray(make_ao_loc(bas), dtype=numpy.int32)
    vjk = []
    descr_sym = [x.split('->') for x in jkdescript]
    # parallel C arrays of (dm pointer, output pointer, jk kernel) triples
    fjk = (ctypes.c_void_p*(n_dm))()
    dmsptr = (ctypes.c_void_p*(n_dm))()
    vjkptr = (ctypes.c_void_p*(n_dm))()
    for i, (dmsym, vsym) in enumerate(descr_sym):
        if dmsym in ('ij', 'kl', 'il', 'kj'):
            # only the transposed index orders are implemented in libcvhf
            sys.stderr.write('not support DM description %s, transpose to %s\n' %
                             (dmsym, dmsym[::-1]))
            dmsym = dmsym[::-1]
        f1 = _fpointer('CVHFnr%s_%s_%s'%(aosym, dmsym, vsym))
        assert(dms[i].shape == get_dims(dmsym, shls_slice, ao_loc))
        vshape = (ncomp,) + get_dims(vsym[-2:], shls_slice, ao_loc)
        vjk.append(numpy.empty(vshape))
        dmsptr[i] = dms[i].ctypes.data_as(ctypes.c_void_p)
        vjkptr[i] = vjk[i].ctypes.data_as(ctypes.c_void_p)
        fjk[i] = f1
    shls_slice = (ctypes.c_int*8)(*shls_slice)
    fdrv(cintor, fdot, fjk, dmsptr, vjkptr,
         ctypes.c_int(n_dm), ctypes.c_int(ncomp),
         shls_slice, ao_loc.ctypes.data_as(ctypes.c_void_p), cintopt, cvhfopt,
         c_atm.ctypes.data_as(ctypes.c_void_p), natm,
         c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
         c_env.ctypes.data_as(ctypes.c_void_p))
    # drop singleton component axis so callers get the natural shape
    if ncomp == 1:
        vjk = [v.reshape(v.shape[1:]) for v in vjk]
    if njk == 1:
        vjk = vjk[0]
    return vjk
def int2e_sph(atm, bas, env):
    """Compute all spherical two-electron integrals, packed with 8-fold
    permutation symmetry into a 1D array of length npair*(npair+1)/2
    where npair = nao*(nao+1)/2.
    """
    c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')
    c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')
    c_env = numpy.asarray(env, dtype=numpy.double, order='C')
    natm = ctypes.c_int(c_atm.shape[0])
    nbas = ctypes.c_int(c_bas.shape[0])
    libcvhf.CINTtot_cgto_spheric.restype = ctypes.c_int
    nao = libcvhf.CINTtot_cgto_spheric(c_bas.ctypes.data_as(ctypes.c_void_p), nbas)
    nao_pair = nao*(nao+1)//2  # unique (i,j) pairs with i >= j
    eri = numpy.empty((nao_pair*(nao_pair+1)//2))
    libcvhf.int2e_sph(eri.ctypes.data_as(ctypes.c_void_p),
                      c_atm.ctypes.data_as(ctypes.c_void_p), natm,
                      c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
                      c_env.ctypes.data_as(ctypes.c_void_p))
    return eri
s.c_void_p), natm,
c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
c_env.ctypes.data_as(ctypes.c_void_p))
if n_dm * ncomp == 1:
vjk = vjk.reshape(njk,nao,nao)
if njk == 1:
vjk = vjk.reshape(vjk.shape[1:])
return vjk
def rdirect_bindm(intor, aosym, jkdescript,
                  dms, ncomp, atm, bas, env, vhfopt=None):
    """Relativistic direct J/K driver with one-to-one binding: dms[i] is
    contracted with jkdescript[i].  Returns complex matrices.
    """
    assert(aosym in ('s8', 's4', 's2ij', 's2kl', 's1',
                     'a4ij', 'a4kl', 'a2ij', 'a2kl'))
    c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')
    c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')
    c_env = numpy.asarray(env, dtype=numpy.double, order='C')
    natm = ctypes.c_int(c_atm.shape[0])
    nbas = ctypes.c_int(c_bas.shape[0])
    # normalize dms to C-contiguous complex matrices
    if isinstance(dms, numpy.ndarray) and dms.ndim == 2:
        n_dm = 1
        nao = dms.shape[0]
        dms = (numpy.asarray(dms, order='C', dtype=numpy.complex128),)
    else:
        n_dm = len(dms)
        nao = dms[0].shape[0]
        dms = numpy.asarray(dms, order='C', dtype=numpy.complex128)
    if isinstance(jkdescript, str):
        njk = 1
        jkdescript = (jkdescript,)
    else:
        njk = len(jkdescript)
    assert(njk == n_dm)  # one descriptor per density matrix
    if vhfopt is None:
        cintor = _fpointer(intor)
        cintopt = make_cintopt(c_atm, c_bas, c_env, intor)
        cvhfopt = pyscf.lib.c_null_ptr()
    else:
        # reuse the optimizer's screening data; refresh its dm bounds first
        vhfopt.set_dm(dms, atm, bas, env)
        cvhfopt = vhfopt._this
        cintopt = vhfopt._cintopt
        cintor = vhfopt._intor
    fdrv = getattr(libcvhf, 'CVHFr_direct_drv')
    dotsym = _INTSYMAP[aosym]
    fdot = _fpointer('CVHFdot_r'+dotsym)
    unpackas = _INTUNPACKMAP_R[aosym]
    descr_sym = [x.split('->') for x in jkdescript]
    fjk = (ctypes.c_void_p*(n_dm))()
    dm1 = (ctypes.c_void_p*(n_dm))()
    for i, (dmsym, vsym) in enumerate(descr_sym):
        f1 = _fpointer('CVHFr%s_%s_%s'%(unpackas, dmsym, vsym))
        dm1[i] = dms[i].ctypes.data_as(ctypes.c_void_p)
        fjk[i] = f1
    # BUGFIX: numpy.complex was a deprecated alias of the builtin `complex`
    # and was removed in NumPy 1.24; use the explicit complex128 dtype.
    vjk = numpy.empty((njk,ncomp,nao,nao), dtype=numpy.complex128)
    fdrv(cintor, fdot, fjk, dm1,
         vjk.ctypes.data_as(ctypes.c_void_p),
         ctypes.c_int(n_dm), ctypes.c_int(ncomp),
         cintopt, cvhfopt,
         c_atm.ctypes.data_as(ctypes.c_void_p), natm,
         c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
         c_env.ctypes.data_as(ctypes.c_void_p))
    # drop singleton axes so callers get the natural shape
    if ncomp == 1:
        vjk = vjk.reshape(njk,nao,nao)
    if njk == 1:
        vjk = vjk.reshape(vjk.shape[1:])
    return vjk
_INTSYMAP= {
's8' : 's8' ,
's4' : 's4' ,
's2ij': 's2ij',
's2kl': 's2kl',
's1' : 's1' ,
'a4ij': 's4' ,
'a4kl': 's4' ,
'a2ij': 's2ij',
'a2kl': 's2kl',
}
_INTUNPACKMAP_R = {
's8' : 's8' ,
's4' : 's4' ,
's2ij': 's2ij',
's2kl': 's2kl',
's1' : 's1' ,
'a4ij': 'ah4' ,
'a4kl': 'ha4' ,
'a2ij': 'ah2ij',
'a2kl': 'ha2kl',
}
def make_ao_loc(bas, cart=False):
    # Offsets of each shell's first AO function: ao_loc[i] is the starting
    # AO index of shell i; ao_loc[-1] is the total AO count.
    l = bas[:,gto.ANG_OF]
    if cart:
        # cartesian shell: (l+1)(l+2)/2 components per contraction
        dims = (l+1)*(l+2)//2 * bas[:,gto.NCTR_OF]
    else:
        # spherical shell: 2l+1 components per contraction
        dims = (l*2+1) * bas[:,gto.NCTR_OF]
    ao_loc = numpy.empty(len(bas)+1, dtype=numpy.int32)
    ao_loc[0] = 0
    dims.cumsum(dtype=numpy.int32, out=ao_loc[1:])
    return ao_loc
_SHLINDEX = {'i': 0, 'j': 2, 'k': 4, 'l': 6}
def get_dims(descr_sym, shls_slice, ao_loc):
i = _SHLINDEX[descr_sym[0]]
j = _SHLINDEX[descr_sym[1]]
di = ao_loc[shls_slice[i+1]] - ao_loc[shls_slice[i]]
dj = ao_loc[shls_slice[j+1]] - ao_loc[shls_slice[j]]
return (di,dj)
| true | true |
f7398c1f7d30ab2bf048039b2b0e23837965d1ae | 16,869 | py | Python | yolov5/models/common.py | mrzhuzhe/yunru | faa7380a5363f654f1dc8f5d53b077d9f33bff6f | [
"MIT"
] | null | null | null | yolov5/models/common.py | mrzhuzhe/yunru | faa7380a5363f654f1dc8f5d53b077d9f33bff6f | [
"MIT"
] | null | null | null | yolov5/models/common.py | mrzhuzhe/yunru | faa7380a5363f654f1dc8f5d53b077d9f33bff6f | [
"MIT"
] | null | null | null | # YOLOv5 common modules
import math
from copy import copy
from pathlib import Path
import numpy as np
import pandas as pd
import requests
import torch
import torch.nn as nn
from PIL import Image
from torch.cuda import amp
from utils.datasets import letterbox
from utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh, save_one_box
from utils.plots import colors, plot_one_box
from utils.torch_utils import time_synchronized
def autopad(k, p=None):  # kernel, padding
    """Return 'same' padding for a kernel size (int or per-dim sequence),
    unless an explicit padding ``p`` is supplied."""
    if p is not None:
        return p
    if isinstance(k, int):
        return k // 2
    return [x // 2 for x in k]
def DWConv(c1, c2, k=1, s=1, act=True):
    """Depthwise convolution: a Conv block whose group count is gcd(c1, c2)."""
    groups = math.gcd(c1, c2)
    return Conv(c1, c2, k, s, g=groups, act=act)
class Conv(nn.Module):
    """Standard convolution block: Conv2d -> BatchNorm2d -> activation."""

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
        super(Conv, self).__init__()
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
        self.bn = nn.BatchNorm2d(c2)
        # act=True -> SiLU; an nn.Module -> used as-is; anything else -> identity
        if act is True:
            self.act = nn.SiLU()
        elif isinstance(act, nn.Module):
            self.act = act
        else:
            self.act = nn.Identity()

    def forward(self, x):
        return self.act(self.bn(self.conv(x)))

    def fuseforward(self, x):
        # used after conv+bn fusion, when bn is folded into conv weights
        return self.act(self.conv(x))
class TransformerLayer(nn.Module):
    """Single transformer encoder layer (https://arxiv.org/abs/2010.11929);
    LayerNorm layers are omitted for better performance."""

    def __init__(self, c, num_heads):
        super().__init__()
        self.q = nn.Linear(c, c, bias=False)
        self.k = nn.Linear(c, c, bias=False)
        self.v = nn.Linear(c, c, bias=False)
        self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)
        self.fc1 = nn.Linear(c, c, bias=False)
        self.fc2 = nn.Linear(c, c, bias=False)

    def forward(self, x):
        # self-attention with residual connection
        attn_out, _ = self.ma(self.q(x), self.k(x), self.v(x))
        x = attn_out + x
        # two-linear feed-forward (no nonlinearity) with residual connection
        return self.fc2(self.fc1(x)) + x
class TransformerBlock(nn.Module):
    # Vision Transformer https://arxiv.org/abs/2010.11929
    # Optionally projects the input to c2 channels, flattens the spatial map
    # into a token sequence, adds a learned position embedding, then applies
    # num_layers TransformerLayer encoders and restores the spatial layout.
    def __init__(self, c1, c2, num_heads, num_layers):
        super().__init__()
        self.conv = None
        if c1 != c2:
            self.conv = Conv(c1, c2)  # channel projection when c1 != c2
        self.linear = nn.Linear(c2, c2)  # learnable position embedding
        self.tr = nn.Sequential(*[TransformerLayer(c2, num_heads) for _ in range(num_layers)])
        self.c2 = c2
    def forward(self, x):
        if self.conv is not None:
            x = self.conv(x)
        # NOTE(review): the last two dims are named (w, h) here but torch
        # convention is (h, w) — order does not matter for the reshape below.
        b, _, w, h = x.shape
        p = x.flatten(2)      # (b, c2, w*h)
        p = p.unsqueeze(0)    # (1, b, c2, w*h)
        p = p.transpose(0, 3) # (w*h, b, c2, 1)
        p = p.squeeze(3)      # (w*h, b, c2): sequence-first layout for attention
        e = self.linear(p)    # position embedding computed from the tokens
        x = p + e
        x = self.tr(x)
        # reverse the flattening: back to (b, c2, w, h)
        x = x.unsqueeze(3)
        x = x.transpose(0, 3)
        x = x.reshape(b, self.c2, w, h)
        return x
class Bottleneck(nn.Module):
    """Standard bottleneck: 1x1 reduce -> 3x3 conv, with an optional
    residual add when input and output channels match."""

    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, shortcut, groups, expansion
        super(Bottleneck, self).__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = Conv(hidden, c2, 3, 1, g=g)
        # residual connection is only valid when shapes line up
        self.add = shortcut and c1 == c2

    def forward(self, x):
        out = self.cv2(self.cv1(x))
        return x + out if self.add else out
class BottleneckCSP(nn.Module):
    # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
    # Splits the input into a bottleneck branch (cv1 -> m -> cv3) and a bypass
    # branch (cv2), concatenates them and fuses with BN + LeakyReLU + cv4.
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super(BottleneckCSP, self).__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
        self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
        self.cv4 = Conv(2 * c_, c2, 1, 1)
        self.bn = nn.BatchNorm2d(2 * c_)  # applied to cat(cv2, cv3)
        self.act = nn.LeakyReLU(0.1, inplace=True)
        # n stacked bottlenecks with expansion 1.0 (constant channel width)
        self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
    def forward(self, x):
        y1 = self.cv3(self.m(self.cv1(x)))  # bottleneck branch
        y2 = self.cv2(x)                    # bypass branch
        return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))
class C3(nn.Module):
    # CSP Bottleneck with 3 convolutions: simplified BottleneckCSP without the
    # extra BN/activation on the concatenated branches.
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super(C3, self).__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c1, c_, 1, 1)
        self.cv3 = Conv(2 * c_, c2, 1)  # act=FReLU(c2)
        # n stacked bottlenecks with expansion 1.0 (constant channel width)
        self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
        # self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)])
    def forward(self, x):
        # bottleneck branch and bypass branch, concatenated on channels
        return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1))
class C3TR(C3):
    # C3 module with a TransformerBlock replacing the Bottleneck stack
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        super().__init__(c1, c2, n, shortcut, g, e)
        c_ = int(c2 * e)  # must match the branch width set up by C3
        self.m = TransformerBlock(c_, c_, 4, n)  # 4 attention heads, n layers
class SPP(nn.Module):
    """Spatial pyramid pooling (YOLOv3-SPP): concatenate the input with
    stride-1 max-pools at several kernel sizes, then fuse with a 1x1 conv."""

    def __init__(self, c1, c2, k=(5, 9, 13)):
        super(SPP, self).__init__()
        hidden = c1 // 2  # hidden channels
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = Conv(hidden * (len(k) + 1), c2, 1, 1)
        # 'same'-padded pools so every branch keeps the spatial size
        pools = [nn.MaxPool2d(kernel_size=size, stride=1, padding=size // 2) for size in k]
        self.m = nn.ModuleList(pools)

    def forward(self, x):
        x = self.cv1(x)
        pooled = [pool(x) for pool in self.m]
        return self.cv2(torch.cat([x] + pooled, 1))
class Focus(nn.Module):
    # Focus wh information into c-space: slice the image into 4 interleaved
    # sub-grids (every other pixel in each direction), stack them on the
    # channel axis (c -> 4c, spatial halved), then convolve.
    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
        super(Focus, self).__init__()
        self.conv = Conv(c1 * 4, c2, k, s, p, g, act)
        # self.contract = Contract(gain=2)
    def forward(self, x):  # x(b,c,w,h) -> y(b,4c,w/2,h/2)
        # the four slices pick the (even,even), (odd,even), (even,odd),
        # (odd,odd) pixel sub-grids respectively
        return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1))
        # return self.conv(self.contract(x))
class Contract(nn.Module):
    """Fold spatial detail into channels:
    (b, c, h, w) -> (b, c*gain^2, h/gain, w/gain); h and w must be
    divisible by gain."""

    def __init__(self, gain=2):
        super().__init__()
        self.gain = gain

    def forward(self, x):
        b, c, h, w = x.size()
        s = self.gain
        # split each spatial axis into (coarse, fine) factors ...
        y = x.view(b, c, h // s, s, w // s, s)
        # ... move the fine factors in front of the channel axis ...
        y = y.permute(0, 3, 5, 1, 2, 4).contiguous()
        # ... and merge them into the channel dimension
        return y.view(b, c * s * s, h // s, w // s)
class Expand(nn.Module):
    """Unfold channels into spatial detail:
    (b, c, h, w) -> (b, c/gain^2, h*gain, w*gain); c must be divisible
    by gain^2."""

    def __init__(self, gain=2):
        super().__init__()
        self.gain = gain

    def forward(self, x):
        b, c, h, w = x.size()
        s = self.gain
        # split the channel axis into two spatial sub-factors and the rest ...
        y = x.view(b, s, s, c // s ** 2, h, w)
        # ... interleave the sub-factors with the existing spatial axes ...
        y = y.permute(0, 3, 4, 1, 5, 2).contiguous()
        # ... and merge into the enlarged spatial dimensions
        return y.view(b, c // s ** 2, h * s, w * s)
class Concat(nn.Module):
    """Concatenate a list of tensors along a chosen dimension."""

    def __init__(self, dimension=1):
        super(Concat, self).__init__()
        self.d = dimension

    def forward(self, x):
        tensors = list(x)
        return torch.cat(tensors, dim=self.d)
class NMS(nn.Module):
    # Non-Maximum Suppression (NMS) module: wraps non_max_suppression() so it
    # can be appended to a model as its final layer.
    conf = 0.25  # confidence threshold
    iou = 0.45  # IoU threshold
    classes = None  # (optional list) filter by class
    max_det = 1000  # maximum number of detections per image

    def __init__(self):
        super(NMS, self).__init__()

    def forward(self, x):
        # x[0] is the raw inference output; returns the filtered detections
        return non_max_suppression(x[0], self.conf, iou_thres=self.iou, classes=self.classes, max_det=self.max_det)
class AutoShape(nn.Module):
    # input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
    conf = 0.25  # NMS confidence threshold
    iou = 0.45  # NMS IoU threshold
    classes = None  # (optional list) filter by class
    max_det = 1000  # maximum number of detections per image
    def __init__(self, model):
        """Wrap ``model`` (switched to eval mode) for shape-robust inference."""
        super(AutoShape, self).__init__()
        self.model = model.eval()
    def autoshape(self):
        """No-op: the model is already wrapped."""
        print('AutoShape already enabled, skipping... ')  # model already converted to model.autoshape()
        return self
    @torch.no_grad()
    def forward(self, imgs, size=640, augment=False, profile=False):
        """Pre-process, run inference and NMS; returns a ``Detections`` object.

        A raw ``torch.Tensor`` input bypasses pre-processing entirely.
        """
        # Inference from various sources. For height=640, width=1280, RGB images example inputs are:
        #   filename:   imgs = 'data/images/zidane.jpg'
        #   URI:             = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg'
        #   OpenCV:          = cv2.imread('image.jpg')[:,:,::-1]  # HWC BGR to RGB x(640,1280,3)
        #   PIL:             = Image.open('image.jpg')  # HWC x(640,1280,3)
        #   numpy:           = np.zeros((640,1280,3))  # HWC
        #   torch:           = torch.zeros(16,3,320,640)  # BCHW (scaled to size=640, 0-1 values)
        #   multiple:        = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...]  # list of images
        t = [time_synchronized()]
        p = next(self.model.parameters())  # for device and type
        if isinstance(imgs, torch.Tensor):  # torch
            with amp.autocast(enabled=p.device.type != 'cpu'):
                return self.model(imgs.to(p.device).type_as(p), augment, profile)  # inference
        # Pre-process: normalise every input into a contiguous HWC array.
        n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs])  # number of images, list of images
        shape0, shape1, files = [], [], []  # image and inference shapes, filenames
        for i, im in enumerate(imgs):
            f = f'image{i}'  # filename
            if isinstance(im, str):  # filename or uri
                im, f = np.asarray(Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im)), im
            elif isinstance(im, Image.Image):  # PIL Image
                im, f = np.asarray(im), getattr(im, 'filename', f) or f
            files.append(Path(f).with_suffix('.jpg').name)
            if im.shape[0] < 5:  # image in CHW
                im = im.transpose((1, 2, 0))  # reverse dataloader .transpose(2, 0, 1)
            im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3)  # enforce 3ch input
            s = im.shape[:2]  # HWC
            shape0.append(s)  # image shape
            g = (size / max(s))  # gain
            shape1.append([y * g for y in s])
            imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im)  # update
        # NOTE(review): self.stride and self.names are expected to be attached
        # to this wrapper externally -- they are not set in this class; confirm.
        shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)]  # inference shape
        x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs]  # pad
        x = np.stack(x, 0) if n > 1 else x[0][None]  # stack
        x = np.ascontiguousarray(x.transpose((0, 3, 1, 2)))  # BHWC to BCHW
        x = torch.from_numpy(x).to(p.device).type_as(p) / 255.  # uint8 to fp16/32
        t.append(time_synchronized())
        with amp.autocast(enabled=p.device.type != 'cpu'):
            # Inference
            y = self.model(x, augment, profile)[0]  # forward
            t.append(time_synchronized())
            # Post-process: NMS then rescale boxes back to original image sizes.
            y = non_max_suppression(y, self.conf, iou_thres=self.iou, classes=self.classes, max_det=self.max_det)  # NMS
            for i in range(n):
                scale_coords(shape1, y[i][:, :4], shape0[i])
            t.append(time_synchronized())
            return Detections(imgs, y, files, t, self.names, x.shape)
class Detections:
    # detections class for YOLOv5 inference results
    def __init__(self, imgs, pred, files, times=None, names=None, shape=None):
        """Bundle per-image predictions with display/export helpers.

        imgs:  list of HWC numpy images
        pred:  list of (n, 6) tensors -- columns (xyxy, conf, cls)
        files: list of source filenames
        times: four timestamps bracketing pre-process / inference / NMS
        names: class-index -> class-name mapping
        shape: inference BCHW shape
        """
        super(Detections, self).__init__()
        d = pred[0].device  # device
        gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs]  # normalizations
        self.imgs = imgs  # list of images as numpy arrays
        self.pred = pred  # list of tensors pred[0] = (xyxy, conf, cls)
        self.names = names  # class names
        self.files = files  # image filenames
        self.times = times  # raw timestamps, forwarded by tolist()
        self.xyxy = pred  # xyxy pixels
        self.xywh = [xyxy2xywh(x) for x in pred]  # xywh pixels
        self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)]  # xyxy normalized
        self.xywhn = [x / g for x, g in zip(self.xywh, gn)]  # xywh normalized
        self.n = len(self.pred)  # number of images (batch size)
        # Per-stage latency in ms; zeros when no timestamps were supplied
        # (previously times=None raised a TypeError here).
        self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) if times else (0., 0., 0.)
        self.s = shape  # inference BCHW shape
    def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')):
        """Shared worker behind print/show/save/crop/render."""
        for i, (im, pred) in enumerate(zip(self.imgs, self.pred)):
            s = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} '  # renamed from 'str' (shadowed builtin)
            if pred is not None:
                for c in pred[:, -1].unique():
                    n = (pred[:, -1] == c).sum()  # detections per class
                    s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, "  # add to string
                if show or save or render or crop:
                    for *box, conf, cls in pred:  # xyxy, confidence, class
                        label = f'{self.names[int(cls)]} {conf:.2f}'
                        if crop:
                            save_one_box(box, im, file=save_dir / 'crops' / self.names[int(cls)] / self.files[i])
                        else:  # all others
                            plot_one_box(box, im, label=label, color=colors(cls))
            im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im  # from np
            if pprint:
                print(s.rstrip(', '))
            if show:
                im.show(self.files[i])  # show
            if save:
                f = self.files[i]
                im.save(save_dir / f)  # save
                print(f"{'Saved' * (i == 0)} {f}", end=',' if i < self.n - 1 else f' to {save_dir}\n')
            if render:
                self.imgs[i] = np.asarray(im)
    def print(self):
        """Print a per-image summary plus stage timings."""
        self.display(pprint=True)  # print results
        print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t)
    def show(self):
        """Display annotated images."""
        self.display(show=True)  # show results
    def save(self, save_dir='runs/hub/exp'):
        """Save annotated images under an auto-incremented run directory."""
        save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp', mkdir=True)  # increment save_dir
        self.display(save=True, save_dir=save_dir)  # save results
    def crop(self, save_dir='runs/hub/exp'):
        """Save per-detection crops under an auto-incremented run directory."""
        save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp', mkdir=True)  # increment save_dir
        self.display(crop=True, save_dir=save_dir)  # crop results
        print(f'Saved results to {save_dir}\n')
    def render(self):
        """Draw boxes into self.imgs and return them."""
        self.display(render=True)  # render results
        return self.imgs
    def pandas(self):
        """Return a copy with xyxy/xywh attributes as pandas DataFrames."""
        # return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0])
        new = copy(self)  # return copy
        ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name'  # xyxy columns
        cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name'  # xywh columns
        for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]):
            a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)]  # update
            setattr(new, k, [pd.DataFrame(x, columns=c) for x in a])
        return new
    def tolist(self):
        """Return a list of single-image Detections objects.

        Bug fix: previously self.names / self.s were passed positionally into
        the ``files`` / ``times`` parameters; forward the per-image filename
        and the stored timestamps instead.
        """
        x = [Detections([self.imgs[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s)
             for i in range(self.n)]
        for d in x:
            for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
                setattr(d, k, getattr(d, k)[0])  # pop out of list
        return x
    def __len__(self):
        return self.n
class Classify(nn.Module):
    """Classification head: maps x(b,c1,h,w) feature maps to logits x(b,c2)."""
    def __init__(self, c1, c2, k=1, s=1, p=None, g=1):  # ch_in, ch_out, kernel, stride, padding, groups
        super(Classify, self).__init__()
        self.aap = nn.AdaptiveAvgPool2d(1)  # global average pool -> x(b,c1,1,1)
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g)  # -> x(b,c2,1,1)
        self.flat = nn.Flatten()
    def forward(self, x):
        # Accept a single tensor or a list; pool each then concatenate channels.
        inputs = x if isinstance(x, list) else [x]
        pooled = torch.cat([self.aap(t) for t in inputs], 1)
        return self.flat(self.conv(pooled))  # flatten to x(b,c2)
import math
from copy import copy
from pathlib import Path
import numpy as np
import pandas as pd
import requests
import torch
import torch.nn as nn
from PIL import Image
from torch.cuda import amp
from utils.datasets import letterbox
from utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh, save_one_box
from utils.plots import colors, plot_one_box
from utils.torch_utils import time_synchronized
def autopad(k, p=None):
    """Return 'same' padding for kernel size ``k`` unless ``p`` is given."""
    if p is not None:
        return p
    # Half the kernel size, computed per-dimension for iterable kernels.
    return k // 2 if isinstance(k, int) else [v // 2 for v in k]
def DWConv(c1, c2, k=1, s=1, act=True):
    # Depth-wise convolution: groups = gcd(c1, c2) so channels stay per-group.
    return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act)
class Conv(nn.Module):
    # Standard convolution block: Conv2d -> BatchNorm2d -> activation.
    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):
        # c1/c2: in/out channels; k: kernel; s: stride; p: padding (None = 'same'); g: groups.
        super(Conv, self).__init__()
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
        self.bn = nn.BatchNorm2d(c2)
        # act=True -> SiLU; an nn.Module -> used as-is; anything else -> identity.
        self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())
    def forward(self, x):
        return self.act(self.bn(self.conv(x)))
    def fuseforward(self, x):
        # Used after conv+bn fusion: batch-norm is folded into the conv weights.
        return self.act(self.conv(x))
class TransformerLayer(nn.Module):
    """Single transformer encoder layer (no LayerNorm, matching the original)."""
    def __init__(self, c, num_heads):
        super().__init__()
        # Submodule creation order kept identical so parameter init matches.
        self.q = nn.Linear(c, c, bias=False)
        self.k = nn.Linear(c, c, bias=False)
        self.v = nn.Linear(c, c, bias=False)
        self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)
        self.fc1 = nn.Linear(c, c, bias=False)
        self.fc2 = nn.Linear(c, c, bias=False)
    def forward(self, x):
        attn_out = self.ma(self.q(x), self.k(x), self.v(x))[0]
        x = attn_out + x  # residual around attention
        return self.fc2(self.fc1(x)) + x  # residual around the feed-forward
class TransformerBlock(nn.Module):
    # Vision-transformer block: optional channel-matching conv, a learned
    # position embedding, and a stack of TransformerLayers.
    def __init__(self, c1, c2, num_heads, num_layers):
        super().__init__()
        self.conv = None
        if c1 != c2:
            self.conv = Conv(c1, c2)  # only needed when channel counts differ
        self.linear = nn.Linear(c2, c2)  # learnable position embedding
        self.tr = nn.Sequential(*[TransformerLayer(c2, num_heads) for _ in range(num_layers)])
        self.c2 = c2
    def forward(self, x):
        if self.conv is not None:
            x = self.conv(x)
        b, _, w, h = x.shape
        # Flatten the spatial grid into a sequence: (b,c,w,h) -> (w*h, b, c).
        p = x.flatten(2)
        p = p.unsqueeze(0)
        p = p.transpose(0, 3)
        p = p.squeeze(3)
        e = self.linear(p)  # position embedding
        x = p + e
        x = self.tr(x)
        # Restore the feature-map layout: (w*h, b, c) -> (b, c, w, h).
        x = x.unsqueeze(3)
        x = x.transpose(0, 3)
        x = x.reshape(b, self.c2, w, h)
        return x
class Bottleneck(nn.Module):
    # Standard bottleneck: 1x1 reduce -> 3x3 conv, with optional residual add.
    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):
        # c1/c2: in/out channels; g: groups for the 3x3 conv; e: hidden expansion.
        super(Bottleneck, self).__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c_, c2, 3, 1, g=g)
        self.add = shortcut and c1 == c2  # residual only when shapes match
    def forward(self, x):
        return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
class BottleneckCSP(nn.Module):
    # CSP (cross-stage partial) bottleneck: the input is split into a
    # bottleneck branch and a bypass branch, then re-joined.
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        # n: number of stacked Bottlenecks; e: hidden-channel expansion.
        super(BottleneckCSP, self).__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)  # bypass branch
        self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
        self.cv4 = Conv(2 * c_, c2, 1, 1)  # fuse the two branches
        self.bn = nn.BatchNorm2d(2 * c_)
        self.act = nn.LeakyReLU(0.1, inplace=True)
        self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
    def forward(self, x):
        y1 = self.cv3(self.m(self.cv1(x)))  # bottleneck branch
        y2 = self.cv2(x)  # bypass branch
        return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))
class C3(nn.Module):
    # Simplified CSP bottleneck with three Conv blocks (no extra BN/act fuse).
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        # n: number of stacked Bottlenecks; e: hidden-channel expansion.
        super(C3, self).__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c1, c_, 1, 1)  # bypass branch
        self.cv3 = Conv(2 * c_, c2, 1)  # fuse the two branches
        self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
    def forward(self, x):
        return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1))
class C3TR(C3):
    # C3 variant with the bottleneck stack replaced by a TransformerBlock.
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        super().__init__(c1, c2, n, shortcut, g, e)
        c_ = int(c2 * e)  # hidden channels
        self.m = TransformerBlock(c_, c_, 4, n)  # 4 attention heads, n layers
class SPP(nn.Module):
    # Spatial Pyramid Pooling: concat max-pools at several kernel sizes.
    def __init__(self, c1, c2, k=(5, 9, 13)):
        # k: max-pool kernel sizes; stride 1 with 'same' padding keeps H/W.
        super(SPP, self).__init__()
        c_ = c1 // 2  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)  # input + one per pool
        self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
    def forward(self, x):
        x = self.cv1(x)
        return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))
class Focus(nn.Module):
    # Focus width-height information into channel space (space-to-depth + conv).
    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):
        super(Focus, self).__init__()
        self.conv = Conv(c1 * 4, c2, k, s, p, g, act)  # 4x channels after slicing
    def forward(self, x):
        # Sample every other pixel into 4 channel groups, then convolve.
        return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1))
class Contract(nn.Module):
    """Fold spatial resolution into channels, e.g. x(1,64,80,80) -> x(1,256,40,40)."""
    def __init__(self, gain=2):
        """Store the spatial-to-channel contraction factor (default 2)."""
        super().__init__()
        self.gain = gain
    def forward(self, x):
        batch, channels, height, width = x.size()
        factor = self.gain  # H and W must be divisible by factor
        # Split H and W into (coarse, factor) pairs, move the two factor axes
        # in front of the channel axis, then flatten them into channels.
        out = x.view(batch, channels, height // factor, factor, width // factor, factor)
        out = out.permute(0, 3, 5, 1, 2, 4).contiguous()
        return out.view(batch, channels * factor * factor, height // factor, width // factor)
class Expand(nn.Module):
    # Expand channels into width-height, e.g. x(1,64,80,80) -> x(1,16,160,160).
    def __init__(self, gain=2):
        # gain: per-axis upsampling factor; channels must be divisible by gain**2.
        super().__init__()
        self.gain = gain
    def forward(self, x):
        N, C, H, W = x.size()
        s = self.gain
        x = x.view(N, s, s, C // s ** 2, H, W)  # split channels into (s, s, C')
        x = x.permute(0, 3, 4, 1, 5, 2).contiguous()  # move s-axes next to H and W
        return x.view(N, C // s ** 2, H * s, W * s)
class Concat(nn.Module):
    # Concatenate a list of tensors along a chosen dimension.
    def __init__(self, dimension=1):
        super(Concat, self).__init__()
        self.d = dimension  # concatenation axis (default: channels)
    def forward(self, x):
        return torch.cat(x, self.d)
class NMS(nn.Module):
    # Non-Maximum Suppression wrapper applied to the first model output.
    conf = 0.25  # confidence threshold
    iou = 0.45  # IoU threshold
    classes = None  # optional list: filter detections by class index
    max_det = 1000  # maximum detections per image
    def __init__(self):
        super(NMS, self).__init__()
    def forward(self, x):
        # x[0] is the raw prediction tensor; thresholds come from class attributes.
        return non_max_suppression(x[0], self.conf, iou_thres=self.iou, classes=self.classes, max_det=self.max_det)
class AutoShape(nn.Module):
    # Input-robust model wrapper (cv2/np/PIL/torch): pre-process, infer, NMS.
    conf = 0.25  # NMS confidence threshold
    iou = 0.45  # NMS IoU threshold
    classes = None  # optional list: filter detections by class index
    max_det = 1000  # maximum detections per image
    def __init__(self, model):
        super(AutoShape, self).__init__()
        self.model = model.eval()
    def autoshape(self):
        print('AutoShape already enabled, skipping... ')  # already wrapped
        return self
    @torch.no_grad()
    def forward(self, imgs, size=640, augment=False, profile=False):
        """Pre-process, run inference and NMS; returns a ``Detections`` object."""
        # Fix: the extraction corrupted the first two statements of this
        # method (only the residue "s())" remained, leaving t and p
        # undefined); restored from the intact copy of this file.
        t = [time_synchronized()]
        p = next(self.model.parameters())  # for device and dtype
        if isinstance(imgs, torch.Tensor):  # already a prepared BCHW tensor
            with amp.autocast(enabled=p.device.type != 'cpu'):
                return self.model(imgs.to(p.device).type_as(p), augment, profile)
        # Pre-process: normalise every input into a contiguous HWC array.
        n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs])
        shape0, shape1, files = [], [], []  # image shapes, inference shapes, names
        for i, im in enumerate(imgs):
            f = f'image{i}'  # fallback filename
            if isinstance(im, str):  # filename or URI
                im, f = np.asarray(Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im)), im
            elif isinstance(im, Image.Image):  # PIL image
                im, f = np.asarray(im), getattr(im, 'filename', f) or f
            files.append(Path(f).with_suffix('.jpg').name)
            if im.shape[0] < 5:  # CHW layout -> HWC
                im = im.transpose((1, 2, 0))
            im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3)  # enforce 3 channels
            s = im.shape[:2]  # (h, w)
            shape0.append(s)
            g = (size / max(s))  # resize gain
            shape1.append([y * g for y in s])
            imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im)
        shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)]  # inference shape
        x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs]  # pad
        x = np.stack(x, 0) if n > 1 else x[0][None]  # batch
        x = np.ascontiguousarray(x.transpose((0, 3, 1, 2)))  # BHWC -> BCHW
        x = torch.from_numpy(x).to(p.device).type_as(p) / 255.  # uint8 -> fp
        t.append(time_synchronized())
        with amp.autocast(enabled=p.device.type != 'cpu'):
            y = self.model(x, augment, profile)[0]  # inference
            t.append(time_synchronized())
            y = non_max_suppression(y, self.conf, iou_thres=self.iou, classes=self.classes, max_det=self.max_det)
            for i in range(n):
                scale_coords(shape1, y[i][:, :4], shape0[i])  # back to original size
            t.append(time_synchronized())
            return Detections(imgs, y, files, t, self.names, x.shape)
class Detections:
    # Container for YOLOv5 inference results with display/export helpers.
    def __init__(self, imgs, pred, files, times=None, names=None, shape=None):
        super(Detections, self).__init__()
        d = pred[0].device  # device of the prediction tensors
        # Per-image normalization gains (w, h, w, h, 1, 1) for the *n variants.
        gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs]
        self.imgs = imgs  # list of HWC numpy images
        self.pred = pred  # list of (n, 6) tensors: xyxy, conf, cls
        self.names = names  # class names
        self.files = files  # source filenames
        self.xyxy = pred  # xyxy pixels
        self.xywh = [xyxy2xywh(x) for x in pred]  # xywh pixels
        self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)]  # xyxy normalized
        self.xywhn = [x / g for x, g in zip(self.xywh, gn)]  # xywh normalized
        self.n = len(self.pred)  # batch size
        self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3))  # per-stage ms (needs times)
        self.s = shape  # inference BCHW shape
    def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')):
        # Shared worker behind print/show/save/crop/render.
        for i, (im, pred) in enumerate(zip(self.imgs, self.pred)):
            str = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} '  # NOTE(review): shadows builtin str
            if pred is not None:
                for c in pred[:, -1].unique():
                    n = (pred[:, -1] == c).sum()  # detections of class c
                    str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, "
                if show or save or render or crop:
                    for *box, conf, cls in pred:
                        label = f'{self.names[int(cls)]} {conf:.2f}'
                        if crop:
                            save_one_box(box, im, file=save_dir / 'crops' / self.names[int(cls)] / self.files[i])
                        else:
                            plot_one_box(box, im, label=label, color=colors(cls))
            im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im
            if pprint:
                print(str.rstrip(', '))
            if show:
                im.show(self.files[i])
            if save:
                f = self.files[i]
                im.save(save_dir / f)
                print(f"{'Saved' * (i == 0)} {f}", end=',' if i < self.n - 1 else f' to {save_dir}\n')
            if render:
                self.imgs[i] = np.asarray(im)
    def print(self):
        self.display(pprint=True)  # per-image summary
        print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t)
    def show(self):
        self.display(show=True)
    def save(self, save_dir='runs/hub/exp'):
        save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp', mkdir=True)  # unique run dir
        self.display(save=True, save_dir=save_dir)
    def crop(self, save_dir='runs/hub/exp'):
        save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp', mkdir=True)  # unique run dir
        self.display(crop=True, save_dir=save_dir)
        print(f'Saved results to {save_dir}\n')
    def render(self):
        self.display(render=True)  # draw boxes into self.imgs
        return self.imgs
    def pandas(self):
        # Return a copy whose box attributes are pandas DataFrames.
        new = copy(self)
        ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name'
        cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name'
        for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]):
            a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)]
            setattr(new, k, [pd.DataFrame(x, columns=c) for x in a])
        return new
    def tolist(self):
        # NOTE(review): self.names / self.s land in the files / times
        # parameters of Detections.__init__ -- looks like a bug; confirm
        # against the intact copy of this class.
        x = [Detections([self.imgs[i]], [self.pred[i]], self.names, self.s) for i in range(self.n)]
        for d in x:
            for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
                setattr(d, k, getattr(d, k)[0])  # unwrap single-element lists
        return x
    def __len__(self):
        return self.n
class Classify(nn.Module):
    # Classification head: x(b,c1,h,w) feature maps -> logits x(b,c2).
    def __init__(self, c1, c2, k=1, s=1, p=None, g=1):
        super(Classify, self).__init__()
        self.aap = nn.AdaptiveAvgPool2d(1)  # global average pool -> x(b,c1,1,1)
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g)
        self.flat = nn.Flatten()
    def forward(self, x):
        # Accept a single tensor or a list; pool each then concat channels.
        z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1)
        return self.flat(self.conv(z))
f7398c1fa458a70e403ac5bb83bd055850d38530 | 278 | py | Python | Task2C.py | xd2w/Lent_Computing | ad7f772618363dd82f21d21d5df5636ec997fe3c | [
"MIT"
] | null | null | null | Task2C.py | xd2w/Lent_Computing | ad7f772618363dd82f21d21d5df5636ec997fe3c | [
"MIT"
] | null | null | null | Task2C.py | xd2w/Lent_Computing | ad7f772618363dd82f21d21d5df5636ec997fe3c | [
"MIT"
] | 1 | 2022-02-02T21:30:35.000Z | 2022-02-02T21:30:35.000Z | from floodsystem.stationdata import build_station_list
from floodsystem.stationdata import update_water_levels
from floodsystem.flood import stations_highest_rel_level
stations = build_station_list()
update_water_levels(stations)
print(stations_highest_rel_level(stations,10))
| 34.75 | 56 | 0.888489 | from floodsystem.stationdata import build_station_list
from floodsystem.stationdata import update_water_levels
from floodsystem.flood import stations_highest_rel_level
stations = build_station_list()
update_water_levels(stations)
print(stations_highest_rel_level(stations,10))
| true | true |
f7398cd89e326e9453c3ce94ad448b5d8dce5e1c | 2,364 | py | Python | tools/icon_build.py | awslabs/aws-sdk-api-changes | a3e6cbad729bb69f9d63373dc52c5e53faa349f2 | [
"Apache-2.0"
] | 11 | 2020-04-27T22:53:01.000Z | 2021-09-09T16:19:09.000Z | tools/icon_build.py | awslabs/aws-sdk-api-changes | a3e6cbad729bb69f9d63373dc52c5e53faa349f2 | [
"Apache-2.0"
] | 4 | 2020-03-05T17:41:25.000Z | 2021-06-02T02:39:34.000Z | tools/icon_build.py | awslabs/aws-sdk-api-changes | a3e6cbad729bb69f9d63373dc52c5e53faa349f2 | [
"Apache-2.0"
] | 2 | 2020-03-12T10:23:51.000Z | 2021-01-27T10:56:10.000Z | #!/usr/bin/env python
# note we'll use glue to assemble a css sprite
# for now we use this to pickup the images we want, convert
# from svg to png in our preferred size
import click
import cairosvg
import os
from pathlib import Path
from jinja2 import Template
from apichanges.icons import ICON_SERVICE_MAP
CSS_BUILD = """
{% for name, path in icons.items() %}
.{{ name }} {background-image: url('/{{ path }}')}
{% endfor %}
"""
@click.command()
@click.option('-s', '--source', required=True, type=click.Path())
@click.option('-d', '--destination', required=True, type=click.Path())
@click.option('--size', type=int, default=128)
def main(source, destination, size):
    """Convert dark-bg service SVG icons under SOURCE into SIZE-px PNGs
    in DESTINATION and write an icons.css sprite map."""
    source = Path(source).expanduser().resolve()
    destination = Path(destination).expanduser().resolve()
    count = 0
    icons = {}
    # NOTE(review): 'icons' is never populated before the 'name in icons'
    # check below, so that check is always False; 'used' is the effective
    # duplicate tracker.  Confirm the intent.
    used = set()
    icon_2_service = {
        v: k for k, v in ICON_SERVICE_MAP.items()}
    for dirpath, dirnames, filenames in os.walk(str(source)):
        dirpath = Path(dirpath)
        for f in filenames:
            if not f.endswith('_dark-bg.svg'):
                continue
            origin = (dirpath / f)
            n = origin.name
            name = n[:n.find('_dark')].replace('.', '_')
            service = icon_2_service.get(name)
            if service is None:
                continue
            if name in icons:
                continue
            used.add(name)
            target = destination / ("%s.png" % name.lower())
            # if target.exists():
            #    continue
            count += 1
            target.parent.mkdir(parents=True, exist_ok=True)
            # print('{} -> {}'.format(origin, target))
            cairosvg.svg2png(
                url=str(origin),
                write_to=str(target),
                output_width=size,
                output_height=size)
    if set(icon_2_service).difference(used):
        print('missing service icons %s' % (', '.join(
            set(icon_2_service).difference(used))))
    print('copied %d icons' % count)
    with (destination / 'icons.css').open('w') as fh:
        # Map each service name to its icon path and render the CSS template.
        icons = {k: "icons/%s.png" % v.lower()
                 for k, v in ICON_SERVICE_MAP.items()}
        fh.write(Template(
            CSS_BUILD, lstrip_blocks=True, trim_blocks=True).render(
                icons=icons))
if __name__ == '__main__':
    main()  # CLI entry point
| 29.185185 | 70 | 0.565567 |
# for now we use this to pickup the images we want, convert
# from svg to png in our preferred size
import click
import cairosvg
import os
from pathlib import Path
from jinja2 import Template
from apichanges.icons import ICON_SERVICE_MAP
CSS_BUILD = """
{% for name, path in icons.items() %}
.{{ name }} {background-image: url('/{{ path }}')}
{% endfor %}
"""
@click.command()
@click.option('-s', '--source', required=True, type=click.Path())
@click.option('-d', '--destination', required=True, type=click.Path())
@click.option('--size', type=int, default=128)
def main(source, destination, size):
    """Convert dark-bg service SVGs under SOURCE into SIZE-px PNGs in
    DESTINATION and emit an icons.css sprite map."""
    source = Path(source).expanduser().resolve()
    destination = Path(destination).expanduser().resolve()
    count = 0
    icons = {}
    # NOTE(review): 'icons' is never populated before the membership check
    # below, so the check is always False; 'used' tracks duplicates instead.
    used = set()
    icon_2_service = {
        v: k for k, v in ICON_SERVICE_MAP.items()}
    for dirpath, dirnames, filenames in os.walk(str(source)):
        dirpath = Path(dirpath)
        for f in filenames:
            if not f.endswith('_dark-bg.svg'):
                continue
            origin = (dirpath / f)
            n = origin.name
            name = n[:n.find('_dark')].replace('.', '_')
            service = icon_2_service.get(name)
            if service is None:
                continue  # not an icon we map to a service
            if name in icons:
                continue
            used.add(name)
            target = destination / ("%s.png" % name.lower())
            # if target.exists():
            #    continue
            count += 1
            target.parent.mkdir(parents=True, exist_ok=True)
            # print('{} -> {}'.format(origin, target))
            cairosvg.svg2png(
                url=str(origin),
                write_to=str(target),
                output_width=size,
                output_height=size)
    if set(icon_2_service).difference(used):
        print('missing service icons %s' % (', '.join(
            set(icon_2_service).difference(used))))
    print('copied %d icons' % count)
    with (destination / 'icons.css').open('w') as fh:
        # Map each service to its icon path and render the CSS template.
        icons = {k: "icons/%s.png" % v.lower()
                 for k, v in ICON_SERVICE_MAP.items()}
        fh.write(Template(
            CSS_BUILD, lstrip_blocks=True, trim_blocks=True).render(
                icons=icons))
if __name__ == '__main__':
    main()  # CLI entry point
| true | true |
f7398d657af6b28533dc61ff9b7e5a7af7a0e424 | 989 | py | Python | app/app/urls.py | umtdemr/recipe-app-api | e28b2a704106240b47672c4aa5fe2ac169adb002 | [
"MIT"
] | null | null | null | app/app/urls.py | umtdemr/recipe-app-api | e28b2a704106240b47672c4aa5fe2ac169adb002 | [
"MIT"
] | 6 | 2021-03-19T00:29:34.000Z | 2021-09-22T18:39:45.000Z | app/app/urls.py | umtdemr/recipe-app-api | e28b2a704106240b47672c4aa5fe2ac169adb002 | [
"MIT"
] | null | null | null | """app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
    path('admin/', admin.site.urls),  # Django admin site
    path('api/user/', include('user.urls')),  # user API endpoints
    path('api/recipe/', include('recipe.urls')),  # recipe API endpoints
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)  # serve uploaded media (development use)
| 36.62963 | 77 | 0.715875 | from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
    path('admin/', admin.site.urls),  # Django admin site
    path('api/user/', include('user.urls')),  # user API endpoints
    path('api/recipe/', include('recipe.urls')),  # recipe API endpoints
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)  # serve uploaded media (development use)
| true | true |
f7398da7dea040c14e019dc29b22255bbd8b853b | 15,670 | py | Python | main_window.py | MKing301/password-generator | 65a2450887539f9453f426220d9c5c34f8e8d508 | [
"MIT"
] | null | null | null | main_window.py | MKing301/password-generator | 65a2450887539f9453f426220d9c5c34f8e8d508 | [
"MIT"
] | null | null | null | main_window.py | MKing301/password-generator | 65a2450887539f9453f426220d9c5c34f8e8d508 | [
"MIT"
] | null | null | null | import sys
import random
import string
import json
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QLabel
from PyQt5.QtWidgets import QMainWindow
from PyQt5.QtWidgets import QStatusBar
from PyQt5.QtWidgets import QToolBar
from PyQt5.QtWidgets import QWidget
from PyQt5.QtWidgets import QAction
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtWidgets import QPushButton
from PyQt5.QtWidgets import QRadioButton
from PyQt5.QtWidgets import QLineEdit
from PyQt5.QtWidgets import QLabel
from PyQt5.QtWidgets import QFileDialog
from PyQt5.QtWidgets import QTextEdit
from PyQt5.QtWidgets import QInputDialog
from PyQt5.QtWidgets import QScrollArea
from PyQt5.QtWidgets import QPlainTextEdit
from PyQt5.QtWidgets import QDialog
from PyQt5.QtWidgets import QDialogButtonBox
from PyQt5.QtWidgets import QFormLayout
from PyQt5.QtWidgets import QGridLayout
from PyQt5.QtWidgets import QGroupBox
from PyQt5.QtWidgets import QHBoxLayout
from PyQt5.QtWidgets import QPushButton
from PyQt5.QtWidgets import QVBoxLayout
from PyQt5.QtWidgets import QPlainTextEdit
class PasswordWindow(QMainWindow):
    """Secondary window used to display the stored password list."""
    def __init__(self, parent=None):
        """Configure the window's title and geometry."""
        super().__init__(parent)
        self.setWindowTitle('Password List')
        self.setGeometry(30, 30, 900, 400)
class Window(QMainWindow):
"""Main Window."""
    def __init__(self, parent=None):
        """Initialize the main window: title, geometry, menu bar and status bar."""
        super().__init__(parent)
        self.setWindowTitle('Password Genearator')  # NOTE(review): typo 'Genearator' kept (user-visible string)
        self.setGeometry(10, 10, 1000, 800)
        self._createMenu()
        self._createStatusBar()
    def _createMenu(self):
        """Build the File and Help menus and wire their actions to handlers."""
        file_menu = self.menuBar().addMenu("File")
        # File Menu - New
        new_action = QAction("&New", self)
        new_action.setStatusTip('Create a new password.')
        new_action.setShortcut('Ctrl+Shift+N')
        new_action.triggered.connect(self.file_new)
        file_menu.addAction(new_action)
        # File Menu - Open
        open_action = QAction("Open", self)
        open_action.setStatusTip('Open password file.')
        open_action.setShortcut('Ctrl+Shift+O')
        open_action.triggered.connect(self.file_open)
        file_menu.addAction(open_action)
        # File Menu - Save
        save_action = QAction("&Save", self)
        save_action.setStatusTip('Save password file.')
        save_action.setShortcut('Ctrl+Shift+S')
        save_action.triggered.connect(self.file_save)
        file_menu.addAction(save_action)
        # File Menu - Delete (no shortcut assigned)
        delete_action = QAction("Delete", self)
        delete_action.setStatusTip('Delete password.')
        delete_action.triggered.connect(self.file_delete)
        file_menu.addAction(delete_action)
        file_menu.addSeparator()
        # File Menu - Exit
        exit_action = QAction("&Exit", self)
        exit_action.setStatusTip('Exit application.')
        exit_action.setShortcut('Ctrl+Shift+Q')
        exit_action.triggered.connect(self.close)
        file_menu.addAction(exit_action)
        # Help Menu - About
        about_action = QAction("About", self)
        about_action.setStatusTip('Brief summary about app.')
        about_action.triggered.connect(self.about_help)
        help_menu = self.menuBar().addMenu("Help")
        help_menu.addAction(about_action)
def _createStatusBar(self):
""" Create Status Bar """
status = QStatusBar()
status.showMessage("Status Bar Area")
self.setStatusBar(status)
    def file_new(self):
        """Display the input widgets used to generate a new password."""
        # Text box to capture length of password being requested
        self.size_textbox = QLineEdit(self)
        self.size_textbox.move(100, 50)
        self.size_textbox.show()
        # Label for text to capture password length
        self.size_label = QLabel('Password Length', self)
        self.size_label.resize(self.size_label.minimumSizeHint())
        self.size_label.move(210, 50)
        self.size_label.show()
        # Radio button to request PIN
        self.pin_radiobtn = QRadioButton('PIN', self)
        self.pin_radiobtn.resize(self.pin_radiobtn.minimumSizeHint())
        self.pin_radiobtn.move(100, 100)
        self.pin_radiobtn.show()
        # Radio button to request alphanumeric password
        self.alphanum_radiobtn = QRadioButton('Alphanumeric', self)
        self.alphanum_radiobtn.resize(self.alphanum_radiobtn.minimumSizeHint())
        self.alphanum_radiobtn.move(100, 150)
        self.alphanum_radiobtn.show()
        ''' Radio button to request alphanumeric password w/ special characters
        password. '''
        self.special_radiobtn = QRadioButton(
            'Alphanumeric w/ special characters', self
        )
        self.special_radiobtn.resize(self.special_radiobtn.minimumSizeHint())
        self.special_radiobtn.move(100, 200)
        self.special_radiobtn.show()
        # Button to generate the password when clicked
        self.btn = QPushButton('Generate Password', self)
        self.btn.resize(self.btn.minimumSizeHint())
        # NOTE(review): on_generate_btn_clicked is not defined in this view;
        # confirm it exists elsewhere in the class.
        self.btn.clicked.connect(self.on_generate_btn_clicked)
        self.btn.move(250, 275)
        self.btn.show()
        # Non-editable text box to display the password or an error.
        # This box stays hidden until a password or error is returned.
        self.generated_pwd = QLineEdit(self)
        self.generated_pwd.move(60, 325)
        self.generated_pwd.setEnabled(False)
        self.generated_pwd.resize(700, 32)
        self.generated_pwd.hide()
    def file_open(self):
        """Open the passwords JSON file and list its entries in a new window."""
        self.textEdit = QTextEdit()
        self.setCentralWidget(self.textEdit)
        options = QFileDialog.Options()
        options |= QFileDialog.DontUseNativeDialog
        # NOTE(review): the dialog starts in a hard-coded user directory.
        fname = QFileDialog.getOpenFileName(
            self,
            'QFileDialog.getOpenFileNames()',
            '/home/mfsd1809/Dev/FullStackWebDeveloper/GUI/pass-gen/Files',
            'JSON Files (*.json)',
            options=options)
        if fname[0]:  # empty string when the user cancelled the dialog
            f = open(fname[0], 'r')
            with f:
                data = json.load(f)
                self.pw = PasswordWindow()
                self.app_label = QLabel(self.pw)
                self.app_label.setText('Application - Password')
                self.app_label.resize(self.app_label.minimumSizeHint())
                self.app_label.move(20, 20)
                self.b = QPlainTextEdit(self.pw)
                count = 1
                # Render one numbered "app - password" line per entry.
                for app, pwd in data.items():
                    self.b.insertPlainText(str(count) +
                                           '. ' +
                                           app +
                                           ' - ' +
                                           pwd +
                                           '\n')
                    count = count + 1
                self.b.setReadOnly(True)
                self.b.move(20, 50)
                self.b.resize(850, 250)
                self.pw.move(100, 150)
                self.pw.show()
    def file_save(self):
        """Save the app name and generated password to the passwords JSON file."""
        app, okPressed = QInputDialog.getText(self,
                                              "Application Name",
                                              "Enter App Name:",
                                              QLineEdit.Normal,
                                              "")
        app = app.lower().strip()  # normalize the dictionary key
        try:
            if okPressed and app != '' and self.generated_pwd.text() != '':
                # Read JSON file
                with open(
                    '/home/mfsd1809/Dev/FullStackWebDeveloper/GUI/pass-gen/'
                    'Files/passwords.json'
                ) as r_file:
                    data = json.load(r_file)
                # print(json.dumps(data, indent=4))
                # Add/Update JSON file
                data.update({app: self.generated_pwd.text()})
                # Write update to JSON file
                with open(
                    '/home/mfsd1809/Dev/FullStackWebDeveloper/GUI/pass-gen/'
                    'Files/passwords.json', 'w'
                ) as w_file:
                    json.dump(data, w_file, indent=4)
                self.get_msg_box(QMessageBox.Information,
                                 'Success',
                                 'Password saved.')
            elif (okPressed and app == ''):
                self.get_msg_box(QMessageBox.Warning,
                                 'Insufficient Data',
                                 'You did not enter an app name.')
        except Exception:
            # NOTE(review): this broad except chiefly catches the AttributeError
            # raised when file_new was never used (self.generated_pwd missing),
            # but it also hides I/O and JSON errors -- consider narrowing.
            self.get_msg_box(QMessageBox.Warning,
                             'Insufficient Data',
                             'No password provided for the app you entered.')
def file_delete(self):
""" Delete application name and password from passwords file """
app, okPressed = QInputDialog.getText(self,
"Application Name",
"Enter App Name:",
QLineEdit.Normal,
"")
app = app.lower().strip()
try:
if okPressed and app != '':
# Read JSON file
with open(
'/home/mfsd1809/Dev/FullStackWebDeveloper/GUI/pass-gen/'
'Files/passwords.json'
) as r_file:
data = json.load(r_file)
# print(json.dumps(data, indent=4))
# Delete app/password from JSON file
del data[app]
# Write update to JSON file
with open(
'/home/mfsd1809/Dev/FullStackWebDeveloper/GUI/pass-gen/'
'Files/passwords.json', 'w'
) as w_file:
json.dump(data, w_file, indent=4)
self.get_msg_box(QMessageBox.Information,
'Success',
'Password deleted.')
elif (okPressed and app == ''):
self.get_msg_box(QMessageBox.Warning,
'Insufficient Data',
'You did not enter an app name.')
except Exception:
self.get_msg_box(QMessageBox.Warning,
'Insufficient Data',
'No password provided for the app you entered.')
def about_help(self):
""" Display information about application when Help-->About menu item
is clicked """
self.get_msg_box(QMessageBox.Information,
'About',
'This application was created using Python and '
'PyQt5. It is used to generate a password based '
'on the user input of length and selected type.')
def get_msg_box(self, icon, title, msg):
""" Message box"""
self.mb = QMessageBox(self)
self.mb.setIcon(icon)
self.mb.setWindowTitle(title)
self.mb.setText(msg)
self.mb.setStandardButtons(QMessageBox.Ok)
self.mb.show()
def get_pin(self, size):
"""Generate random password, numbers only."""
# Check if input an integer
if(isinstance(size, int)):
# Check if input is positive
if(size < 0):
self.get_response('You did not enter a positive integer!',
'error')
elif (size < 4):
self.get_response('The number must be greater or equal to 4.',
'error')
else:
digits = string.digits
self.get_response(
''.join(random.choice(digits) for i in range(size)),
'success')
else:
self.get_response('You did NOT enter an integer!', 'error')
def get_alphanumeric_password(self, size):
""" Generate random password, alphanumeric only """
# Check if input an integer
if(isinstance(size, int)):
# Check if input is positive
if(size < 0):
self.get_response('You did not enter a positive integer!',
'error')
elif (size < 8):
self.get_response('The number must be greater or equal to 8.',
'error')
else:
letters_and_digits = string.ascii_letters + string.digits
self.get_response(
''.join(random.choice(
letters_and_digits) for i in range(size)
),
'success')
else:
self.get_response('You did NOT enter an integer!', 'error')
def get_alphanumeric_with_symbols_password(self, size):
""" Generate random password, alphanumeric with symbols included """
# Check if input an integer
if(isinstance(size, int)):
# Check if input is positive
if(size < 0):
self.get_response('You did not enter a positive integer!',
'error')
elif (size < 8):
self.get_response('The number must be greater or equal to 8.',
'error')
else:
letters_digits_symbols = (string.ascii_letters +
string.digits +
string.punctuation)
self.get_response(
''.join(random.choice(
letters_digits_symbols) for i in range(size)
),
'success')
else:
self.get_response('You did NOT enter an integer!', 'error')
def get_response(self, text, result_type):
""" Display response from request; password or error in non-editable
text box """
self.generated_pwd.setText(text)
if(result_type == 'error'):
self.generated_pwd.setStyleSheet("color: rgb(255, 0, 0)")
else:
self.generated_pwd.setStyleSheet("color: rgb(0, 153, 0)")
self.generated_pwd.show()
def on_generate_btn_clicked(self):
""" Execute request to generate password with validations """
try:
input = self.size_textbox.text().strip()
if(input == ''):
self.get_response('You did not enter a value for password '
'length.',
'error')
else:
input = int(input)
if(self.pin_radiobtn.isChecked()):
self.get_pin(input)
elif(self.alphanum_radiobtn.isChecked()):
self.get_alphanumeric_password(input)
elif(self.special_radiobtn.isChecked()):
self.get_alphanumeric_with_symbols_password(input)
else:
self.get_response('You must select a password option.',
'error')
except ValueError:
self.get_response('You entered a string.',
'error')
if __name__ == '__main__':
    # Entry point: create the Qt application and main window, then hand
    # control to the Qt event loop; exit with the loop's return code.
    app = QApplication(sys.argv)
    win = Window()
    win.show()
    sys.exit(app.exec_())
| 39.771574 | 79 | 0.54365 | import sys
import random
import string
import json
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QLabel
from PyQt5.QtWidgets import QMainWindow
from PyQt5.QtWidgets import QStatusBar
from PyQt5.QtWidgets import QToolBar
from PyQt5.QtWidgets import QWidget
from PyQt5.QtWidgets import QAction
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtWidgets import QPushButton
from PyQt5.QtWidgets import QRadioButton
from PyQt5.QtWidgets import QLineEdit
from PyQt5.QtWidgets import QLabel
from PyQt5.QtWidgets import QFileDialog
from PyQt5.QtWidgets import QTextEdit
from PyQt5.QtWidgets import QInputDialog
from PyQt5.QtWidgets import QScrollArea
from PyQt5.QtWidgets import QPlainTextEdit
from PyQt5.QtWidgets import QDialog
from PyQt5.QtWidgets import QDialogButtonBox
from PyQt5.QtWidgets import QFormLayout
from PyQt5.QtWidgets import QGridLayout
from PyQt5.QtWidgets import QGroupBox
from PyQt5.QtWidgets import QHBoxLayout
from PyQt5.QtWidgets import QPushButton
from PyQt5.QtWidgets import QVBoxLayout
from PyQt5.QtWidgets import QPlainTextEdit
class PasswordWindow(QMainWindow):
def __init__(self, parent=None):
super().__init__(parent)
self.setWindowTitle('Password List')
self.setGeometry(30, 30, 900, 400)
class Window(QMainWindow):
def __init__(self, parent=None):
super().__init__(parent)
self.setWindowTitle('Password Genearator')
self.setGeometry(10, 10, 1000, 800)
self._createMenu()
self._createStatusBar()
def _createMenu(self):
file_menu = self.menuBar().addMenu("File")
new_action = QAction("&New", self)
new_action.setStatusTip('Create a new password.')
new_action.setShortcut('Ctrl+Shift+N')
new_action.triggered.connect(self.file_new)
file_menu.addAction(new_action)
open_action = QAction("Open", self)
open_action.setStatusTip('Open password file.')
open_action.setShortcut('Ctrl+Shift+O')
open_action.triggered.connect(self.file_open)
file_menu.addAction(open_action)
save_action = QAction("&Save", self)
save_action.setStatusTip('Save password file.')
save_action.setShortcut('Ctrl+Shift+S')
save_action.triggered.connect(self.file_save)
file_menu.addAction(save_action)
delete_action = QAction("Delete", self)
delete_action.setStatusTip('Delete password.')
delete_action.triggered.connect(self.file_delete)
file_menu.addAction(delete_action)
file_menu.addSeparator()
exit_action = QAction("&Exit", self)
exit_action.setStatusTip('Exit application.')
exit_action.setShortcut('Ctrl+Shift+Q')
exit_action.triggered.connect(self.close)
file_menu.addAction(exit_action)
about_action = QAction("About", self)
about_action.setStatusTip('Brief summary about app.')
about_action.triggered.connect(self.about_help)
help_menu = self.menuBar().addMenu("Help")
help_menu.addAction(about_action)
def _createStatusBar(self):
status = QStatusBar()
status.showMessage("Status Bar Area")
self.setStatusBar(status)
def file_new(self):
self.size_textbox = QLineEdit(self)
self.size_textbox.move(100, 50)
self.size_textbox.show()
self.size_label = QLabel('Password Length', self)
self.size_label.resize(self.size_label.minimumSizeHint())
self.size_label.move(210, 50)
self.size_label.show()
self.pin_radiobtn = QRadioButton('PIN', self)
self.pin_radiobtn.resize(self.pin_radiobtn.minimumSizeHint())
self.pin_radiobtn.move(100, 100)
self.pin_radiobtn.show()
self.alphanum_radiobtn = QRadioButton('Alphanumeric', self)
self.alphanum_radiobtn.resize(self.alphanum_radiobtn.minimumSizeHint())
self.alphanum_radiobtn.move(100, 150)
self.alphanum_radiobtn.show()
self.special_radiobtn = QRadioButton(
'Alphanumeric w/ special characters', self
)
self.special_radiobtn.resize(self.special_radiobtn.minimumSizeHint())
self.special_radiobtn.move(100, 200)
self.special_radiobtn.show()
self.btn = QPushButton('Generate Password', self)
self.btn.resize(self.btn.minimumSizeHint())
self.btn.clicked.connect(self.on_generate_btn_clicked)
self.btn.move(250, 275)
self.btn.show()
self.generated_pwd = QLineEdit(self)
self.generated_pwd.move(60, 325)
self.generated_pwd.setEnabled(False)
self.generated_pwd.resize(700, 32)
self.generated_pwd.hide()
def file_open(self):
self.textEdit = QTextEdit()
self.setCentralWidget(self.textEdit)
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
fname = QFileDialog.getOpenFileName(
self,
'QFileDialog.getOpenFileNames()',
'/home/mfsd1809/Dev/FullStackWebDeveloper/GUI/pass-gen/Files',
'JSON Files (*.json)',
options=options)
if fname[0]:
f = open(fname[0], 'r')
with f:
data = json.load(f)
self.pw = PasswordWindow()
self.app_label = QLabel(self.pw)
self.app_label.setText('Application - Password')
self.app_label.resize(self.app_label.minimumSizeHint())
self.app_label.move(20, 20)
self.b = QPlainTextEdit(self.pw)
count = 1
for app, pwd in data.items():
self.b.insertPlainText(str(count) +
'. ' +
app +
' - ' +
pwd +
'\n')
count = count + 1
self.b.setReadOnly(True)
self.b.move(20, 50)
self.b.resize(850, 250)
self.pw.move(100, 150)
self.pw.show()
def file_save(self):
app, okPressed = QInputDialog.getText(self,
"Application Name",
"Enter App Name:",
QLineEdit.Normal,
"")
app = app.lower().strip()
try:
if okPressed and app != '' and self.generated_pwd.text() != '':
with open(
'/home/mfsd1809/Dev/FullStackWebDeveloper/GUI/pass-gen/'
'Files/passwords.json'
) as r_file:
data = json.load(r_file)
data.update({app: self.generated_pwd.text()})
with open(
'/home/mfsd1809/Dev/FullStackWebDeveloper/GUI/pass-gen/'
'Files/passwords.json', 'w'
) as w_file:
json.dump(data, w_file, indent=4)
self.get_msg_box(QMessageBox.Information,
'Success',
'Password saved.')
elif (okPressed and app == ''):
self.get_msg_box(QMessageBox.Warning,
'Insufficient Data',
'You did not enter an app name.')
except Exception:
self.get_msg_box(QMessageBox.Warning,
'Insufficient Data',
'No password provided for the app you entered.')
def file_delete(self):
app, okPressed = QInputDialog.getText(self,
"Application Name",
"Enter App Name:",
QLineEdit.Normal,
"")
app = app.lower().strip()
try:
if okPressed and app != '':
with open(
'/home/mfsd1809/Dev/FullStackWebDeveloper/GUI/pass-gen/'
'Files/passwords.json'
) as r_file:
data = json.load(r_file)
del data[app]
with open(
'/home/mfsd1809/Dev/FullStackWebDeveloper/GUI/pass-gen/'
'Files/passwords.json', 'w'
) as w_file:
json.dump(data, w_file, indent=4)
self.get_msg_box(QMessageBox.Information,
'Success',
'Password deleted.')
elif (okPressed and app == ''):
self.get_msg_box(QMessageBox.Warning,
'Insufficient Data',
'You did not enter an app name.')
except Exception:
self.get_msg_box(QMessageBox.Warning,
'Insufficient Data',
'No password provided for the app you entered.')
def about_help(self):
self.get_msg_box(QMessageBox.Information,
'About',
'This application was created using Python and '
'PyQt5. It is used to generate a password based '
'on the user input of length and selected type.')
def get_msg_box(self, icon, title, msg):
self.mb = QMessageBox(self)
self.mb.setIcon(icon)
self.mb.setWindowTitle(title)
self.mb.setText(msg)
self.mb.setStandardButtons(QMessageBox.Ok)
self.mb.show()
def get_pin(self, size):
if(isinstance(size, int)):
if(size < 0):
self.get_response('You did not enter a positive integer!',
'error')
elif (size < 4):
self.get_response('The number must be greater or equal to 4.',
'error')
else:
digits = string.digits
self.get_response(
''.join(random.choice(digits) for i in range(size)),
'success')
else:
self.get_response('You did NOT enter an integer!', 'error')
def get_alphanumeric_password(self, size):
if(isinstance(size, int)):
if(size < 0):
self.get_response('You did not enter a positive integer!',
'error')
elif (size < 8):
self.get_response('The number must be greater or equal to 8.',
'error')
else:
letters_and_digits = string.ascii_letters + string.digits
self.get_response(
''.join(random.choice(
letters_and_digits) for i in range(size)
),
'success')
else:
self.get_response('You did NOT enter an integer!', 'error')
def get_alphanumeric_with_symbols_password(self, size):
if(isinstance(size, int)):
if(size < 0):
self.get_response('You did not enter a positive integer!',
'error')
elif (size < 8):
self.get_response('The number must be greater or equal to 8.',
'error')
else:
letters_digits_symbols = (string.ascii_letters +
string.digits +
string.punctuation)
self.get_response(
''.join(random.choice(
letters_digits_symbols) for i in range(size)
),
'success')
else:
self.get_response('You did NOT enter an integer!', 'error')
def get_response(self, text, result_type):
self.generated_pwd.setText(text)
if(result_type == 'error'):
self.generated_pwd.setStyleSheet("color: rgb(255, 0, 0)")
else:
self.generated_pwd.setStyleSheet("color: rgb(0, 153, 0)")
self.generated_pwd.show()
def on_generate_btn_clicked(self):
try:
input = self.size_textbox.text().strip()
if(input == ''):
self.get_response('You did not enter a value for password '
'length.',
'error')
else:
input = int(input)
if(self.pin_radiobtn.isChecked()):
self.get_pin(input)
elif(self.alphanum_radiobtn.isChecked()):
self.get_alphanumeric_password(input)
elif(self.special_radiobtn.isChecked()):
self.get_alphanumeric_with_symbols_password(input)
else:
self.get_response('You must select a password option.',
'error')
except ValueError:
self.get_response('You entered a string.',
'error')
if __name__ == '__main__':
app = QApplication(sys.argv)
win = Window()
win.show()
sys.exit(app.exec_())
| true | true |
f7398ddbab50e8ca13952c4009e17f1a8769e970 | 3,407 | py | Python | HDhomerunProxy.py | JortdeBokx/NPO-streams | 1ad35751f41f6cd4019be3cb3d73ab019d08efe0 | [
"MIT"
] | 2 | 2019-01-31T18:19:22.000Z | 2019-11-07T09:05:40.000Z | HDhomerunProxy.py | JortdeBokx/NPO-streams | 1ad35751f41f6cd4019be3cb3d73ab019d08efe0 | [
"MIT"
] | 4 | 2019-01-02T20:17:29.000Z | 2019-01-05T10:52:57.000Z | HDhomerunProxy.py | JortdeBokx/NPO-streams | 1ad35751f41f6cd4019be3cb3d73ab019d08efe0 | [
"MIT"
] | null | null | null | import logging
from flask import jsonify, request, abort, Response, stream_with_context
# Code similar to https://github.com/jkaberg/tvhProxy
from util.Helpers import generate_stream_ffmpeg
def setup_hdhrproxy(app, stream_handlers):
    """Register HDHomeRun-emulation routes on the Flask ``app``.

    Builds a channel lineup from every stream handler and exposes the
    discovery/lineup/streaming endpoints a Plex-style client expects from a
    Silicondust HDHomeRun tuner.

    :param app: Flask application (must have HOST, PORT, QUALITY in config)
    :param stream_handlers: handler objects providing get_lineup, valid_key
        and get_live_m3u8
    """
    lineup = []
    for sh in stream_handlers:
        name = sh.__class__.__name__
        lineup += sh.get_lineup("http://" + app.config["HOST"] + ":" + str(app.config["PORT"]) + "/" + name)
    logging.info('Lineup: ' + str(lineup))

    def _device_info():
        # Shared by /discover.json and /device.xml (previously duplicated).
        # Built per request because BaseURL depends on request.host_url.
        return {
            'FriendlyName': 'NPOproxy',
            'Manufacturer': 'Silicondust',
            'ModelNumber': 'HDTC-2US',
            'FirmwareName': 'hdhomeruntc_atsc',
            'TunerCount': 1,
            'FirmwareVersion': '20150826',
            'DeviceID': '12345678',
            'DeviceAuth': 'test1234',
            'BaseURL': '%s' % request.host_url,
            'LineupURL': '%slineup.json' % request.host_url
        }

    @app.route('/<class_name>/<key>')
    def stream_stuff(class_name, key):
        # Resolve the handler by its class name (used in the lineup URLs).
        sh = None
        for i in stream_handlers:
            if i.__class__.__name__ == class_name:
                sh = i
        if not sh:
            abort(404)
        if not sh.valid_key(key):
            abort(404)

        stream_url = sh.get_live_m3u8(str(key), quality=app.config["QUALITY"])
        if not stream_url:
            logging.error("Could not get stream url")
            abort(404)

        # Re-mux the HLS stream to MPEG-TS and stream it to the client.
        return Response(stream_with_context(generate_stream_ffmpeg(stream_url)), mimetype="video/mp2t")

    @app.route('/discover.json')
    def discover():
        return jsonify(_device_info())

    @app.route('/lineup_status.json')
    def status():
        return jsonify({
            'ScanInProgress': 0,
            'ScanPossible': 1,
            'Source': "Cable",
            'SourceList': ['Cable']
        })

    @app.route('/lineup.json')
    def give_lineup():
        return jsonify(lineup)

    @app.route('/lineup.post', methods=['GET', 'POST'])
    def lineup_post():
        # Clients POST here to trigger a channel scan; nothing to do.
        return ''

    @app.route('/')
    @app.route('/device.xml')
    def device():
        info = _device_info()
        # NOTE: the previous version closed each substitution with `""""`,
        # which injected a stray double quote before every closing tag and
        # produced invalid XML; str.format fixes that.
        xml = """
    <root xmlns="urn:schemas-upnp-org:device-1-0">
        <specVersion>
            <major>1</major>
            <minor>0</minor>
        </specVersion>
        <URLBase>{BaseURL}</URLBase>
        <device>
            <deviceType>urn:schemas-upnp-org:device:MediaServer:1</deviceType>
            <friendlyName>{FriendlyName}</friendlyName>
            <manufacturer>{Manufacturer}</manufacturer>
            <modelName>{ModelNumber}</modelName>
            <modelNumber>{ModelNumber}</modelNumber>
            <serialNumber></serialNumber>
            <UDN>uuid:{DeviceID}</UDN>
        </device>
    </root>
    """.format(**info)
        return xml, {'Content-Type': 'application/xml'}
| 33.732673 | 108 | 0.550338 | import logging
from flask import jsonify, request, abort, Response, stream_with_context
from util.Helpers import generate_stream_ffmpeg
def setup_hdhrproxy(app, stream_handlers):
lineup = []
for sh in stream_handlers:
name = sh.__class__.__name__
lineup += sh.get_lineup("http://" + app.config["HOST"] + ":" + str(app.config["PORT"]) + "/" + name)
logging.info('Lineup: ' + str(lineup))
@app.route('/<class_name>/<key>')
def stream_stuff(class_name, key):
sh = None
for i in stream_handlers:
if i.__class__.__name__ == class_name:
sh = i
if not sh:
abort(404)
if not sh.valid_key(key):
abort(404)
stream_url = sh.get_live_m3u8(str(key), quality=app.config["QUALITY"])
if not stream_url:
logging.error("Could not get stream url")
abort(404)
return Response(stream_with_context(generate_stream_ffmpeg(stream_url)), mimetype="video/mp2t")
@app.route('/discover.json')
def discover():
discover_data = {
'FriendlyName': 'NPOproxy',
'Manufacturer': 'Silicondust',
'ModelNumber': 'HDTC-2US',
'FirmwareName': 'hdhomeruntc_atsc',
'TunerCount': 1,
'FirmwareVersion': '20150826',
'DeviceID': '12345678',
'DeviceAuth': 'test1234',
'BaseURL': '%s' % request.host_url,
'LineupURL': '%slineup.json' % request.host_url
}
return jsonify(discover_data)
@app.route('/lineup_status.json')
def status():
return jsonify({
'ScanInProgress': 0,
'ScanPossible': 1,
'Source': "Cable",
'SourceList': ['Cable']
})
@app.route('/lineup.json')
def give_lineup():
return jsonify(lineup)
@app.route('/lineup.post', methods=['GET', 'POST'])
def lineup_post():
return ''
@app.route('/')
@app.route('/device.xml')
def device():
discover_data = {
'FriendlyName': 'NPOproxy',
'Manufacturer': 'Silicondust',
'ModelNumber': 'HDTC-2US',
'FirmwareName': 'hdhomeruntc_atsc',
'TunerCount': 1,
'FirmwareVersion': '20150826',
'DeviceID': '12345678',
'DeviceAuth': 'test1234',
'BaseURL': '%s' % request.host_url,
'LineupURL': '%slineup.json' % request.host_url
}
return """
<root xmlns="urn:schemas-upnp-org:device-1-0">
<specVersion>
<major>1</major>
<minor>0</minor>
</specVersion>
<URLBase>""" + discover_data["BaseURL"] + """"</URLBase>
<device>
<deviceType>urn:schemas-upnp-org:device:MediaServer:1</deviceType>
<friendlyName>""" + discover_data["FriendlyName"] + """"</friendlyName>
<manufacturer>""" + discover_data["Manufacturer"] + """"</manufacturer>
<modelName>""" + discover_data["ModelNumber"] + """"</modelName>
<modelNumber>""" + discover_data["ModelNumber"] + """"</modelNumber>
<serialNumber></serialNumber>
<UDN>uuid:""" + discover_data["DeviceID"] + """"</UDN>
</device>
</root>
""", {'Content-Type': 'application/xml'}
| true | true |
f7398eddf540bc00dfc4f86bff555a653deea03d | 5,783 | py | Python | cell2location/plt/plot_factor_spatial.py | nadavyayon/cell2location | 54141fb85d4b0d64825dfdb6d1bf147b025c856b | [
"Apache-2.0"
] | 127 | 2020-06-22T16:50:00.000Z | 2022-03-23T09:48:30.000Z | cell2location/plt/plot_factor_spatial.py | nadavyayon/cell2location | 54141fb85d4b0d64825dfdb6d1bf147b025c856b | [
"Apache-2.0"
] | 70 | 2020-06-24T01:31:28.000Z | 2022-03-29T13:40:05.000Z | cell2location/plt/plot_factor_spatial.py | nadavyayon/cell2location | 54141fb85d4b0d64825dfdb6d1bf147b025c856b | [
"Apache-2.0"
] | 36 | 2020-06-19T16:41:27.000Z | 2022-03-25T02:40:51.000Z | #!pip install plotnine
import numpy as np
import pandas as pd
import plotnine
def plot_factor_spatial(
    adata,
    fact,
    cluster_names,
    fact_ind=[0],
    trans="log",
    sample_name=None,
    samples_col="sample",
    obs_x="imagecol",
    obs_y="imagerow",
    n_columns=6,
    max_col=5000,
    col_breaks=[0.1, 100, 1000, 3000],
    figure_size=(24, 5.7),
    point_size=0.8,
    text_size=9,
):
    r"""Plot expression of factors / cell types in space.

    Convenient but not as powerful as scanpy plotting.

    :param adata: anndata object with spatial data
    :param fact: pd.DataFrame with spatial expression of factors (W), e.g. mod.spot_factors_df
    :param cluster_names: names of those factors to show on a plot
    :param fact_ind: index of factors to plot (note: the list default is safe
        here because it is never mutated)
    :param trans: transform colorscale? passed to plotnine.scale_color_cmap
    :param sample_name: if anndata object contains multiple samples specify which sample to plot (no warning given if not)
    :param samples_col: if anndata object contains multiple which .obs columns specifies sample?
    :param obs_x: which .obs columns specifies x coordinate?
    :param obs_y: which .obs columns specifies y coordinate?
    :param n_columns: how many factors / clusters to plot in each row (plotnine.facet_grid)
    :param max_col: colorscale maximum expression in fact
    :param col_breaks: colorscale breaks
    :param figure_size: figures size works weirdly (only x axis has an effect, use 24 for 6-column plot, 12 for 3, 8 for 2 ...).
    :param point_size: point size of spots
    :param text_size: text size
    """

    # Boolean mask selecting the spots belonging to the requested sample.
    if sample_name is not None:
        sample_ind = np.isin(adata.obs[samples_col], sample_name)
    else:
        sample_ind = np.repeat(True, adata.shape[0])

    # Build one long DataFrame with a row per (spot, factor) pair. Previously
    # the first factor was constructed outside the loop with duplicated code
    # and an object-dtype np.concatenate round-trip; a single loop building
    # typed columns directly is equivalent and simpler.
    frames = []
    for i in fact_ind:
        frame = pd.DataFrame(
            {
                "imagecol": adata.obs[obs_x].values,
                # y is negated so the image coordinate system plots upright
                "imagerow": -adata.obs[obs_y].values,
                "weights": fact.iloc[:, i].values,
                "cluster": str(cluster_names[i]),
            },
            index=adata.obs.index,
        )
        # Keep only the selected sample's spots.
        frames.append(frame.loc[sample_ind, :])
    for_plot = pd.concat(frames)

    # Ensure numeric dtypes (no-op for already-numeric columns) and order the
    # facets by the requested factor order.
    for_plot["imagecol"] = pd.to_numeric(for_plot["imagecol"])
    for_plot["imagerow"] = pd.to_numeric(for_plot["imagerow"])
    for_plot["weights"] = pd.to_numeric(for_plot["weights"])
    for_plot["cluster"] = pd.Categorical(for_plot["cluster"], categories=cluster_names[fact_ind], ordered=True)

    ax = (
        plotnine.ggplot(for_plot, plotnine.aes("imagecol", "imagerow", color="weights"))
        + plotnine.geom_point(size=point_size)
        + plotnine.scale_color_cmap("magma", trans=trans, limits=[0.1, max_col], breaks=col_breaks + [max_col])
        + plotnine.coord_fixed()
        + plotnine.theme_bw()
        + plotnine.theme(
            panel_background=plotnine.element_rect(fill="black", colour="black", size=0, linetype="solid"),
            panel_grid_major=plotnine.element_line(size=0, linetype="solid", colour="black"),
            panel_grid_minor=plotnine.element_line(size=0, linetype="solid", colour="black"),
            strip_text=plotnine.element_text(size=text_size),
        )
        + plotnine.facet_wrap("~cluster", ncol=n_columns)
        + plotnine.ggtitle("nUMI from each cell type")
        + plotnine.theme(figure_size=figure_size)
    )
    return ax
def plot_categ_spatial(mod, adata, sample_col, color, n_columns=2, figure_size=(24, 5.7), point_size=0.8, text_size=9):
    """Plot a categorical label for each spot in space, one facet per sample.

    :param mod: unused; kept for interface compatibility with existing callers
    :param adata: anndata object with spatial data (needs imagecol/imagerow in .obs)
    :param sample_col: .obs column naming the sample each spot belongs to
    :param color: categorical label per spot (same length/order as adata.obs)
    :param n_columns: facets per row
    :param figure_size: figure size passed to plotnine
    :param point_size: point size of spots
    :param text_size: facet strip text size
    """

    # Copy so column assignments below do not mutate adata.obs (and do not
    # trigger pandas SettingWithCopyWarning on the slice).
    for_plot = adata.obs[["imagecol", "imagerow", sample_col]].copy()
    for_plot["color"] = color

    # Fix dtypes for plotting.
    for_plot["color"] = pd.Categorical(for_plot["color"], ordered=True)
    for_plot["sample"] = pd.Categorical(for_plot[sample_col], ordered=False)
    for_plot["imagecol"] = pd.to_numeric(for_plot["imagecol"])
    # y is negated so the image coordinate system plots upright.
    for_plot["imagerow"] = -pd.to_numeric(for_plot["imagerow"])

    ax = (
        plotnine.ggplot(for_plot, plotnine.aes(x="imagecol", y="imagerow", color="color"))
        + plotnine.geom_point(size=point_size)
        + plotnine.coord_fixed()
        + plotnine.theme_bw()
        + plotnine.theme(
            panel_background=plotnine.element_rect(fill="black", colour="black", size=0, linetype="solid"),
            panel_grid_major=plotnine.element_line(size=0, linetype="solid", colour="black"),
            panel_grid_minor=plotnine.element_line(size=0, linetype="solid", colour="black"),
            strip_text=plotnine.element_text(size=text_size),
        )
        + plotnine.facet_wrap("~sample", ncol=n_columns)
        + plotnine.theme(figure_size=figure_size)
    )
    return ax
| 43.156716 | 128 | 0.652948 |
import numpy as np
import pandas as pd
import plotnine
def plot_factor_spatial(
adata,
fact,
cluster_names,
fact_ind=[0],
trans="log",
sample_name=None,
samples_col="sample",
obs_x="imagecol",
obs_y="imagerow",
n_columns=6,
max_col=5000,
col_breaks=[0.1, 100, 1000, 3000],
figure_size=(24, 5.7),
point_size=0.8,
text_size=9,
):
if sample_name is not None:
sample_ind = np.isin(adata.obs[samples_col], sample_name)
else:
sample_ind = np.repeat(True, adata.shape[0])
for_plot = np.concatenate(
(
adata.obs[obs_x].values.reshape((adata.obs.shape[0], 1)),
-adata.obs[obs_y].values.reshape((adata.obs.shape[0], 1)),
fact.iloc[:, fact_ind[0]].values.reshape((adata.obs.shape[0], 1)),
np.array([cluster_names[fact_ind[0]] for j in range(adata.obs.shape[0])]).reshape((adata.obs.shape[0], 1)),
),
1,
)
for_plot = pd.DataFrame(for_plot, index=adata.obs.index, columns=["imagecol", "imagerow", "weights", "cluster"])
for_plot = for_plot.loc[sample_ind, :]
for i in fact_ind[1:]:
for_plot1 = np.concatenate(
(
adata.obs[obs_x].values.reshape((adata.obs.shape[0], 1)),
-adata.obs[obs_y].values.reshape((adata.obs.shape[0], 1)),
fact.iloc[:, i].values.reshape((adata.obs.shape[0], 1)),
np.array([cluster_names[i] for j in range(adata.obs.shape[0])]).reshape((adata.obs.shape[0], 1)),
),
1,
)
for_plot1 = pd.DataFrame(
for_plot1, index=adata.obs.index, columns=["imagecol", "imagerow", "weights", "cluster"]
)
for_plot1 = for_plot1.loc[sample_ind, :]
for_plot = pd.concat((for_plot, for_plot1))
for_plot["imagecol"] = pd.to_numeric(for_plot["imagecol"])
for_plot["imagerow"] = pd.to_numeric(for_plot["imagerow"])
for_plot["weights"] = pd.to_numeric(for_plot["weights"])
for_plot["cluster"] = pd.Categorical(for_plot["cluster"], categories=cluster_names[fact_ind], ordered=True)
ax = (
plotnine.ggplot(for_plot, plotnine.aes("imagecol", "imagerow", color="weights"))
+ plotnine.geom_point(size=point_size)
+ plotnine.scale_color_cmap("magma", trans=trans, limits=[0.1, max_col], breaks=col_breaks + [max_col])
+ plotnine.coord_fixed()
+ plotnine.theme_bw()
+ plotnine.theme(
panel_background=plotnine.element_rect(fill="black", colour="black", size=0, linetype="solid"),
panel_grid_major=plotnine.element_line(size=0, linetype="solid", colour="black"),
panel_grid_minor=plotnine.element_line(size=0, linetype="solid", colour="black"),
strip_text=plotnine.element_text(size=text_size),
)
+ plotnine.facet_wrap("~cluster", ncol=n_columns)
+ plotnine.ggtitle("nUMI from each cell type")
+ plotnine.theme(figure_size=figure_size)
)
return ax
def plot_categ_spatial(mod, adata, sample_col, color, n_columns=2, figure_size=(24, 5.7), point_size=0.8, text_size=9):
for_plot = adata.obs[["imagecol", "imagerow", sample_col]]
for_plot["color"] = color
for_plot["color"] = pd.Categorical(for_plot["color"], ordered=True)
for_plot["sample"] = pd.Categorical(for_plot[sample_col], ordered=False)
for_plot["imagecol"] = pd.to_numeric(for_plot["imagecol"])
for_plot["imagerow"] = -pd.to_numeric(for_plot["imagerow"])
ax = (
plotnine.ggplot(for_plot, plotnine.aes(x="imagecol", y="imagerow", color="color"))
+ plotnine.geom_point(size=point_size)
+ plotnine.coord_fixed()
+ plotnine.theme_bw()
+ plotnine.theme(
panel_background=plotnine.element_rect(fill="black", colour="black", size=0, linetype="solid"),
panel_grid_major=plotnine.element_line(size=0, linetype="solid", colour="black"),
panel_grid_minor=plotnine.element_line(size=0, linetype="solid", colour="black"),
strip_text=plotnine.element_text(size=text_size),
)
+ plotnine.facet_wrap("~sample", ncol=n_columns)
+ plotnine.theme(figure_size=figure_size)
)
return ax
| true | true |
f7399158ec2ad40e4861202ef1f4ddf096aadabd | 455 | py | Python | tst/types/condition_with_args.py | TST-Group-BE/flax-blockchain | ed850df4f28ef4b6f71c175c8b6d07d27f7b3cd5 | [
"Apache-2.0"
] | null | null | null | tst/types/condition_with_args.py | TST-Group-BE/flax-blockchain | ed850df4f28ef4b6f71c175c8b6d07d27f7b3cd5 | [
"Apache-2.0"
] | null | null | null | tst/types/condition_with_args.py | TST-Group-BE/flax-blockchain | ed850df4f28ef4b6f71c175c8b6d07d27f7b3cd5 | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass
from typing import List
from tst.types.condition_opcodes import ConditionOpcode
from tst.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class ConditionWithArgs(Streamable):
    """
    This structure is used to store parsed CLVM conditions
    Conditions in CLVM have either format of (opcode, var1) or (opcode, var1, var2)
    """

    # The condition opcode; see ConditionOpcode for the defined values.
    opcode: ConditionOpcode
    # Raw argument atoms for the opcode, in order. NOTE(review): the name
    # shadows the `vars` builtin inside this class body; renaming would
    # break the serialized/streamable field name, so it is kept.
    vars: List[bytes]
| 25.277778 | 83 | 0.764835 | from dataclasses import dataclass
from typing import List
from tst.types.condition_opcodes import ConditionOpcode
from tst.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class ConditionWithArgs(Streamable):
opcode: ConditionOpcode
vars: List[bytes]
| true | true |
f73991b6463eb0e74d4aad55c6ff58d054e62fc4 | 3,843 | py | Python | Plots/Scatter/NCL_scatter_5.py | NCAR/GeoCAT-examples | fba1b045ba5145fa48cf2f3c1e3b3c7c863b0b5b | [
"Apache-2.0"
] | 42 | 2020-03-03T16:19:30.000Z | 2022-03-18T09:03:26.000Z | Plots/Scatter/NCL_scatter_5.py | netgodz/GeoCAT-examples | 5ed9a1d68b69a921d0f1fee1160e109853926ed9 | [
"Apache-2.0"
] | 351 | 2019-12-20T22:10:47.000Z | 2022-03-16T20:46:09.000Z | Plots/Scatter/NCL_scatter_5.py | netgodz/GeoCAT-examples | 5ed9a1d68b69a921d0f1fee1160e109853926ed9 | [
"Apache-2.0"
] | 32 | 2020-01-06T21:18:48.000Z | 2022-03-31T13:45:01.000Z | """
NCL_scatter_5.py
================
This script illustrates the following concepts:
- Drawing a scatter plot with markers of different colors
- Generating dummy data using "random.normal"
- Manually creating a legend using markers and text
- Customizing the label locations in a legend
- Changing the orientation of a legend
- Drawing a legend outside an XY plot
- Changing the markers in an XY plot
- Changing the marker color in an XY plot
- Changing the marker size in an XY plot
See following URLs to see the reproduced NCL plot & script:
- Original NCL script: https://www.ncl.ucar.edu/Applications/Scripts/scatter_5.ncl
- Original NCL plot: https://www.ncl.ucar.edu/Applications/Images/scatter_5_lg.png
"""
##############################################################################
# Import packages:
import numpy as np
import matplotlib.pyplot as plt
from cycler import cycler
from geocat.viz import util as gvutil
##############################################################################
# Draw dummy data: 300 samples from a Gaussian with mean 10 and std-dev 3,
# using a fixed seed so the plot is reproducible.
npts = 300
rng = np.random.default_rng(seed=1)
data = rng.normal(loc=10, scale=3, size=npts)
##############################################################################
# One colour and one marker shape per data bin (eight bins below).
colors = ['darkgoldenrod', 'darkgreen', 'coral', 'cyan', 'firebrick',
          'darkslateblue', 'limegreen', 'goldenrod']
markers = ['+', '*', 'o', 'x', 's', '^', 'v', 'D']

# Make successive plot calls cycle through `colors` automatically.
plt.rcParams['axes.prop_cycle'] = cycler(color=colors)
##############################################################################
# Plot
fig = plt.figure(figsize=(8, 8))
# Shrink the axes rect [left, bottom, width, height] (figure fractions) to
# leave room for the horizontal legend drawn below the plot
ax = plt.axes([0.15, 0.2, 0.75, 0.70])
# Divide data into 8 bins and plot each bin as its own series.
# NOTE: the previous version tested `bins != np.nan`, which is always True
# (NaN never compares equal/unequal meaningfully), so its index masking was a
# no-op; an explicit boolean mask does what was intended.
numBins = 8
indices = np.arange(npts)
partitions = np.linspace(0, 20, numBins + 1)
label = "{start:g}:{end:g}"
for x in range(numBins):
    # True where the value falls strictly inside this bin's bounds.
    in_bin = (data > partitions[x]) & (data < partitions[x + 1])
    # NaN out points outside the bin; matplotlib skips NaN coordinates, so
    # only this bin's points are drawn (same visual result as before).
    plt.plot(np.where(in_bin, indices, np.nan),
             np.where(in_bin, data, np.nan),
             marker=markers[x],
             fillstyle='none',
             linewidth=0,
             label=label.format(start=partitions[x], end=partitions[x + 1]))
# `ncol` equal to the number of labels lays the legend out as one horizontal
# row; bbox_to_anchor places it below/outside the axes
legend = ax.legend(bbox_to_anchor=(-0.075, -0.2),
                   ncol=numBins,
                   loc='lower left',
                   columnspacing=0.5,
                   frameon=False)
for txt in legend.get_texts():
    txt.set_ha("center")  # horizontal alignment of text item
    txt.set_va("center")  # vertical alignment of text item

    # Shift each label so it sits centered under its marker (offsets were
    # presumably tuned by eye to match the NCL original — adjust if the
    # figure size changes)
    txt.set_x(-25)  # x-position
    txt.set_y(-20)  # y-position
# Use geocat.viz.util convenience function to set axes limits and the major
# tick locations on both axes
gvutil.set_axes_limits_and_ticks(ax,
                                 xlim=(0, 300),
                                 ylim=(0, 21),
                                 xticks=range(0, 301, 50),
                                 yticks=range(0, 22, 3))

# Use geocat.viz.util convenience function to add minor ticks between the
# major ones (5 per major on x, 3 on y) and set the tick label size
gvutil.add_major_minor_ticks(ax,
                             x_minor_per_major=5,
                             y_minor_per_major=3,
                             labelsize=14)

# Use geocat.viz.util convenience function to set the plot title
gvutil.set_titles_and_labels(ax, maintitle="Scatter plot with grouped markers")

# Render the figure
plt.show()
| 37.676471 | 106 | 0.586261 | true | true | |
f73993c541a696c9041c73c61c7ff09aee664cf9 | 821 | py | Python | apps/TCPB_-_ShadowServer_Binary_Whitelist_Search/app.py | cvahid/threatconnect-playbooks | 2a862a1739750104785aedd1af29de868363140a | [
"Apache-2.0"
] | null | null | null | apps/TCPB_-_ShadowServer_Binary_Whitelist_Search/app.py | cvahid/threatconnect-playbooks | 2a862a1739750104785aedd1af29de868363140a | [
"Apache-2.0"
] | null | null | null | apps/TCPB_-_ShadowServer_Binary_Whitelist_Search/app.py | cvahid/threatconnect-playbooks | 2a862a1739750104785aedd1af29de868363140a | [
"Apache-2.0"
] | 2 | 2021-09-23T01:47:03.000Z | 2022-03-10T04:25:43.000Z | # -*- coding: utf-8 -*-
""" ThreatConnect Playbook App """
import requests
# Import default Playbook Class (Required)
from playbook_app import PlaybookApp
class App(PlaybookApp):
    """Playbook App that checks an MD5 hash against the ShadowServer binary whitelist."""
    # Fail the HTTP call instead of hanging the playbook forever if the
    # ShadowServer endpoint is unresponsive (requests has no default timeout).
    REQUEST_TIMEOUT = 30
    def run(self):
        """Run the App main logic.

        Reads the ``md5_hash`` playbook input, queries the ShadowServer
        bin-test API and writes the response (minus the echoed hash) to the
        ``shadowServerBinaryCheckResponse`` output variable.
        """
        url = 'http://bin-test.shadowserver.org/api?md5={}'
        md5 = self.tcex.playbook.read(self.args.md5_hash)
        self.tcex.log.info('Querying the ShadowServer API for md5: {}'.format(md5))
        response = requests.get(url.format(md5), timeout=self.REQUEST_TIMEOUT).text
        # The API echoes the queried hash before the JSON payload; strip it.
        response = response.replace(md5, '', 1).strip()
        self.tcex.log.info('Response from ShadowServer API: {}'.format(response))
        self.tcex.playbook.create_output('shadowServerBinaryCheckResponse', response)
| 31.576923 | 85 | 0.65408 |
import requests
from playbook_app import PlaybookApp
class App(PlaybookApp):
def run(self):
url = 'http://bin-test.shadowserver.org/api?md5={}'
md5 = self.tcex.playbook.read(self.args.md5_hash)
self.tcex.log.info('Querying the ShadowServer API for md5: {}'.format(md5))
response = requests.get(url.format(md5)).text
response = response.replace(md5, '', 1).strip()
self.tcex.log.info('Response from ShadowServer API: {}'.format(response))
self.tcex.playbook.create_output('shadowServerBinaryCheckResponse', response)
| true | true |
f73996a071303ab71db0b925e94fd8af9779a846 | 967 | py | Python | test/test_gross_price_component_dto.py | Dangl-IT/avacloud-client-python | 66f555096bbbc87d02d02e4e2dfb0c6accb18f95 | [
"RSA-MD"
] | 1 | 2019-01-12T18:10:24.000Z | 2019-01-12T18:10:24.000Z | test/test_gross_price_component_dto.py | Dangl-IT/avacloud-client-python | 66f555096bbbc87d02d02e4e2dfb0c6accb18f95 | [
"RSA-MD"
] | null | null | null | test/test_gross_price_component_dto.py | Dangl-IT/avacloud-client-python | 66f555096bbbc87d02d02e4e2dfb0c6accb18f95 | [
"RSA-MD"
] | null | null | null | # coding: utf-8
"""
AVACloud API 1.17.3
AVACloud API specification # noqa: E501
OpenAPI spec version: 1.17.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import avacloud_client_python
from avacloud_client_python.models.gross_price_component_dto import GrossPriceComponentDto # noqa: E501
from avacloud_client_python.rest import ApiException
class TestGrossPriceComponentDto(unittest.TestCase):
    """Unit-test stubs for the ``GrossPriceComponentDto`` model."""
    def setUp(self):
        """No fixtures required for these stubs."""
    def tearDown(self):
        """Nothing to clean up."""
    def testGrossPriceComponentDto(self):
        """Test GrossPriceComponentDto."""
        # FIXME: construct object with mandatory attributes with example values
        # model = avacloud_client_python.models.gross_price_component_dto.GrossPriceComponentDto()  # noqa: E501
if __name__ == '__main__':
unittest.main()
| 23.585366 | 112 | 0.731127 |
from __future__ import absolute_import
import unittest
import avacloud_client_python
from avacloud_client_python.models.gross_price_component_dto import GrossPriceComponentDto
from avacloud_client_python.rest import ApiException
class TestGrossPriceComponentDto(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testGrossPriceComponentDto(self):
s
if __name__ == '__main__':
unittest.main()
| true | true |
f73997c890180a37039bd408c2806815f4657fdb | 6,950 | py | Python | mcitems.py | Mustard2/MenuCreator | c5d942ae0fc5997ed32da3b9e4dc6e9e6271d4b3 | [
"MIT"
] | 19 | 2020-12-17T05:19:20.000Z | 2022-03-28T23:19:39.000Z | mcitems.py | DJHartley/MenuCreator | c5d942ae0fc5997ed32da3b9e4dc6e9e6271d4b3 | [
"MIT"
] | 7 | 2021-02-07T18:49:14.000Z | 2021-05-29T12:19:21.000Z | mcitems.py | DJHartley/MenuCreator | c5d942ae0fc5997ed32da3b9e4dc6e9e6271d4b3 | [
"MIT"
] | 1 | 2021-02-22T23:35:30.000Z | 2021-02-22T23:35:30.000Z |
import bpy
from . import mcdata
# Class to store collections for section informations
class MCCollectionItem(bpy.types.PropertyGroup):
    """List entry wrapping one Blender collection managed by a menu section."""
    collection : bpy.props.PointerProperty(name="Collection",type=bpy.types.Collection)
# Class to store section informations
class MCSectionItem(bpy.types.PropertyGroup):
    """One menu section: its UI options plus the collections/outfit data it controls."""
    # Properties and update functions
    # Function to update the collapsed status if the collapsed section property is changed
    def mc_sections_collapsed_update(self, context):
        """Force-expand the section whenever it is made non-collapsable."""
        if not self.collapsable:
            self.collapsed = False
        return
    # Function to create an array of tuples for enum collections
    def mc_collections_list(self, context):
        """Build sorted (identifier, name, description) items for the collections enum."""
        items = []
        for el in self.collections:
            if hasattr(el.collection, 'name'):
                items.append( (el.collection.name,el.collection.name,el.collection.name) )
        return sorted(items)
    # Function to update global collection properties
    def mc_collections_list_update(self, context):
        """Show only the collection selected in the enum; hide all the others."""
        for collection in self.collections:
            if collection.collection.name == self.collections_list:
                collection.collection.hide_viewport = False
                collection.collection.hide_render = False
            else:
                collection.collection.hide_viewport = True
                collection.collection.hide_render = True
    def mc_collections_global_options_update(self, context):
        """Push the section-wide modifier toggles onto every object in every collection.

        Also re-syncs the outfit body's MASK modifiers: a mask is shown only
        when masking is globally enabled and its matching object is visible.
        """
        items = []
        i = 0
        for el in self.collections:
            for obj in el.collection.objects:
                if obj.type == "MESH":
                    obj.data.use_auto_smooth = self.collections_global_normalautosmooth
                for modifier in obj.modifiers:
                    if modifier.type == "CORRECTIVE_SMOOTH":
                        modifier.show_viewport = self.collections_global_smoothcorrection
                        modifier.show_render = self.collections_global_smoothcorrection
                    elif modifier.type == "MASK":
                        modifier.show_viewport = self.collections_global_mask
                        modifier.show_render = self.collections_global_mask
                    elif modifier.type == "SHRINKWRAP":
                        modifier.show_viewport = self.collections_global_shrinkwrap
                        modifier.show_render = self.collections_global_shrinkwrap
        if self.outfit_enable:
            for modifier in self.outfit_body.modifiers:
                if modifier.type == "MASK":
                    if not self.collections_global_mask:
                        modifier.show_viewport = False
                        modifier.show_render = False
                    else:
                        # A body mask is matched by name to the outfit object it
                        # hides; enable it only if that object is currently visible.
                        for el in self.collections:
                            for obj in el.collection.objects:
                                if obj.name in modifier.name and not obj.hide_viewport:
                                    modifier.show_viewport = True
                                    modifier.show_render = True
        return
    # Poll function for the selection of mesh only in pointer properties
    def mc_poll_mesh(self, object):
        """Restrict pointer-property candidates to MESH objects."""
        return object.type == 'MESH'
    # Global section options
    id : bpy.props.IntProperty(name="Section ID")
    name : bpy.props.StringProperty(name="Section Name")
    icon : bpy.props.StringProperty(name="Section Icon", default="")
    type : bpy.props.StringProperty(name="Section Type", default="DEFAULT")
    collapsable : bpy.props.BoolProperty(name="Section Collapsable", default=False, update=mc_sections_collapsed_update)
    # Global section option enforcer
    collapsed : bpy.props.BoolProperty(name="", default = False, description="")
    # COLLECTION type options
    collections_enable_global_smoothcorrection: bpy.props.BoolProperty(default=False)
    collections_enable_global_shrinkwrap: bpy.props.BoolProperty(default=False)
    collections_enable_global_mask: bpy.props.BoolProperty(default=False)
    collections_enable_global_normalautosmooth: bpy.props.BoolProperty(default=False)
    # COLLECTION type data
    collections: bpy.props.CollectionProperty(name="Section Collection List", type=MCCollectionItem)
    collections_list: bpy.props.EnumProperty(name="Section Collection List", items = mc_collections_list, update=mc_collections_list_update)
    collections_global_smoothcorrection: bpy.props.BoolProperty(name="Smooth Correction", default=True, update=mc_collections_global_options_update)
    collections_global_shrinkwrap: bpy.props.BoolProperty(name="Shrinkwrap", default=True, update=mc_collections_global_options_update)
    collections_global_mask: bpy.props.BoolProperty(name="Mask", default=True, update=mc_collections_global_options_update)
    collections_global_normalautosmooth: bpy.props.BoolProperty(name="Normals Auto Smooth", default=True, update=mc_collections_global_options_update)
    # Outfit variant
    outfit_enable : bpy.props.BoolProperty(name="Outfit", default=False)
    outfit_body : bpy.props.PointerProperty(name="Outfit Body", description = "The masks of this object will be switched on/off depending on which elements of the collections visibility", type=bpy.types.Object, poll=mc_poll_mesh)
# Class to store linked properties informations
class MCLinkedPropertyItem(bpy.types.PropertyGroup):
    """A secondary property driven alongside a main menu property."""
    # RNA data path of the struct that owns the linked property.
    path: bpy.props.StringProperty(name="Property Path")
    # Identifier of the linked property on that path.
    id : bpy.props.StringProperty(name="Property Identifier")
# Class to store properties informations
class MCPropertyItem(bpy.types.PropertyGroup):
    """One menu entry: where the underlying property lives and how to display it."""
    mc_id : bpy.props.IntProperty(name="Section ID")
    name : bpy.props.StringProperty(name="Property Name")
    # RNA data path of the struct that owns the property.
    path: bpy.props.StringProperty(name="Property Path")
    id : bpy.props.StringProperty(name="Property Identifier")
    icon : bpy.props.EnumProperty(name="Property Icon", default="NONE",items=mcdata.mc_icon_list)
    section : bpy.props.StringProperty(name="Section", default="Unsorted")
    hide : bpy.props.BoolProperty(name="Hide Property", default=False)
    # Properties updated in lock-step with this one.
    linked_props: bpy.props.CollectionProperty(name="Linked properties", type=MCLinkedPropertyItem)
def register():
    """Register the PropertyGroup classes and attach the menu data to Object.

    Registration order matters: MCCollectionItem and MCLinkedPropertyItem are
    registered before MCSectionItem/MCPropertyItem, which declare
    Collection/Pointer properties of those types.
    """
    bpy.utils.register_class(MCCollectionItem)
    bpy.utils.register_class(MCSectionItem)
    bpy.utils.register_class(MCLinkedPropertyItem)
    bpy.utils.register_class(MCPropertyItem)
    bpy.types.Object.mc_properties = bpy.props.CollectionProperty(type=MCPropertyItem)
    bpy.types.Object.mc_sections = bpy.props.CollectionProperty(type=MCSectionItem)
def unregister():
    """Detach the menu data from Object and unregister the classes.

    Classes are removed in the exact reverse of register()'s order so that no
    class is unregistered while another still-registered class references it
    (MCPropertyItem references MCLinkedPropertyItem, MCSectionItem references
    MCCollectionItem — which is unregistered last, on the following line).
    """
    del bpy.types.Object.mc_properties
    del bpy.types.Object.mc_sections
    bpy.utils.unregister_class(MCPropertyItem)
    bpy.utils.unregister_class(MCLinkedPropertyItem)
    bpy.utils.unregister_class(MCSectionItem)
bpy.utils.unregister_class(MCCollectionItem) | 48.263889 | 229 | 0.690072 |
import bpy
from . import mcdata
class MCCollectionItem(bpy.types.PropertyGroup):
collection : bpy.props.PointerProperty(name="Collection",type=bpy.types.Collection)
class MCSectionItem(bpy.types.PropertyGroup):
def mc_sections_collapsed_update(self, context):
if not self.collapsable:
self.collapsed = False
return
def mc_collections_list(self, context):
items = []
for el in self.collections:
if hasattr(el.collection, 'name'):
items.append( (el.collection.name,el.collection.name,el.collection.name) )
return sorted(items)
def mc_collections_list_update(self, context):
for collection in self.collections:
if collection.collection.name == self.collections_list:
collection.collection.hide_viewport = False
collection.collection.hide_render = False
else:
collection.collection.hide_viewport = True
collection.collection.hide_render = True
def mc_collections_global_options_update(self, context):
items = []
i = 0
for el in self.collections:
for obj in el.collection.objects:
if obj.type == "MESH":
obj.data.use_auto_smooth = self.collections_global_normalautosmooth
for modifier in obj.modifiers:
if modifier.type == "CORRECTIVE_SMOOTH":
modifier.show_viewport = self.collections_global_smoothcorrection
modifier.show_render = self.collections_global_smoothcorrection
elif modifier.type == "MASK":
modifier.show_viewport = self.collections_global_mask
modifier.show_render = self.collections_global_mask
elif modifier.type == "SHRINKWRAP":
modifier.show_viewport = self.collections_global_shrinkwrap
modifier.show_render = self.collections_global_shrinkwrap
if self.outfit_enable:
for modifier in self.outfit_body.modifiers:
if modifier.type == "MASK":
if not self.collections_global_mask:
modifier.show_viewport = False
modifier.show_render = False
else:
for el in self.collections:
for obj in el.collection.objects:
if obj.name in modifier.name and not obj.hide_viewport:
modifier.show_viewport = True
modifier.show_render = True
return
def mc_poll_mesh(self, object):
return object.type == 'MESH'
id : bpy.props.IntProperty(name="Section ID")
name : bpy.props.StringProperty(name="Section Name")
icon : bpy.props.StringProperty(name="Section Icon", default="")
type : bpy.props.StringProperty(name="Section Type", default="DEFAULT")
collapsable : bpy.props.BoolProperty(name="Section Collapsable", default=False, update=mc_sections_collapsed_update)
collapsed : bpy.props.BoolProperty(name="", default = False, description="")
collections_enable_global_smoothcorrection: bpy.props.BoolProperty(default=False)
collections_enable_global_shrinkwrap: bpy.props.BoolProperty(default=False)
collections_enable_global_mask: bpy.props.BoolProperty(default=False)
collections_enable_global_normalautosmooth: bpy.props.BoolProperty(default=False)
collections: bpy.props.CollectionProperty(name="Section Collection List", type=MCCollectionItem)
collections_list: bpy.props.EnumProperty(name="Section Collection List", items = mc_collections_list, update=mc_collections_list_update)
collections_global_smoothcorrection: bpy.props.BoolProperty(name="Smooth Correction", default=True, update=mc_collections_global_options_update)
collections_global_shrinkwrap: bpy.props.BoolProperty(name="Shrinkwrap", default=True, update=mc_collections_global_options_update)
collections_global_mask: bpy.props.BoolProperty(name="Mask", default=True, update=mc_collections_global_options_update)
collections_global_normalautosmooth: bpy.props.BoolProperty(name="Normals Auto Smooth", default=True, update=mc_collections_global_options_update)
outfit_enable : bpy.props.BoolProperty(name="Outfit", default=False)
outfit_body : bpy.props.PointerProperty(name="Outfit Body", description = "The masks of this object will be switched on/off depending on which elements of the collections visibility", type=bpy.types.Object, poll=mc_poll_mesh)
class MCLinkedPropertyItem(bpy.types.PropertyGroup):
path: bpy.props.StringProperty(name="Property Path")
id : bpy.props.StringProperty(name="Property Identifier")
class MCPropertyItem(bpy.types.PropertyGroup):
mc_id : bpy.props.IntProperty(name="Section ID")
name : bpy.props.StringProperty(name="Property Name")
path: bpy.props.StringProperty(name="Property Path")
id : bpy.props.StringProperty(name="Property Identifier")
icon : bpy.props.EnumProperty(name="Property Icon", default="NONE",items=mcdata.mc_icon_list)
section : bpy.props.StringProperty(name="Section", default="Unsorted")
hide : bpy.props.BoolProperty(name="Hide Property", default=False)
linked_props: bpy.props.CollectionProperty(name="Linked properties", type=MCLinkedPropertyItem)
def register():
bpy.utils.register_class(MCCollectionItem)
bpy.utils.register_class(MCSectionItem)
bpy.utils.register_class(MCLinkedPropertyItem)
bpy.utils.register_class(MCPropertyItem)
bpy.types.Object.mc_properties = bpy.props.CollectionProperty(type=MCPropertyItem)
bpy.types.Object.mc_sections = bpy.props.CollectionProperty(type=MCSectionItem)
def unregister():
del bpy.types.Object.mc_properties
del bpy.types.Object.mc_sections
bpy.utils.unregister_class(MCSectionItem)
bpy.utils.unregister_class(MCLinkedPropertyItem)
bpy.utils.unregister_class(MCPropertyItem)
bpy.utils.unregister_class(MCCollectionItem) | true | true |
f739986e7e42db987a37377b934c4b267d477613 | 730 | py | Python | music_blog/forms.py | sashis/music-blog | c0ed13e3df4d25606345c4d8e064833fa86eb0c3 | [
"MIT"
] | null | null | null | music_blog/forms.py | sashis/music-blog | c0ed13e3df4d25606345c4d8e064833fa86eb0c3 | [
"MIT"
] | null | null | null | music_blog/forms.py | sashis/music-blog | c0ed13e3df4d25606345c4d8e064833fa86eb0c3 | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField
from wtforms.validators import DataRequired, Email, EqualTo
class LoginForm(FlaskForm):
    """Sign-in form: username and password, both required."""
    username = StringField('Логин', validators=[DataRequired()])
    password = PasswordField('Пароль', validators=[DataRequired()])
class RegisterForm(FlaskForm):
    """Sign-up form: username, e-mail and a password entered twice."""
    username = StringField('Логин', validators=[DataRequired()])
    email = StringField('Почта', validators=[DataRequired(), Email(message="Некорректный адрес электронной почты")])
    # Must match the `confirm` field declared below.
    password = PasswordField('Пароль', validators=[
        DataRequired(),
        EqualTo('confirm', message='Пароли не совпадают')
    ])
confirm = PasswordField('Снова пароль', validators=[DataRequired()]) | 40.555556 | 116 | 0.736986 | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField
from wtforms.validators import DataRequired, Email, EqualTo
class LoginForm(FlaskForm):
username = StringField('Логин', validators=[DataRequired()])
password = PasswordField('Пароль', validators=[DataRequired()])
class RegisterForm(FlaskForm):
username = StringField('Логин', validators=[DataRequired()])
email = StringField('Почта', validators=[DataRequired(), Email(message="Некорректный адрес электронной почты")])
password = PasswordField('Пароль', validators=[
DataRequired(),
EqualTo('confirm', message='Пароли не совпадают')
])
confirm = PasswordField('Снова пароль', validators=[DataRequired()]) | true | true |
f7399aff48c6113e779047cdf465f74efc5e0bdf | 5,761 | py | Python | h1/models/agent_resource_event.py | hyperonecom/h1-client-python | 4ce355852ba3120ec1b8f509ab5894a5c08da730 | [
"MIT"
] | null | null | null | h1/models/agent_resource_event.py | hyperonecom/h1-client-python | 4ce355852ba3120ec1b8f509ab5894a5c08da730 | [
"MIT"
] | null | null | null | h1/models/agent_resource_event.py | hyperonecom/h1-client-python | 4ce355852ba3120ec1b8f509ab5894a5c08da730 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
HyperOne
HyperOne API # noqa: E501
The version of the OpenAPI document: 0.1.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from h1.configuration import Configuration
class AgentResourceEvent(object):
    """Model of an agent resource event returned by the HyperOne API.

    NOTE: originally auto generated by OpenAPI Generator
    (https://openapi-generator.tech); keep the public interface stable.

    Attributes:
        openapi_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> json key in the API definition.
    """
    openapi_types = {
        'id': 'str',
        'name': 'str',
        'state': 'str',
        'created_on': 'str',
        'project': 'str'
    }
    attribute_map = {
        'id': 'id',
        'name': 'name',
        'state': 'state',
        'created_on': 'createdOn',
        'project': 'project'
    }
    def __init__(self, id=None, name=None, state=None, created_on=None, project=None, local_vars_configuration=None):  # noqa: E501
        """AgentResourceEvent - a model defined in OpenAPI.

        All model fields are optional strings; ``local_vars_configuration``
        defaults to a fresh ``Configuration`` instance when not supplied.
        """
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._id = None
        self._name = None
        self._state = None
        self._created_on = None
        self._project = None
        self.discriminator = None
        if id is not None:
            self.id = id
        if name is not None:
            self.name = name
        if state is not None:
            self.state = state
        if created_on is not None:
            self.created_on = created_on
        if project is not None:
            self.project = project
    @property
    def id(self):
        """str: The id of this AgentResourceEvent."""
        return self._id
    @id.setter
    def id(self, id):
        """Sets the id of this AgentResourceEvent."""
        self._id = id
    @property
    def name(self):
        """str: The name of this AgentResourceEvent."""
        return self._name
    @name.setter
    def name(self, name):
        """Sets the name of this AgentResourceEvent."""
        self._name = name
    @property
    def state(self):
        """str: The state of this AgentResourceEvent."""
        return self._state
    @state.setter
    def state(self, state):
        """Sets the state of this AgentResourceEvent."""
        self._state = state
    @property
    def created_on(self):
        """str: The created_on of this AgentResourceEvent."""
        return self._created_on
    @created_on.setter
    def created_on(self, created_on):
        """Sets the created_on of this AgentResourceEvent."""
        self._created_on = created_on
    @property
    def project(self):
        """str: The project of this AgentResourceEvent."""
        return self._project
    @project.setter
    def project(self, project):
        """Sets the project of this AgentResourceEvent."""
        self._project = project
    def to_dict(self):
        """Returns the model properties as a dict (nested models serialized)."""
        result = {}
        # was: six.iteritems / map+lambda -- plain dict iteration and
        # comprehensions behave identically here and drop the six shim.
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, AgentResourceEvent):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, AgentResourceEvent):
            return True
        return self.to_dict() != other.to_dict()
| 25.604444 | 131 | 0.563444 |
import pprint
import re
import six
from h1.configuration import Configuration
class AgentResourceEvent(object):
openapi_types = {
'id': 'str',
'name': 'str',
'state': 'str',
'created_on': 'str',
'project': 'str'
}
attribute_map = {
'id': 'id',
'name': 'name',
'state': 'state',
'created_on': 'createdOn',
'project': 'project'
}
def __init__(self, id=None, name=None, state=None, created_on=None, project=None, local_vars_configuration=None):
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._id = None
self._name = None
self._state = None
self._created_on = None
self._project = None
self.discriminator = None
if id is not None:
self.id = id
if name is not None:
self.name = name
if state is not None:
self.state = state
if created_on is not None:
self.created_on = created_on
if project is not None:
self.project = project
@property
def id(self):
return self._id
@id.setter
def id(self, id):
self._id = id
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def state(self):
return self._state
@state.setter
def state(self, state):
self._state = state
@property
def created_on(self):
return self._created_on
@created_on.setter
def created_on(self, created_on):
self._created_on = created_on
@property
def project(self):
return self._project
@project.setter
def project(self, project):
self._project = project
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, AgentResourceEvent):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
if not isinstance(other, AgentResourceEvent):
return True
return self.to_dict() != other.to_dict()
| true | true |
f7399b220a705df30b220b604450037bb08ad392 | 9,471 | py | Python | resources/lib/basictypes/callable.py | torstehu/Transmission-XBMC | 10f319deb0d9e5839e62f86c8c6ed5a9175d26fc | [
"MIT"
] | 22 | 2015-02-04T19:58:02.000Z | 2021-07-29T05:25:08.000Z | resources/lib/basictypes/callable.py | torstehu/Transmission-XBMC | 10f319deb0d9e5839e62f86c8c6ed5a9175d26fc | [
"MIT"
] | 14 | 2015-01-07T00:08:28.000Z | 2019-10-24T00:27:48.000Z | resources/lib/basictypes/callable.py | torstehu/Transmission-XBMC | 10f319deb0d9e5839e62f86c8c6ed5a9175d26fc | [
"MIT"
] | 15 | 2015-02-26T15:01:04.000Z | 2020-12-02T09:14:44.000Z | """Preliminary callable-object modelling classes"""
from basicproperty import propertied, basic, common
import inspect
from basictypes import list_types
__NULL__ = []
class Argument( propertied.Propertied ):
    """Representation of a single argument on a callable object"""
    name = common.StringLocaleProperty(
        'name', """The argument's name, as a simple string""",
    )
    default = basic.BasicProperty(
        'default', """Default-value for the argument, may be NULL/unavailable""",
    )
    baseType = basic.BasicProperty(
        'baseType', """Base data-type for the argument, may be NULL/unavailable""",
    )
    def __init__(self, name, default =__NULL__, baseType=__NULL__, **named):
        """Initialize the Argument object

        name -- the argument name
        default -- if provided, will provide the default value
            for the argument
        baseType -- if provided, will allow for type checking
            and coercion of arguments before calling the callable
            object.
        """
        if default is not __NULL__:
            named ["default"] = default
        if baseType is not __NULL__:
            named ["baseType"] = baseType
        super (Argument, self).__init__(
            name = name,
            **named
        )
    def __str__(self,):
        """Create a friendly string representation"""
        fragments = [repr(self.name)]
        if hasattr( self, "default"):
            fragments.append (repr(self.default))
        if hasattr( self, "baseType"):
            fragments.append (repr(self.baseType))
        return """%s(%s)"""%(
            self.__class__.__name__,
            ", ".join(fragments),
        )
    __repr__=__str__
    def __eq__( self, other ):
        """Determine whether other is our equivalent

        returns true if other is of the same class, with
        the same primary attributes
        """
        if self.__class__ is not other.__class__:
            return 0
        for nm in ['name','default','baseType']:
            if hasattr( self, nm) and not hasattr( other, nm):
                return 0
            elif not hasattr( self, nm) and hasattr( other, nm):
                return 0
            elif hasattr( self, nm ):
                if getattr( self, nm) != getattr(other,nm):
                    return 0
        return 1
    def __ne__( self, other ):
        """Inverse of __eq__ (Python 2 does not derive != from ==)"""
        return not self.__eq__( other )
    ### Data-type API
    def check( cls, value ):
        """Strict check to see if value is an instance of cls"""
        return isinstance( value, cls)
    check = classmethod(check)
    def coerce( cls, value ):
        """Coerce value to a cls instance

        Accepted forms:
            ("name",)
            ("name",default)
            ("name",default,baseType)
            "name"
            { ** } # passed to the initialiser
        """
        if cls.check( value ):
            return value
        if isinstance( value, (tuple, list)) and value and len(value) < 4:
            items = {}
            for item,name in zip(value,['name','default','baseType'][:len(value)]):
                items[name] = item
            return cls( **items )
        elif isinstance( value, str ):
            return cls( name = value )
        elif isinstance( value, dict ):
            return cls( **value )
        raise TypeError( """Don't know how to convert %r to a %s object"""%( value, cls.__name__))
    coerce = classmethod(coerce)
# List data-type whose members are coerced to Argument instances.
listof_Arguments = list_types.listof(
    Argument,
    name = "listof_Arguments",
    dataType = 'list.Arguments',
)
class Callable( propertied.Propertied ):
"""Modelling of a callable Python object"""
name = common.StringProperty(
'name', """The callable object's-name (may be different from underlying object)""",
)
implementation = basic.BasicProperty(
"implementation", """The underlying implementation (callable Python object)""",
)
arguments = common.ListProperty(
'arguments', """Argument-list for the callable object""",
baseType = listof_Arguments,
)
shortHelp = common.StringProperty(
'shortHelp', """Short help-string suitable for tooltips/status-bars""",
)
longHelp = common.StringProperty(
'longHelp', """Longer help-string suitable for context-sensitive help""",
)
coerce = common.BooleanProperty (
"coerce","""Whether to coerce arguments if possible""",
defaultValue = 0,
)
def __init__(
self, implementation, name=__NULL__,
arguments=__NULL__,
shortHelp = __NULL__, longHelp=__NULL__,
**named
):
"""Initialize the Callable object
implementation -- a callable python object
name -- if provided, will override the given name
arguments -- if provided, will override calculated arguments
shortHelp -- short help-string, first line of __doc__ if not given
longHelp -- long help-string, entire __doc__ string if not given
"""
if name is __NULL__:
name = self._name( implementation )
if arguments is __NULL__:
arguments = self._arguments (implementation)
if shortHelp is __NULL__:
shortHelp = self._shortHelp(implementation)
if longHelp is __NULL__:
longHelp = self._longHelp(implementation)
super (Callable, self).__init__(
implementation = implementation,
name = name,
arguments = arguments,
**named
)
def __str__(self):
"""Return a friendly string representation"""
return """%s( %s )"""% (self.__class__.__name__, self.implementation)
def __call__( self, *arguments, **named ):
"""Do the actual calling of the callable object"""
set = {}
for argument,value in zip(arguments,self.arguments):
set[argument.name] = (argument,value)
# XXX potentially there are missing positional arguments!
if named:
nameSet = dict([(arg.name,arg) for arg in self.arguments])
for key,value in named.items():
if set.has_key( key ):
raise ValueError("""Redefinition of argument order for argument %s"""%(set.get(key)))
else:
# note that argument may be None
set [key] = nameSet.get(key), value
for key,(argument,value) in set.items():
if self.coerce and argument and argument.baseType and hasattr(argument.baseType, "coerce"):
value = argument.baseType.coerce(argument)
set[key] = value
# XXX Should keep arguments in order to allow for *args set :(
return self.implementation( **set )
def getArgument( self, name ):
"""Retieve an argument by name"""
for argument in self.arguments:
if argument.name == name:
return argument
raise KeyError( """%r object doesn't have a %s argument"""%(self, name))
def _name( self, value ):
"""Try to find a decent name for a callable object"""
name = "<unknown>"
for attribute in [ '__name__','name','func_name','co_name','__file__',"friendlyName"]:
if hasattr( value, attribute):
v = getattr( value, attribute)
if isinstance( v, (str,unicode)):
name = v
if '.' in name:
return name.split('.')[-1]
return name
def _shortHelp( self, value ):
"""Try to find the short-docstring for an object"""
if hasattr( value, '__doc__') and value.__doc__:
return value.__doc__.split( '\n')[0]
else:
return ""
def _longHelp( self, value ):
"""Try to find the short-docstring for an object"""
if hasattr( value, '__doc__') and value.__doc__:
return value.__doc__
else:
return ""
def _useCall( self, value ):
"""Can we use __call__ to call this object?
returns true if we should be able to use it
"""
return (
# must have __call__
hasattr( value, '__call__') and
(
# call should be a function or method...
hasattr( value.__call__, 'im_func') or
hasattr( value.__call__, 'im_code')
)
)
def _arguments( self, value ):
"""Get a list of arguments for a callable object"""
if self._useCall( value ):
value = value.__call__
if hasattr(value, 'im_func'):
# receiver is a method. Drop the first argument, usually 'self'.
func = value.im_func
arguments = inspect.getargspec( func )
if value.im_self is not None:
# a bound instance or class method
arguments = inspect.getargspec( func )
del arguments[0][0]
else:
# an un-bound method
pass
elif hasattr(value, 'func_code') or hasattr(value, 'im_code'):
# receiver is a function.
func = value
arguments = inspect.getargspec( func )
else:
raise ValueError('unknown reciever type %s %s'%(receiver, type(receiver)))
names, vararg, varnamed, defaults = arguments
defaults = defaults or ()
result = [ Argument( name = name ) for name in names ]
for name,default in zip( names[-len(defaults):],defaults):
for item in result:
if item.name == name:
item.default = default
return result
def check( cls, value ):
"""Strict check to see if value is an instance of cls"""
return isinstance( value, cls)
check = classmethod(check)
def coerce(cls, value):
    """Coerce *value* to a Callable-class instance.

    Instances pass straight through; any other callable object is wrapped
    as the new instance's implementation; everything else raises TypeError.
    """
    if cls.check(value):
        return value
    if not callable(value):
        raise TypeError( "Don't know how to convert %r to a %s object"%(
            value, cls.__name__,
        ))
    return cls(
        implementation = value,
    )
coerce = classmethod(coerce)
def __eq__(self, other):
    """Determine whether other is our equivalent

    True (1) only when other is the same class and agrees on presence
    and value of the name/implementation/arguments attributes.
    """
    if self.__class__ is not other.__class__:
        return 0
    for attr in ('name', 'implementation', 'arguments'):
        mine = hasattr(self, attr)
        theirs = hasattr(other, attr)
        # one side has the attribute and the other does not -> unequal
        if mine != theirs:
            return 0
        if mine and getattr(self, attr) != getattr(other, attr):
            return 0
    return 1
# Registered list data-type holding Callable instances
# (see basictypes.list_types.listof).
Callables = list_types.listof(
    Callable,
    name = "Callables",
    dataType = 'list.Callables',
)
##class Curry( propertied.Propertied ):
## """A curried Callable with particular arguments pre-set"""
## values = common.DictionaryProperty(
## "values", """Partial value-set to be applied to callable""",
## )
## implementation = basic.BasicProperty(
## 'implementation', """The underlying implementation of the curry""",
## baseType = callable.Callable,
## )
##
| 30.551613 | 94 | 0.679548 | from basicproperty import propertied, basic, common
import inspect
from basictypes import list_types
__NULL__ = []
class Argument( propertied.Propertied ):
    """Description of a single argument accepted by a Callable.

    'default' and 'baseType' use the module-level __NULL__ sentinel so
    that omitted values leave the properties entirely unset (not None).
    """
    name = common.StringLocaleProperty(
        'name', """The argument's name, as a simple string""",
    )
    default = basic.BasicProperty(
        'default', """Default-value for the argument, may be NULL/unavailable""",
    )
    baseType = basic.BasicProperty(
        'baseType', """Base data-type for the argument, may be NULL/unavailable""",
    )
    def __init__(self, name, default =__NULL__, baseType=__NULL__, **named):
        """Initialise the Argument; only forward default/baseType if given."""
        if default is not __NULL__:
            named ["default"] = default
        if baseType is not __NULL__:
            named ["baseType"] = baseType
        super (Argument, self).__init__(
            name = name,
            **named
        )
    def __str__(self,):
        """Render as Argument('name'[, default][, baseType]) for debugging."""
        fragments = [repr(self.name)]
        if hasattr( self, "default"):
            fragments.append (repr(self.default))
        if hasattr( self, "baseType"):
            fragments.append (repr(self.baseType))
        return """%s(%s)"""%(
            self.__class__.__name__,
            ", ".join(fragments),
        )
    __repr__=__str__
    def __eq__( self, other ):
        """Equal (1) when same class with matching name/default/baseType."""
        if self.__class__ is not other.__class__:
            return 0
        NULL = []
        for nm in ['name','default','baseType']:
            # presence must match on both sides, then values must compare equal
            if hasattr( self, nm) and not hasattr( other, nm):
                return 0
            elif not hasattr( self, nm) and hasattr( other, nm):
                return 0
            elif hasattr( self, nm ):
                if getattr( self, nm) != getattr(other,nm):
                    return 0
        return 1
    ### Data-type API
    def check( cls, value ):
        """Strict check: is value already an Argument instance?"""
        return isinstance( value, cls)
    check = classmethod(check)
    def coerce( cls, value ):
        """Coerce value (instance, short tuple/list, str name, or dict) to Argument."""
        if cls.check( value ):
            return value
        # up to 3 positional items map onto (name, default, baseType)
        if isinstance( value, (tuple, list)) and value and len(value) < 4:
            items = {}
            for item,name in zip(value,['name','default','baseType'][:len(value)]):
                items[name] = item
            return cls( **items )
        elif isinstance( value, str ):
            return cls( name = value )
        elif isinstance( value, dict ):
            return cls( **value )
        raise TypeError( """Don't know how to convert %r to a %s object"""%( value, cls.__name__))
    coerce = classmethod(coerce)
# Registered list data-type holding Argument records; used as the
# baseType of Callable.arguments below.
listof_Arguments = list_types.listof(
    Argument,
    name = "listof_Arguments",
    dataType = 'list.Arguments',
)
class Callable( propertied.Propertied ):
    """Propertied wrapper describing a callable object and its signature.

    Introspects name, arguments and help strings from the wrapped
    implementation when they are not supplied explicitly.
    """
    name = common.StringProperty(
        'name', """The callable object's-name (may be different from underlying object)""",
    )
    implementation = basic.BasicProperty(
        "implementation", """The underlying implementation (callable Python object)""",
    )
    arguments = common.ListProperty(
        'arguments', """Argument-list for the callable object""",
        baseType = listof_Arguments,
    )
    shortHelp = common.StringProperty(
        'shortHelp', """Short help-string suitable for tooltips/status-bars""",
    )
    longHelp = common.StringProperty(
        'longHelp', """Longer help-string suitable for context-sensitive help""",
    )
    coerce = common.BooleanProperty (
        "coerce","""Whether to coerce arguments if possible""",
        defaultValue = 0,
    )
    def __init__(
        self, implementation, name=__NULL__,
        arguments=__NULL__,
        shortHelp=__NULL__, longHelp=__NULL__,
        **named
    ):
        """Wrap *implementation*, introspecting anything not supplied."""
        if name is __NULL__:
            name = self._name(implementation)
        if arguments is __NULL__:
            arguments = self._arguments(implementation)
        if shortHelp is __NULL__:
            shortHelp = self._shortHelp(implementation)
        if longHelp is __NULL__:
            longHelp = self._longHelp(implementation)
        # BUG FIX: the computed/passed help strings were previously
        # discarded; forward them when non-empty so the properties are set.
        # (Empty strings stay unset, matching the old observable state.)
        if shortHelp:
            named["shortHelp"] = shortHelp
        if longHelp:
            named["longHelp"] = longHelp
        super(Callable, self).__init__(
            implementation = implementation,
            name = name,
            arguments = arguments,
            **named
        )
    def __str__(self):
        return """%s( %s )"""% (self.__class__.__name__, self.implementation)
    def __call__( self, *arguments, **named ):
        """Call the implementation, mapping positional args onto Arguments.

        Positional values are matched to the declared Argument records in
        order, merged with keyword arguments (duplicates rejected),
        optionally coerced via each Argument's baseType, and finally
        passed to the implementation entirely by keyword.
        """
        collected = {}
        # BUG FIX: the zip order was inverted -- positional *values* were
        # being treated as Argument records (crashing on value.name).
        for value, argument in zip(arguments, self.arguments):
            collected[argument.name] = (argument, value)
        # XXX potentially there are missing positional arguments!
        if named:
            nameSet = dict([(arg.name, arg) for arg in self.arguments])
            for key, value in named.items():
                if key in collected:
                    raise ValueError("""Redefinition of argument order for argument %s"""%(collected.get(key)))
                else:
                    # note that argument may be None
                    collected[key] = nameSet.get(key), value
        for key, (argument, value) in collected.items():
            # baseType may be entirely unset on the Argument record
            baseType = argument and getattr(argument, 'baseType', None)
            if self.coerce and baseType and hasattr(baseType, "coerce"):
                # BUG FIX: previously coerced the Argument record itself
                # instead of the supplied value
                value = baseType.coerce(value)
            collected[key] = value
        # XXX Should keep arguments in order to allow for *args set :(
        return self.implementation( **collected )
    def getArgument( self, name ):
        """Return the Argument record named *name* or raise KeyError."""
        for argument in self.arguments:
            if argument.name == name:
                return argument
        raise KeyError( """%r object doesn't have a %s argument"""%(self, name))
    def _name( self, value ):
        """Try to find a decent name for a callable object

        Scans the candidate attributes in order, keeping the *last*
        string-valued match; dotted names are trimmed to their final part.
        """
        name = "<unknown>"
        for attribute in [ '__name__','name','func_name','co_name','__file__',"friendlyName"]:
            if hasattr( value, attribute):
                v = getattr( value, attribute)
                # Py2 code: 'unicode' is undefined on Py3
                if isinstance( v, (str,unicode)):
                    name = v
        if '.' in name:
            return name.split('.')[-1]
        return name
    def _shortHelp( self, value ):
        """Try to find the short-docstring for an object"""
        if hasattr( value, '__doc__') and value.__doc__:
            return value.__doc__.split( '\n')[0]
        else:
            return ""
    def _longHelp( self, value ):
        """Try to find the full docstring for an object"""
        if hasattr( value, '__doc__') and value.__doc__:
            return value.__doc__
        else:
            return ""
    def _useCall( self, value ):
        """Can we use __call__ to call this object?
        returns true if we should be able to use it
        """
        return (
            hasattr( value, '__call__') and
            (
                hasattr( value.__call__, 'im_func') or
                hasattr( value.__call__, 'im_code')
            )
        )
    def _arguments( self, value ):
        """Get a list of Argument records for a callable object"""
        if self._useCall( value ):
            value = value.__call__
        if hasattr(value, 'im_func'):
            # receiver is a method: introspect the underlying function
            func = value.im_func
            arguments = inspect.getargspec( func )
            if value.im_self is not None:
                # a bound instance or class method: drop 'self'/'cls'
                del arguments[0][0]
        elif hasattr(value, 'func_code') or hasattr(value, 'im_code'):
            # receiver is a plain function
            func = value
            arguments = inspect.getargspec( func )
        else:
            # BUG FIX: this branch referenced the undefined name 'receiver'
            raise ValueError('unknown receiver type %s %s'%(value, type(value)))
        names, vararg, varnamed, defaults = arguments
        defaults = defaults or ()
        result = [ Argument( name = name ) for name in names ]
        # defaults align with the *last* len(defaults) positional names
        for name,default in zip( names[-len(defaults):],defaults):
            for item in result:
                if item.name == name:
                    item.default = default
        return result
    def check( cls, value ):
        """Strict check to see if value is an instance of cls"""
        return isinstance( value, cls)
    check = classmethod(check)
    def coerce( cls, value ):
        """Coerce value to a Callable-object; wrap any callable, else TypeError."""
        if cls.check( value ):
            return value
        if callable( value ):
            return cls(
                implementation = value,
            )
        else:
            raise TypeError( "Don't know how to convert %r to a %s object"%(
                value, cls.__name__,
            ))
    coerce = classmethod(coerce)
    def __eq__( self, other ):
        """Determine whether other is our equivalent

        True (1) only for same class with matching presence and value of
        name/implementation/arguments.
        """
        if self.__class__ is not other.__class__:
            return 0
        for nm in ['name','implementation','arguments']:
            if hasattr( self, nm) and not hasattr( other, nm):
                return 0
            elif not hasattr( self, nm) and hasattr( other, nm):
                return 0
            elif hasattr( self, nm ):
                if getattr( self, nm) != getattr(other,nm):
                    return 0
        return 1
# Registered list data-type holding Callable instances
# (see basictypes.list_types.listof).
Callables = list_types.listof(
    Callable,
    name = "Callables",
    dataType = 'list.Callables',
)
##class Curry( propertied.Propertied ):
## """A curried Callable with particular arguments pre-set"""
## values = common.DictionaryProperty(
## "values", """Partial value-set to be applied to callable""",
## )
## implementation = basic.BasicProperty(
## 'implementation', """The underlying implementation of the curry""",
## baseType = callable.Callable,
## )
##
| true | true |
f7399c13194cace8b3bdd6347bccd6952f0d850d | 24,918 | py | Python | {{cookiecutter.project_slug}}/core/management/commands/fastapi.py | claysllanxavier/django-cookiecutter | 97de7ff4ed3dc94c32bf756a57aee0664a888cbc | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.project_slug}}/core/management/commands/fastapi.py | claysllanxavier/django-cookiecutter | 97de7ff4ed3dc94c32bf756a57aee0664a888cbc | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.project_slug}}/core/management/commands/fastapi.py | claysllanxavier/django-cookiecutter | 97de7ff4ed3dc94c32bf756a57aee0664a888cbc | [
"BSD-3-Clause"
] | null | null | null | import os
import platform
import subprocess
import sys
from pathlib import Path
from core.management.commands.utils import Utils
from django.apps import apps
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    # NOTE: 'help' is displayed by Django at runtime, so its (Portuguese)
    # text is intentionally left untouched.
    help = """Manager responsável por analisar as classes de modelos do projeto Django para gerar os arquivos
    do projeto FastAPI correspondente às apps do Django"""

    def __init__(self):
        """Pre-compute the filesystem paths and settings used by every subcommand."""
        super().__init__()
        self.path_root = os.getcwd()
        self.path_core = os.path.join(self.BASE_DIR, "core")
        self.operation_system = platform.system().lower()
        self.project = 'fastapi'
        # The generated FastAPI project lives as a sibling of the Django project
        self.fastapi_dir = os.path.join(self.BASE_DIR, '..', "fastapi")
        self.fastapi_project = os.path.join(self.path_core, "management/commands/snippets/fastapi_project")
        self.snippet_dir = "{}/{}".format(self.path_core, "management/commands/snippets/fastapi/")
        self.current_app_model = None

    BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

    # Parallel lookup tables: a Django field class name in _django_types maps,
    # by index, to the Pydantic schema type in _schemas_types and the
    # SQLAlchemy column type in _models_types.
    _django_types = ["SmallAutoField", "AutoField", "BLANK_CHOICE_DASH", "BigAutoField", "BigIntegerField",
                     "BinaryField", "BooleanField", "CharField", "CommaSeparatedIntegerField", "DateField",
                     "DateTimeField", "DecimalField", "DurationField", "EmailField", "Empty", "FileField", "Field",
                     "FieldDoesNotExist", "FilePathField", "FloatField", "GenericIPAddressField", "IPAddressField",
                     "IntegerField", "FieldFile", "NOT_PROVIDED", "NullBooleanField", "ImageField",
                     "PositiveIntegerField", "PositiveSmallIntegerField", "SlugField", "SmallIntegerField", "TextField",
                     "TimeField", "URLField", "UUIDField", "ForeignKey", "OneToOneField"]

    _schemas_types = ["int", "int", "BLANK_CHOICE_DASH", "int", "int", "str", "bool", "str", "str", "datetime.date",
                      "datetime.datetime", "float", "int", "EmailStr", "str", "str", "str", "str", "str", "float",
                      "str", "str", "int", "str", "str", "bool", "str", "int", "int", "str", "int",
                      "str", "DateTime", "str", "str", "int", "int",]

    _models_types = ["Integer", "Integer", "BLANK_CHOICE_DASH", "Integer", "Integer", "String", "Boolean", "String", "String", "Date",
                     "Datetime", "Float", "Integer", "String", "String", "String", "String", "String", "String", "Float",
                     "String", "String", "Integer", "String", "String", "Boolean", "String", "Integer", "Integer", "String", "Integer",
                     "String", "DateTime", "String", "String", "Integer", "Integer", ]

    def add_arguments(self, parser):
        """Register the positional (App, Model) and optional flags of the command."""
        parser.add_argument("App", type=str, nargs="?")
        parser.add_argument("Model", type=str, nargs="?")
        parser.add_argument("--app", action="store_true", dest="app", help="Criar a App e seus models")
        parser.add_argument("--app_model", action="store_true", dest="app_model",
                            help="Criar a App e o Model informado")
        # Optional flags limiting generation to a single artifact kind
        parser.add_argument(
            '--schemas',
            action='store_true',
            dest='schemas',
            help='Criar apenas os Schemas'
        )
        parser.add_argument(
            '--api',
            action='store_true',
            dest='api',
            help='Criar apenas as rotas da api'
        )
        parser.add_argument(
            '--cruds',
            action='store_true',
            dest='cruds',
            help='Criar apenas os cruds'
        )
        parser.add_argument(
            '--models',
            action='store_true',
            dest='models',
            help='Criar apenas os models'
        )

    def _check_dir(self, path) -> bool:
        """Return True when the directory at *path* already exists."""
        return Utils.check_dir(path)

    def _check_file(self, path):
        """Return True when the file at *path* already exists."""
        return Utils.check_file(path)

    def __check_content(self, path, text_check):
        """Return True when *text_check* occurs inside the file at *path*."""
        return Utils.check_content(path, text_check)

    def __ignore_base_fields(self, field):
        """Return True for attributes inherited from the project's Base model.

        Arguments:
            field {String} -- attribute name

        Returns:
            bool -- True when the attribute belongs to the parent class.
        """
        try:
            __ignore_fields = ["id", "deleted", "created_on", "updated_on" ]
            return field in __ignore_fields
        except Exception as error:
            Utils.show_message(f"Error in __ignore_base_fields: {error}", error=True)

    def __get_snippet(self, path=None, file_name=None, state_manager=False):
        """Read and return the snippet template used to render generated files.

        Arguments:
            path {str} -- path of the snippet file used as the template.
            file_name {str} -- snippet file name (unused here).
            state_manager {bool} -- legacy flag for state-manager snippets (deprecated).

        Returns:
            str -- template text for the generated file.
            NOTE(review): implicitly returns None when *path* is not a file;
            callers then fail on .replace() -- confirm intended behaviour.
        """
        try:
            if os.path.isfile(path):
                with open(path, encoding="utf-8") as arquivo:
                    return arquivo.read()
        except Exception as e:
            Utils.show_message(f"Error in get_snippet {e}", error=True)
            sys.exit()

    def __init_fastapi(self):
        """Create the FastAPI project by copying the project snippet tree."""
        try:
            if not Utils.check_dir(self.fastapi_dir):
                Utils.show_message("Criando o projeto Fastapi.")
                print(self.fastapi_project)
                __cmd_fastapi_create = "cp -R {} {}".format(self.fastapi_project, self.fastapi_dir)
                subprocess.call(__cmd_fastapi_create, shell=True)
                Utils.show_message("Projeto criado com sucesso.")
        except Exception as error:
            Utils.show_message(f"Error in __init_Fastapi: {error}", error=True)

    def __init_app(self, app_path):
        """Create the target app directory inside the FastAPI project."""
        try:
            if not Utils.check_dir(app_path):
                Utils.show_message("Criando diretório da app")
                os.makedirs(app_path)
                Utils.show_message("Diretório criado com sucesso")
        except Exception as error:
            Utils.show_message(f"Error in __init_Fastapi: {error}", error=True)

    def __apply_pep(self, path):
        """Run autopep8 and isort over the generated file (best effort)."""
        try:
            os.system('autopep8 --in-place --aggressive --aggressive {}'.format(path))
            os.system('isort {}'.format(path))
        except Exception as error:
            Utils.show_message(f"Ocorreu o erro : {error}")
            pass

    def __manage_schema(self):
        """Create/update the Pydantic schema file for the current model."""
        try:
            Utils.show_message("Trabalhando na configuração do Schema do model {}".format(self.model))
            content = self.__get_snippet(Path(
                f"{self.path_core}/management/commands/snippets/fastapi/schema.txt"))
            # Interpolate the model data into the template
            content = content.replace("$ModelClass$", self.model)
            model = self.app_instance.get_model(self.model)
            fields = model._meta.fields
            result = ''
            for field in iter(fields):
                item = {}
                item["app"], item["model"], item["name"] = str(field).split('.')
                item["type"] = (str(
                    str(type(field)).split('.')[-1:])
                    .replace("[\"", "").replace("\'>\"]", ""))
                if item["type"] not in self._django_types:
                    print('Campo {} desconhecido'.format(field))
                    continue
                if not self.__ignore_base_fields(item['name']):
                    attribute = self._schemas_types[self._django_types.index(item['type'])]
                    field_name = item.get('name')
                    if (getattr(field, 'null', None)):
                        attribute = f"Optional[{attribute}]"
                    if (field.get_default() is not None and field.get_default() != ""):
                        attribute += f" = {field.get_default()}"
                    if (item.get("type") in ('ForeignKey', 'OneToOneField')):
                        field_name = field.get_attname_column()[1]
                    result += f"\t {field_name}: {attribute}\n"
            content = content.replace("$fields$", result)
            # If schemas.py does not exist yet, write the rendered content
            if self._check_file(self.path_schema) is False:
                with open(self.path_schema, 'w') as arquivo:
                    arquivo.write(content)
                self.__apply_pep(self.path_schema)
                return
            # Skip when the model already has a schema configured
            if self.__check_content(
                    self.path_schema, "class {}".format(self.model)):
                Utils.show_message("O model informado já possui schema configurado.")
                return
            with open(self.path_schema, 'a') as schema:
                schema.write("\n")
                schema.write(content)
            self.__apply_pep(self.path_schema)
        except Exception as error:
            Utils.show_message(f"Error in __manage_schema: {error}", error=True)

    def __manage_model(self):
        """Create/update the SQLAlchemy model file for the current model."""
        try:
            Utils.show_message("Trabalhando na configuração do Model do model {}".format(self.model))
            content = self.__get_snippet(Path(
                f"{self.path_core}/management/commands/snippets/fastapi/model.txt"))
            # Interpolate the model data into the template
            content = content.replace("$ModelClass$", self.model)
            model = self.app_instance.get_model(self.model)
            content = content.replace("$table$", model._meta.db_table)
            fields = model._meta.fields
            related_fields = model._meta.many_to_many
            result = ''
            imports = ""
            many_to_many = ""
            for field in iter(fields):
                item = {}
                item["app"], item["model"], item["name"] = str(field).split('.')
                item["type"] = (str(
                    str(type(field)).split('.')[-1:])
                    .replace("[\"", "").replace("\'>\"]", ""))
                if item["type"] not in self._django_types:
                    print('Campo {} desconhecido'.format(field))
                    continue
                if not self.__ignore_base_fields(item['name']):
                    attribute = self._models_types[self._django_types.index(item['type'])]
                    field_name = item.get('name')
                    relationship = None
                    if (field.max_length):
                        attribute += f"({field.max_length})"
                    if (item.get("type") in ('ForeignKey', 'OneToOneField')):
                        # FK columns use the db column name and a relationship()
                        field_name = field.get_attname_column()[1]
                        __model = field.related_model._meta
                        attribute = f"ForeignKey('{__model.db_table}.id')"
                        if __model.app_label != item.get('app'):
                            imports += f"from {__model.app_label}.models import {__model.object_name}\n"
                        relationship = f"\t {item.get('name')} = relationship('{__model.object_name}')\n"
                    attribute = f"{attribute}, nullable={(getattr(field, 'null', None))}"
                    if (field.has_default()):
                        attribute += f" ,default={field.get_default()}"
                    if (field.unique):
                        attribute += f" ,unique={field.unique}"
                    result += f"\t {field_name} = Column({attribute})\n"
                    if relationship is not None:
                        result += relationship
            for field in iter(related_fields):
                item = {}
                item["app"], item["model"], item["name"] = str(field).split('.')
                item["type"] = (str(
                    str(type(field)).split('.')[-1:])
                    .replace("[\"", "").replace("\'>\"]", ""))
                if (item.get("type") == "ManyToManyField"):
                    # M2M fields get an association Table plus a relationship()
                    _model_name = field.model._meta.model_name
                    _app_name = field.model._meta.app_label
                    _related_model_name = field.related_model._meta.model_name
                    _related_model_app = field.related_model._meta.app_label
                    __model = field.related_model._meta
                    table = f"{item.get('app')}_{_model_name}_{field.related_model._meta.model_name}"
                    many_to_many += f"{table} = Table('{table}', Base.metadata,"
                    many_to_many += f"Column('id', Integer, primary_key=True, index=True),"
                    many_to_many += f"Column('{_model_name}_id', ForeignKey('{_app_name}_{_model_name}.id')),"
                    many_to_many += f"Column('{_related_model_name}_id', ForeignKey('{_related_model_app}_{_related_model_name}.id')))\n"
                    result += f"\t {item.get('name')} = relationship('{__model.object_name}', secondary={table})\n"
            content = content.replace("$columns$", result)
            content = content.replace("$imports$", imports)
            content = content.replace("$manyToMany$", many_to_many)
            # If models.py does not exist yet, write the rendered content
            if self._check_file(self.path_model_fastapi) is False:
                with open(self.path_model_fastapi, 'w') as arquivo:
                    arquivo.write(content)
                self.__apply_pep(self.path_model_fastapi)
                return
            # Skip when the model is already configured
            if self.__check_content(
                    self.path_model_fastapi, "class {}".format(self.model)):
                Utils.show_message("O model informado já possui model configurado.")
                return
            with open(self.path_model_fastapi, 'a') as schema:
                schema.write("\n")
                schema.write(content)
            self.__apply_pep(self.path_model_fastapi)
        except Exception as error:
            Utils.show_message(f"Error in __manage_model: {error}", error=True)

    def __manage_cruds(self):
        """Create/update the CRUD helpers file for the current model."""
        try:
            Utils.show_message("Trabalhando na configuração do Crud do model {}".format(self.model))
            content = self.__get_snippet(Path(
                f"{self.path_core}/management/commands/snippets/fastapi/cruds.txt"))
            # Interpolate the model data into the template
            content = content.replace("$ModelClass$", self.model)
            content = content.replace("$app$", self.app)
            content = content.replace("$model$", self.model_lower)
            # If cruds.py does not exist yet, write the rendered content
            if self._check_file(self.path_crud) is False:
                with open(self.path_crud, 'w') as arquivo:
                    arquivo.write(content)
                self.__apply_pep(self.path_crud)
                return
            # Skip when the model is already configured
            if self.__check_content(
                    self.path_crud, "class {}".format(self.model)):
                Utils.show_message("O model informado já possui schema configurado.")
                return
            with open(self.path_crud, 'a') as crud:
                crud.write("\n")
                crud.write(content)
            self.__apply_pep(self.path_crud)
        except Exception as error:
            Utils.show_message(f"Error in __manage_crud: {error}", error=True)

    def __manage_api(self):
        """Create/update the API routes file for the current model."""
        try:
            Utils.show_message("Trabalhando na configuração das Rotas do model {}".format(self.model))
            content = self.__get_snippet(Path(
                f"{self.path_core}/management/commands/snippets/fastapi/api.txt"))
            # Interpolate the model data into the template
            content = content.replace("$ModelClass$", self.model)
            content = content.replace("$app$", self.app)
            content = content.replace("$model$", self.model_lower)
            # If api.py does not exist yet, write the rendered content
            if self._check_file(self.path_api) is False:
                with open(self.path_api, 'w') as arquivo:
                    arquivo.write(content)
                self.__apply_pep(self.path_api)
                return
            # Skip when the model is already configured
            # NOTE(review): api.txt renders routes, not classes -- this
            # "class X" duplicate check probably never matches; confirm.
            if self.__check_content(
                    self.path_api, "class {}".format(self.model)):
                Utils.show_message("O model informado já possui schema configurado.")
                return
            # Keep a single APIRouter() declaration per file
            if self.__check_content(self.path_api,
                                    "router = APIRouter()"):
                content = content.replace("router = APIRouter()", "")
            with open(self.path_api, 'a') as crud:
                crud.write("\n")
                crud.write(content)
            self.__apply_pep(self.path_api)
        except Exception as error:
            Utils.show_message(f"Error in __manage_crud: {error}", error=True)

    def call_methods(self, options):
        """Dispatch to the generator(s) selected by the parsed options.

        When one of the --cruds/--api/--schemas/--models flags is set only
        that artifact is generated; otherwise all four are generated.
        """
        if options['cruds']:
            Utils.show_message("Trabalhando apenas os cruds.")
            self.__manage_cruds()
            return
        elif options['api']:
            Utils.show_message("Trabalhando apenas a api.")
            self.__manage_api()
            return
        elif options['schemas']:
            Utils.show_message("Trabalhando apenas os schemas.")
            self.__manage_schema()
            return
        elif options['models']:
            Utils.show_message("Trabalhando apenas os models.")
            self.__manage_model()
            return
        else:
            # No restricting flag: generate every artifact kind
            self.__manage_api()
            self.__manage_schema()
            self.__manage_model()
            self.__manage_cruds()
            return

    def handle(self, *args, **options):
        """Entry point: validate input, prepare paths and run the generators."""
        app = options["App"] or None
        model = options["Model"] or None
        # BUG FIX: the original guard used 'and', so a missing App with a
        # Model supplied fell through and crashed on app.strip() below.
        if app is None:
            Utils.show_message(
                f"Você não informou uma APP para ser gerada.",
                error=True)
            return
        if app and Utils.contain_number(app):
            Utils.show_message(f"Nome da app contendo números")
            return
        # Normalise the app name
        self.app = app.strip()
        # Absolute path of the current (Django) project
        self.path_root = os.path.normpath(os.getcwd() + os.sep)
        # Paths for the target FastAPI app and the local Django app
        self.path_app = os.path.join(self.fastapi_dir, app)
        self.path_app_local = os.path.join(self.path_root, app)
        # Path of the core app
        self.path_core = os.path.join(self.BASE_DIR, "core")
        # Path of the Django app's models.py
        self.path_model = os.path.join(self.path_app_local, "models.py")
        # Lower-case name used wherever a lower-case identifier is needed
        self.app_lower = app.lower()
        # Paths of the files generated inside the FastAPI app
        self.path_schema = os.path.join(self.path_app, "schemas.py")
        self.path_model_fastapi = os.path.join(self.path_app, "models.py")
        self.path_crud = os.path.join(self.path_app, "cruds.py")
        self.path_api = os.path.join(self.path_app, "api.py")
        # Bootstrap the FastAPI project if it does not exist yet
        if self._check_dir(self.fastapi_dir) is False:
            self.__init_fastapi()
        # The app must be installed so its models can be introspected
        if apps.is_installed(self.app_lower) is False:
            Utils.show_message(
                "Você deve colocar sua app no INSTALLED_APPS do settings.")
            return
        if self._check_dir(self.path_app) is False:
            self.__init_app(self.path_app)
        # App registry entry for model lookups
        self.app_instance = apps.get_app_config(self.app_lower)
        # Single-model mode when the user supplied a Model name
        if options['Model']:
            model = options['Model'] or None
            if Utils.contain_number(model) is False:
                self.model = model.strip()
                # The model class must exist inside the app's models.py
                if self.__check_content(
                        self.path_model,
                        'class {}'.format(self.model)) is False:
                    Utils.show_message("Model informado não encontrado.")
                    return
                try:
                    # Abstract models raise LookupError here and are skipped
                    self.app_instance.get_model(self.model)
                    Utils.show_message(
                        "Gerando arquivos para o model {}".format(self.model))
                    self.model_lower = model.lower()
                    self.call_methods(options)
                    Utils.show_message("Processo concluído.")
                except LookupError:
                    Utils.show_message(
                        "Esse model é abastrato. "
                        "Não vão ser gerados os arquivos.")
        else:
            # No Model supplied: generate files for every model in the app
            for model in self.app_instance.get_models():
                model = model.__name__
                self.model = model.strip()
                Utils.show_message(
                    "Gerando arquivos para o model {}".format(self.model))
                self.model_lower = model.lower()
                # Run the file generators for this model
                self.call_methods(options)
                Utils.show_message(
                    "Processo concluído para o model {}.".format(
                        self.model))
            Utils.show_message("Processo concluído.")
            return
| 47.644359 | 141 | 0.566017 | import os
import platform
import subprocess
import sys
from pathlib import Path
from core.management.commands.utils import Utils
from django.apps import apps
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = """Manager responsável por analisar as classes de modelos do projeto Django para gerar os arquivos
do projeto FastAPI correspondente às apps do Django"""
def __init__(self):
super().__init__()
self.path_root = os.getcwd()
self.path_core = os.path.join(self.BASE_DIR, "core")
self.operation_system = platform.system().lower()
self.project = 'fastapi'
self.fastapi_dir = os.path.join(self.BASE_DIR, '..', "fastapi")
self.fastapi_project = os.path.join(self.path_core, "management/commands/snippets/fastapi_project")
self.snippet_dir = "{}/{}".format(self.path_core, "management/commands/snippets/fastapi/")
self.current_app_model = None
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
_django_types = ["SmallAutoField", "AutoField", "BLANK_CHOICE_DASH", "BigAutoField", "BigIntegerField",
"BinaryField", "BooleanField", "CharField", "CommaSeparatedIntegerField", "DateField",
"DateTimeField", "DecimalField", "DurationField", "EmailField", "Empty", "FileField", "Field",
"FieldDoesNotExist", "FilePathField", "FloatField", "GenericIPAddressField", "IPAddressField",
"IntegerField", "FieldFile", "NOT_PROVIDED", "NullBooleanField", "ImageField",
"PositiveIntegerField", "PositiveSmallIntegerField", "SlugField", "SmallIntegerField", "TextField",
"TimeField", "URLField", "UUIDField", "ForeignKey", "OneToOneField"]
_schemas_types = ["int", "int", "BLANK_CHOICE_DASH", "int", "int", "str", "bool", "str", "str", "datetime.date",
"datetime.datetime", "float", "int", "EmailStr", "str", "str", "str", "str", "str", "float",
"str", "str", "int", "str", "str", "bool", "str", "int", "int", "str", "int",
"str", "DateTime", "str", "str", "int", "int",]
_models_types = ["Integer", "Integer", "BLANK_CHOICE_DASH", "Integer", "Integer", "String", "Boolean", "String", "String", "Date",
"Datetime", "Float", "Integer", "String", "String", "String", "String", "String", "String", "Float",
"String", "String", "Integer", "String", "String", "Boolean", "String", "Integer", "Integer", "String", "Integer",
"String", "DateTime", "String", "String", "Integer", "Integer", ]
def add_arguments(self, parser):
parser.add_argument("App", type=str, nargs="?")
parser.add_argument("Model", type=str, nargs="?")
parser.add_argument("--app", action="store_true", dest="app", help="Criar a App e seus models")
parser.add_argument("--app_model", action="store_true", dest="app_model",
help="Criar a App e o Model informado")
parser.add_argument(
'--schemas',
action='store_true',
dest='schemas',
help='Criar apenas os Schemas'
)
parser.add_argument(
'--api',
action='store_true',
dest='api',
help='Criar apenas as rotas da api'
)
parser.add_argument(
'--cruds',
action='store_true',
dest='cruds',
help='Criar apenas os cruds'
)
parser.add_argument(
'--models',
action='store_true',
dest='models',
help='Criar apenas os models'
)
def _check_dir(self, path) -> bool:
return Utils.check_dir(path)
def _check_file(self, path):
return Utils.check_file(path)
def __check_content(self, path, text_check):
return Utils.check_content(path, text_check)
def __ignore_base_fields(self, field):
try:
__ignore_fields = ["id", "deleted", "created_on", "updated_on" ]
return field in __ignore_fields
except Exception as error:
Utils.show_message(f"Error in __ignore_base_fields: {error}", error=True)
def __get_snippet(self, path=None, file_name=None, state_manager=False):
try:
if os.path.isfile(path):
with open(path, encoding="utf-8") as arquivo:
return arquivo.read()
except Exception as e:
Utils.show_message(f"Error in get_snippet {e}", error=True)
sys.exit()
def __init_fastapi(self):
try:
if not Utils.check_dir(self.fastapi_dir):
Utils.show_message("Criando o projeto Fastapi.")
print(self.fastapi_project)
__cmd_fastapi_create = "cp -R {} {}".format(self.fastapi_project, self.fastapi_dir)
subprocess.call(__cmd_fastapi_create, shell=True)
Utils.show_message("Projeto criado com sucesso.")
except Exception as error:
Utils.show_message(f"Error in __init_Fastapi: {error}", error=True)
def __init_app(self, app_path):
try:
if not Utils.check_dir(app_path):
Utils.show_message("Criando diretório da app")
os.makedirs(app_path)
Utils.show_message("Diretório criado com sucesso")
except Exception as error:
Utils.show_message(f"Error in __init_Fastapi: {error}", error=True)
def __apply_pep(self, path):
try:
os.system('autopep8 --in-place --aggressive --aggressive {}'.format(path))
os.system('isort {}'.format(path))
except Exception as error:
Utils.show_message(f"Ocorreu o erro : {error}")
pass
    def __manage_schema(self):
        """Generate the Pydantic schema for ``self.model`` into schemas.py.

        Reads the schema snippet template, maps each Django model field to the
        equivalent schema type (via the parallel ``_django_types`` /
        ``_schemas_types`` lists) and creates or appends to the schemas file.
        Base audit fields (id/deleted/created_on/updated_on) are skipped.
        """
        try:
            Utils.show_message("Trabalhando na configuração do Schema do model {}".format(self.model))
            content = self.__get_snippet(Path(
                f"{self.path_core}/management/commands/snippets/fastapi/schema.txt"))
            content = content.replace("$ModelClass$", self.model)
            model = self.app_instance.get_model(self.model)
            fields = model._meta.fields
            result = ''
            for field in iter(fields):
                item = {}
                # str(field) has the form "app.model.field_name".
                item["app"], item["model"], item["name"] = str(field).split('.')
                item["type"] = (str(
                    str(type(field)).split('.')[-1:])
                    .replace("[\"", "").replace("\'>\"]", ""))
                if item["type"] not in self._django_types:
                    print('Campo {} desconhecido'.format(field))
                    continue
                if not self.__ignore_base_fields(item['name']):
                    attribute = self._schemas_types[self._django_types.index(item['type'])]
                    field_name = item.get('name')
                    # Nullable Django fields become Optional schema fields.
                    if (getattr(field, 'null', None)):
                        attribute = f"Optional[{attribute}]"
                    if (field.get_default() is not None and field.get_default() != ""):
                        attribute += f" = {field.get_default()}"
                    # FK/O2O columns use the DB column name (e.g. "owner_id").
                    if (item.get("type") in ('ForeignKey', 'OneToOneField')):
                        field_name = field.get_attname_column()[1]
                    result += f"\t {field_name}: {attribute}\n"
            content = content.replace("$fields$", result)
            # Create the schemas file from the interpolated template when it
            # does not exist yet.
            if self._check_file(self.path_schema) is False:
                with open(self.path_schema, 'w') as arquivo:
                    arquivo.write(content)
                self.__apply_pep(self.path_schema)
                return
            # Skip when a schema class for this model was already generated.
            if self.__check_content(
                    self.path_schema, "class {}".format(self.model)):
                Utils.show_message("O model informado já possui schema configurado.")
                return
            with open(self.path_schema, 'a') as schema:
                schema.write("\n")
                schema.write(content)
            self.__apply_pep(self.path_schema)
        except Exception as error:
            Utils.show_message(f"Error in __manage_schema: {error}", error=True)
    def __manage_model(self):
        """Generate the SQLAlchemy model for ``self.model`` into models.py.

        Maps every Django field to a SQLAlchemy ``Column``, emits
        ``ForeignKey``/``relationship`` entries for related fields, builds an
        association ``Table`` for each ManyToMany relation, then creates or
        appends to the FastAPI models file.
        """
        try:
            Utils.show_message("Trabalhando na configuração do Model do model {}".format(self.model))
            content = self.__get_snippet(Path(
                f"{self.path_core}/management/commands/snippets/fastapi/model.txt"))
            # Interpolate the template placeholders.
            content = content.replace("$ModelClass$", self.model)
            model = self.app_instance.get_model(self.model)
            content = content.replace("$table$", model._meta.db_table)
            fields = model._meta.fields
            related_fields = model._meta.many_to_many
            result = ''
            imports = ""
            many_to_many = ""
            for field in iter(fields):
                item = {}
                # str(field) has the form "app.model.field_name".
                item["app"], item["model"], item["name"] = str(field).split('.')
                item["type"] = (str(
                    str(type(field)).split('.')[-1:])
                    .replace("[\"", "").replace("\'>\"]", ""))
                if item["type"] not in self._django_types:
                    print('Campo {} desconhecido'.format(field))
                    continue
                if not self.__ignore_base_fields(item['name']):
                    attribute = self._models_types[self._django_types.index(item['type'])]
                    field_name = item.get('name')
                    relationship = None
                    if (field.max_length):
                        attribute += f"({field.max_length})"
                    # FK/O2O: point at the related table's id column and add a
                    # relationship() line; import the class if it lives in
                    # another app.
                    if (item.get("type") in ('ForeignKey', 'OneToOneField')):
                        field_name = field.get_attname_column()[1]
                        __model = field.related_model._meta
                        attribute = f"ForeignKey('{__model.db_table}.id')"
                        if __model.app_label != item.get('app'):
                            imports += f"from {__model.app_label}.models import {__model.object_name}\n"
                        relationship = f"\t {item.get('name')} = relationship('{__model.object_name}')\n"
                    attribute = f"{attribute}, nullable={(getattr(field, 'null', None))}"
                    if (field.has_default()):
                        attribute += f" ,default={field.get_default()}"
                    if (field.unique):
                        attribute += f" ,unique={field.unique}"
                    result += f"\t {field_name} = Column({attribute})\n"
                    if relationship is not None:
                        result += relationship
            # Emit an association table plus relationship() for each M2M.
            for field in iter(related_fields):
                item = {}
                item["app"], item["model"], item["name"] = str(field).split('.')
                item["type"] = (str(
                    str(type(field)).split('.')[-1:])
                    .replace("[\"", "").replace("\'>\"]", ""))
                if (item.get("type") == "ManyToManyField"):
                    _model_name = field.model._meta.model_name
                    _app_name = field.model._meta.app_label
                    _related_model_name = field.related_model._meta.model_name
                    _related_model_app = field.related_model._meta.app_label
                    __model = field.related_model._meta
                    table = f"{item.get('app')}_{_model_name}_{field.related_model._meta.model_name}"
                    many_to_many += f"{table} = Table('{table}', Base.metadata,"
                    many_to_many += f"Column('id', Integer, primary_key=True, index=True),"
                    many_to_many += f"Column('{_model_name}_id', ForeignKey('{_app_name}_{_model_name}.id')),"
                    many_to_many += f"Column('{_related_model_name}_id', ForeignKey('{_related_model_app}_{_related_model_name}.id')))\n"
                    result += f"\t {item.get('name')} = relationship('{__model.object_name}', secondary={table})\n"
            content = content.replace("$columns$", result)
            content = content.replace("$imports$", imports)
            content = content.replace("$manyToMany$", many_to_many)
            # Create the models file when it does not exist yet.
            if self._check_file(self.path_model_fastapi) is False:
                with open(self.path_model_fastapi, 'w') as arquivo:
                    arquivo.write(content)
                self.__apply_pep(self.path_model_fastapi)
                return
            # Skip when a model class was already generated for it.
            if self.__check_content(
                    self.path_model_fastapi, "class {}".format(self.model)):
                Utils.show_message("O model informado já possui model configurado.")
                return
            with open(self.path_model_fastapi, 'a') as schema:
                schema.write("\n")
                schema.write(content)
            self.__apply_pep(self.path_model_fastapi)
        except Exception as error:
            Utils.show_message(f"Error in __manage_model: {error}", error=True)
def __manage_cruds(self):
try:
Utils.show_message("Trabalhando na configuração do Crud do model {}".format(self.model))
content = self.__get_snippet(Path(
f"{self.path_core}/management/commands/snippets/fastapi/cruds.txt"))
# Interpolando os dados
content = content.replace("$ModelClass$", self.model)
content = content.replace("$app$", self.app)
content = content.replace("$model$", self.model_lower)
# Verificando se o arquivo forms.py existe
if self._check_file(self.path_crud) is False:
# Criando o arquivo com o conteúdo da interpolação
with open(self.path_crud, 'w') as arquivo:
arquivo.write(content)
self.__apply_pep(self.path_crud)
return
# Verificando se já existe configuração no forms para o
# Models informado
if self.__check_content(
self.path_crud, "class {}".format(self.model)):
Utils.show_message("O model informado já possui schema configurado.")
return
with open(self.path_crud, 'a') as crud:
crud.write("\n")
crud.write(content)
self.__apply_pep(self.path_crud)
except Exception as error:
Utils.show_message(f"Error in __manage_crud: {error}", error=True)
def __manage_api(self):
try:
Utils.show_message("Trabalhando na configuração das Rotas do model {}".format(self.model))
content = self.__get_snippet(Path(
f"{self.path_core}/management/commands/snippets/fastapi/api.txt"))
# Interpolando os dados
content = content.replace("$ModelClass$", self.model)
content = content.replace("$app$", self.app)
content = content.replace("$model$", self.model_lower)
# Verificando se o arquivo forms.py existe
if self._check_file(self.path_api) is False:
# Criando o arquivo com o conteúdo da interpolação
with open(self.path_api, 'w') as arquivo:
arquivo.write(content)
self.__apply_pep(self.path_api)
return
# Verificando se já existe configuração no forms para o
# Models informado
if self.__check_content(
self.path_api, "class {}".format(self.model)):
Utils.show_message("O model informado já possui schema configurado.")
return
if self.__check_content(self.path_api,
"router = APIRouter()"):
content = content.replace("router = APIRouter()", "")
with open(self.path_api, 'a') as crud:
crud.write("\n")
crud.write(content)
self.__apply_pep(self.path_api)
except Exception as error:
Utils.show_message(f"Error in __manage_crud: {error}", error=True)
def call_methods(self, options):
# Verificando se foram passados parâmetros opcionais
if options['cruds']:
Utils.show_message("Trabalhando apenas os cruds.")
self.__manage_cruds()
return
elif options['api']:
Utils.show_message("Trabalhando apenas a api.")
self.__manage_api()
return
elif options['schemas']:
Utils.show_message("Trabalhando apenas os schemas.")
self.__manage_schema()
return
elif options['models']:
Utils.show_message("Trabalhando apenas os models.")
self.__manage_model()
return
else:
# Chamando o método para tratar os api
self.__manage_api()
# Chamando o método para tratar as schemas
self.__manage_schema()
# Chamando o método para tratar o models
self.__manage_model()
# Chamando o método para tratar as cruds
self.__manage_cruds()
return
    def handle(self, *args, **options):
        """Entry point of the management command.

        Validates the App/Model arguments, resolves every target path,
        bootstraps the FastAPI project/app directories when missing, and then
        generates artifacts for a single model or for every model of the app.
        """
        app = options["App"] or None
        model = options["Model"] or None
        if app is None and model is None:
            Utils.show_message(
                f"Você não informou uma APP para ser gerada.",
                error=True)
            return
        if app and Utils.contain_number(app):
            Utils.show_message(f"Nome da app contendo números")
            return
        # Strip surrounding whitespace from the app name.
        self.app = app.strip()
        # Absolute path of the current project.
        self.path_root = os.path.normpath(os.getcwd() + os.sep)
        # Paths of the target app inside the FastAPI project and locally.
        self.path_app = os.path.join(self.fastapi_dir, app)
        self.path_app_local = os.path.join(self.path_root, app)
        # Path of the Core app.
        self.path_core = os.path.join(self.BASE_DIR, "core")
        # Path of the Django models.py of the given app.
        self.path_model = os.path.join(self.path_app_local, "models.py")
        # Lower-case name, used wherever a lower-case identifier is needed.
        self.app_lower = app.lower()
        # Paths of the files to be generated for the app.
        self.path_schema= os.path.join(self.path_app, "schemas.py")
        self.path_model_fastapi = os.path.join(self.path_app, "models.py")
        self.path_crud = os.path.join(self.path_app, "cruds.py")
        self.path_api = os.path.join(self.path_app, "api.py")
        # Bootstrap the FastAPI project when its directory is missing.
        if self._check_dir(self.fastapi_dir) is False:
            self.__init_fastapi()
        # The app must be installed so its model instances can be resolved.
        if apps.is_installed(self.app_lower) is False:
            Utils.show_message(
                "Você deve colocar sua app no INSTALLED_APPS do settings.")
            return
        if self._check_dir(self.path_app) is False:
            self.__init_app(self.path_app)
        # App registry instance used for model lookups.
        self.app_instance = apps.get_app_config(self.app_lower)
        # A single model was given: generate only for it.
        if options['Model']:
            model = options['Model'] or None
            if Utils.contain_number(model) is False:
                # Strip surrounding whitespace from the model name.
                self.model = model.strip()
                # The model must be declared in the app's models.py.
                if self.__check_content(
                        self.path_model,
                        'class {}'.format(self.model)) is False:
                    Utils.show_message("Model informado não encontrado.")
                    return
            try:
                # Resolve the model in the given app; abstract models raise
                # LookupError and are skipped.
                self.app_instance.get_model(self.model)
                Utils.show_message(
                    "Gerando arquivos para o model {}".format(self.model))
                # Lower-case name, used wherever a lower-case identifier is
                # needed.
                self.model_lower = model.lower()
                self.call_methods(options)
                Utils.show_message("Processo concluído.")
            except LookupError:
                Utils.show_message(
                    "Esse model é abastrato. "
                    "Não vão ser gerados os arquivos.")
        else:
            # No model given: generate for every model of the app.
            for model in self.app_instance.get_models():
                model = model.__name__
                # Strip surrounding whitespace from the model name.
                self.model = model.strip()
                Utils.show_message(
                    "Gerando arquivos para o model {}".format(self.model))
                # Lower-case name, used wherever a lower-case identifier is
                # needed.
                self.model_lower = model.lower()
                # Generate every artifact for this model.
                self.call_methods(options)
                Utils.show_message(
                    "Processo concluído para o model {}.".format(
                        self.model))
            Utils.show_message("Processo concluído.")
        return
| true | true |
f7399da1a6f2badfb0f8ee94970291153318b5fd | 1,572 | py | Python | j1939/message_id.py | rliebscher/python-can-j1939 | a53b410ab05edd5f6ccd16b6888f1d770dd0df18 | [
"MIT"
] | null | null | null | j1939/message_id.py | rliebscher/python-can-j1939 | a53b410ab05edd5f6ccd16b6888f1d770dd0df18 | [
"MIT"
] | null | null | null | j1939/message_id.py | rliebscher/python-can-j1939 | a53b410ab05edd5f6ccd16b6888f1d770dd0df18 | [
"MIT"
] | null | null | null |
class MessageId:
    """29-bit CAN identifier of a J1939 PDU.

    Composed of a 3-bit Priority, an 18-bit Parameter Group Number and an
    8-bit Source Address (253 usable addresses, unique per network).
    """

    def __init__(self, **kwargs):
        """Build a MessageId.

        Either pass ``can_id`` (a raw 29-bit CAN-Id to parse) or any of
        ``priority`` / ``parameter_group_number`` / ``source_address``
        (each defaulting to 0 and masked to its field width).
        """
        if 'can_id' in kwargs:
            # Delegate parsing to the can_id property setter.
            self.can_id = kwargs.get('can_id')
        else:
            self.priority = kwargs.get('priority', 0) & 7
            self.parameter_group_number = kwargs.get('parameter_group_number', 0) & 0x3FFFF
            self.source_address = kwargs.get('source_address', 0) & 0xFF

    @property
    def can_id(self):
        """Pack priority, PGN and source address into a 29-bit CAN-Id."""
        priority_bits = self.priority << 26
        pgn_bits = self.parameter_group_number << 8
        return priority_bits | pgn_bits | self.source_address

    @can_id.setter
    def can_id(self, can_id):
        """Unpack a 29-bit CAN-Id into priority, PGN and source address."""
        self.priority = (can_id >> 26) & 0x7
        self.parameter_group_number = (can_id >> 8) & 0x3FFFF
        self.source_address = can_id & 0xFF
| 34.933333 | 97 | 0.60369 |
class MessageId:
    """29-bit CAN identifier of a J1939 PDU: 3-bit priority, 18-bit
    Parameter Group Number and 8-bit source address."""
    def __init__(self, **kwargs):
        # Either a raw 29-bit `can_id` to parse, or individual parts
        # (priority / parameter_group_number / source_address), each masked
        # to its field width.
        if 'can_id' in kwargs:
            self.can_id = kwargs.get('can_id')
        else:
            self.priority = kwargs.get('priority', 0) & 7
            self.parameter_group_number = kwargs.get('parameter_group_number', 0) & 0x3FFFF
            self.source_address = kwargs.get('source_address', 0) & 0xFF
    @property
    def can_id(self):
        """Pack priority, PGN and source address into a 29-bit CAN-Id."""
        return (self.priority << 26) | (self.parameter_group_number << 8) | (self.source_address)
    @can_id.setter
    def can_id(self, can_id):
        """Unpack a 29-bit CAN-Id into priority, PGN and source address."""
        self.source_address = can_id & 0xFF
        self.parameter_group_number = (can_id >> 8) & 0x3FFFF
        self.priority = (can_id >> 26) & 0x7
| true | true |
f7399ef9fe1cd64a45630716948933386260b1aa | 4,612 | py | Python | igibson/examples/demo/generate_data_semseg_lidar.py | Nick-AhSen/iGibson | c6854f11eec5d935fa3ef3d6d4852c6571beab4b | [
"MIT"
] | null | null | null | igibson/examples/demo/generate_data_semseg_lidar.py | Nick-AhSen/iGibson | c6854f11eec5d935fa3ef3d6d4852c6571beab4b | [
"MIT"
] | null | null | null | igibson/examples/demo/generate_data_semseg_lidar.py | Nick-AhSen/iGibson | c6854f11eec5d935fa3ef3d6d4852c6571beab4b | [
"MIT"
] | null | null | null | import os
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import igibson
from igibson.envs.igibson_env import iGibsonEnv
def get_lidar_sampling_pattern():
    """Compute pixel sampling coordinates emulating a 16-beam lidar.

    Builds 16 vertical beams spanning [-15, 15] degrees and 468 horizontal
    beams spanning [-45, 45) degrees, projects them onto a 128-pixel image
    plane and returns the flattened integer pixel indices.

    Returns:
        (x_samples, y_samples): 1-D integer arrays of equal length holding
        the row/column indices to sample from each cube face.
    """
    lidar_vertical_low = -15 / 180.0 * np.pi
    lidar_vertical_high = 15 / 180.0 * np.pi
    lidar_vertical_n_beams = 16
    lidar_vertical_beams = np.arange(
        lidar_vertical_low,
        lidar_vertical_high + (lidar_vertical_high - lidar_vertical_low) / (lidar_vertical_n_beams - 1),
        (lidar_vertical_high - lidar_vertical_low) / (lidar_vertical_n_beams - 1),
    )

    lidar_horizontal_low = -45 / 180.0 * np.pi
    lidar_horizontal_high = 45 / 180.0 * np.pi
    lidar_horizontal_n_beams = 468
    lidar_horizontal_beams = np.arange(
        lidar_horizontal_low,
        lidar_horizontal_high,
        (lidar_horizontal_high - lidar_horizontal_low) / (lidar_horizontal_n_beams),
    )

    xx, yy = np.meshgrid(lidar_vertical_beams, lidar_horizontal_beams)
    xx = xx.flatten()
    yy = yy.flatten()

    height = 128

    # Bug fix: np.int is a removed deprecated alias (NumPy >= 1.24); the
    # builtin int has identical semantics here.
    x_samples = (np.tan(xx) / np.cos(yy) * height // 2 + height // 2).astype(int)
    y_samples = (np.tan(yy) * height // 2 + height // 2).astype(int)

    x_samples = x_samples.flatten()
    y_samples = y_samples.flatten()
    return x_samples, y_samples
# Precompute the lidar-pattern pixel indices once at import time; reused by
# generate_data_lidar below.
x_samples, y_samples = get_lidar_sampling_pattern()
def generate_data_lidar(nav_env, num_samples=3):
    """Render panoramic lidar-style samples around a random scene point.

    Picks a random anchor point, then for each of *num_samples* picks another
    random point within 1 m, teleports the robot there and renders RGB / 3D /
    segmentation cube maps, sampling four cube faces with the precomputed
    lidar pattern.

    Returns:
        (lidar_all, direction, rgb_all, label_all): concatenated float32
        anchor-relative 3-D points, float32 unit view directions, float32 RGB
        colors and int32 segmentation labels (scaled by 255).
    """
    rgb_all = []
    lidar_all = []
    lidar_all_2 = []
    label_all = []

    # Anchor point all sampled positions are expressed relative to.
    point = nav_env.scene.get_random_point()[1]

    for _ in range(num_samples):
        # Rejection-sample a point within 1 m of the anchor.
        new_point = nav_env.scene.get_random_point()[1]
        while np.linalg.norm(new_point - point) > 1:
            new_point = nav_env.scene.get_random_point()[1]
        delta_pos = new_point - point
        # Axes reordered to (y, z, x) — presumably the renderer's camera
        # convention; TODO confirm against the renderer docs.
        delta_pos = np.array([delta_pos[1], delta_pos[2], delta_pos[0]])
        nav_env.robots[0].set_position(new_point)
        pano_rgb = nav_env.simulator.renderer.get_cube(mode="rgb", use_robot_camera=True)
        pano_3d = nav_env.simulator.renderer.get_cube(mode="3d", use_robot_camera=True)
        pano_seg = nav_env.simulator.renderer.get_cube(mode="seg", use_robot_camera=True)

        # Rotation by -90 degrees about the vertical (y) axis; applied
        # cumulatively to step through four cube faces.
        r3 = np.array(
            [[np.cos(-np.pi / 2), 0, -np.sin(-np.pi / 2)], [0, 1, 0], [np.sin(-np.pi / 2), 0, np.cos(-np.pi / 2)]]
        )

        transformatiom_matrix = np.eye(3)

        for i in range(4):
            lidar_all.append(pano_3d[i][:, :, :3].dot(transformatiom_matrix)[x_samples, y_samples] - delta_pos[None, :])
            rgb_all.append(pano_rgb[i][:, :, :3][x_samples, y_samples])
            label_all.append(pano_seg[i][:, :, 0][x_samples, y_samples] * 255.0)
            # Shrunken copy (factor 0.9) used below to derive per-point view
            # directions.
            lidar_all_2.append(
                pano_3d[i][:, :, :3].dot(transformatiom_matrix)[x_samples, y_samples] * 0.9 - delta_pos[None, :]
            )
            transformatiom_matrix = r3.dot(transformatiom_matrix)

    lidar_all = np.concatenate(lidar_all, 0).astype(np.float32)
    lidar_all_2 = np.concatenate(lidar_all_2, 0).astype(np.float32)
    rgb_all = np.concatenate(rgb_all, 0).astype(np.float32)
    label_all = np.concatenate(label_all, 0).astype(np.int32)

    # NOTE(review): tautological assert — probably intended to compare
    # len(lidar_all) with len(label_all).
    assert len(label_all) == len(label_all)
    # Unit direction from the scaled copy towards each point.
    direction = lidar_all - lidar_all_2
    direction = direction / (np.linalg.norm(direction, axis=1)[:, None] + 1e-5)

    print(lidar_all.shape, direction.shape, rgb_all.shape, label_all.shape)

    return lidar_all, direction, rgb_all, label_all
def generate_data_from_scene(scene_id):
    """Load *scene_id* in a headless iGibson env, render one batch of lidar
    samples and show them as a 3-D scatter plot colored by RGB."""
    mode = "headless"
    config = os.path.join(igibson.example_path, "configs/fetch_room_rearrangement.yaml")
    nav_env = iGibsonEnv(
        config_file=config, mode=mode, scene_id=scene_id, action_timestep=1.0 / 120.0, physics_timestep=1.0 / 120.0
    )

    pts, direction, color, label = generate_data_lidar(nav_env)

    # Visualize the sampled point cloud (y/z axes swapped for display).
    fig = plt.figure()
    ax = Axes3D(fig)
    ax.scatter(pts[:, 0], pts[:, 2], pts[:, 1], s=3, c=color[:, :3])
    plt.show()
if __name__ == "__main__":
    # Demo entry point: generate and display samples for a single scene.
    generate_data_from_scene("Rs_int")
| 34.676692 | 138 | 0.6585 | import os
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import igibson
from igibson.envs.igibson_env import iGibsonEnv
def get_lidar_sampling_pattern():
lidar_vertical_low = -15 / 180.0 * np.pi
lidar_vertical_high = 15 / 180.0 * np.pi
lidar_vertical_n_beams = 16
lidar_vertical_beams = np.arange(
lidar_vertical_low,
lidar_vertical_high + (lidar_vertical_high - lidar_vertical_low) / (lidar_vertical_n_beams - 1),
(lidar_vertical_high - lidar_vertical_low) / (lidar_vertical_n_beams - 1),
)
lidar_horizontal_low = -45 / 180.0 * np.pi
lidar_horizontal_high = 45 / 180.0 * np.pi
lidar_horizontal_n_beams = 468
lidar_horizontal_beams = np.arange(
lidar_horizontal_low,
lidar_horizontal_high,
(lidar_horizontal_high - lidar_horizontal_low) / (lidar_horizontal_n_beams),
)
xx, yy = np.meshgrid(lidar_vertical_beams, lidar_horizontal_beams)
xx = xx.flatten()
yy = yy.flatten()
height = 128
x_samples = (np.tan(xx) / np.cos(yy) * height // 2 + height // 2).astype(np.int)
y_samples = (np.tan(yy) * height // 2 + height // 2).astype(np.int)
x_samples = x_samples.flatten()
y_samples = y_samples.flatten()
return x_samples, y_samples
x_samples, y_samples = get_lidar_sampling_pattern()
def generate_data_lidar(nav_env, num_samples=3):
rgb_all = []
lidar_all = []
lidar_all_2 = []
label_all = []
point = nav_env.scene.get_random_point()[1]
for _ in range(num_samples):
new_point = nav_env.scene.get_random_point()[1]
while np.linalg.norm(new_point - point) > 1:
new_point = nav_env.scene.get_random_point()[1]
delta_pos = new_point - point
delta_pos = np.array([delta_pos[1], delta_pos[2], delta_pos[0]])
nav_env.robots[0].set_position(new_point)
pano_rgb = nav_env.simulator.renderer.get_cube(mode="rgb", use_robot_camera=True)
pano_3d = nav_env.simulator.renderer.get_cube(mode="3d", use_robot_camera=True)
pano_seg = nav_env.simulator.renderer.get_cube(mode="seg", use_robot_camera=True)
r3 = np.array(
[[np.cos(-np.pi / 2), 0, -np.sin(-np.pi / 2)], [0, 1, 0], [np.sin(-np.pi / 2), 0, np.cos(-np.pi / 2)]]
)
transformatiom_matrix = np.eye(3)
for i in range(4):
lidar_all.append(pano_3d[i][:, :, :3].dot(transformatiom_matrix)[x_samples, y_samples] - delta_pos[None, :])
rgb_all.append(pano_rgb[i][:, :, :3][x_samples, y_samples])
label_all.append(pano_seg[i][:, :, 0][x_samples, y_samples] * 255.0)
lidar_all_2.append(
pano_3d[i][:, :, :3].dot(transformatiom_matrix)[x_samples, y_samples] * 0.9 - delta_pos[None, :]
)
transformatiom_matrix = r3.dot(transformatiom_matrix)
lidar_all = np.concatenate(lidar_all, 0).astype(np.float32)
lidar_all_2 = np.concatenate(lidar_all_2, 0).astype(np.float32)
rgb_all = np.concatenate(rgb_all, 0).astype(np.float32)
label_all = np.concatenate(label_all, 0).astype(np.int32)
assert len(label_all) == len(label_all)
direction = lidar_all - lidar_all_2
direction = direction / (np.linalg.norm(direction, axis=1)[:, None] + 1e-5)
print(lidar_all.shape, direction.shape, rgb_all.shape, label_all.shape)
return lidar_all, direction, rgb_all, label_all
def generate_data_from_scene(scene_id):
    """Load *scene_id* headless, render one lidar sample batch and plot it."""
    mode = "headless"
    config = os.path.join(igibson.example_path, "configs/fetch_room_rearrangement.yaml")
    nav_env = iGibsonEnv(
        config_file=config, mode=mode, scene_id=scene_id, action_timestep=1.0 / 120.0, physics_timestep=1.0 / 120.0
    )
    pts, direction, color, label = generate_data_lidar(nav_env)
    # Scatter the point cloud colored by RGB (y/z axes swapped for display).
    fig = plt.figure()
    ax = Axes3D(fig)
    ax.scatter(pts[:, 0], pts[:, 2], pts[:, 1], s=3, c=color[:, :3])
    plt.show()
generate_data_from_scene("Rs_int")
| true | true |
f7399fbb5415937ee67219af20b12806764dc87c | 14,431 | py | Python | salt/cloud/libcloudfuncs.py | lyft/salt | 2715908423a412f736253d0e5d3cfe185a0179a2 | [
"Apache-2.0"
] | 3 | 2015-04-16T18:42:35.000Z | 2017-10-30T16:57:49.000Z | salt/salt/cloud/libcloudfuncs.py | smallyear/linuxLearn | 342e5020bf24b5fac732c4275a512087b47e578d | [
"Apache-2.0"
] | 16 | 2015-11-18T00:44:03.000Z | 2018-10-29T20:48:27.000Z | salt/salt/cloud/libcloudfuncs.py | smallyear/linuxLearn | 342e5020bf24b5fac732c4275a512087b47e578d | [
"Apache-2.0"
] | 1 | 2017-01-27T21:33:36.000Z | 2017-01-27T21:33:36.000Z | # -*- coding: utf-8 -*-
'''
The generic libcloud template used to create the connections and deploy the
cloud virtual machines
'''
from __future__ import absolute_import
# Import python libs
import os
import logging
from salt.ext.six import string_types
import salt.ext.six as six
from salt.ext.six.moves import zip
# pylint: disable=W0611
# Import libcloud
try:
import libcloud
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.compute.deployment import (
MultiStepDeployment,
ScriptDeployment
)
HAS_LIBCLOUD = True
LIBCLOUD_VERSION_INFO = tuple([
int(part) for part in libcloud.__version__.replace('-', '.').split('.')[:3]
])
except ImportError:
HAS_LIBCLOUD = False
LIBCLOUD_VERSION_INFO = (1000,)
# pylint: enable=W0611
# Import salt libs
import salt.utils.event
import salt.client
# Import salt cloud libs
import salt.utils
import salt.utils.cloud
import salt.config as config
from salt.exceptions import SaltCloudNotFound, SaltCloudSystemExit
# Get logging started
log = logging.getLogger(__name__)
LIBCLOUD_MINIMAL_VERSION = (0, 14, 0)
def node_state(id_):
    '''
    Map a libcloud numeric node state to its display name.
    '''
    state_names = {
        0: 'RUNNING',
        1: 'REBOOTING',
        2: 'TERMINATED',
        3: 'PENDING',
        4: 'UNKNOWN',
        5: 'STOPPED',
        6: 'SUSPENDED',
        7: 'ERROR',
        8: 'PAUSED',
    }
    return state_names[id_]
def check_libcloud_version(reqver=LIBCLOUD_MINIMAL_VERSION, why=None):
    '''
    Check that the installed libcloud is at least *reqver*.

    Returns ``False`` when libcloud is not importable at all, the libcloud
    version string when the requirement is met, and raises ``ImportError``
    with an upgrade hint otherwise.

    :param reqver: minimal required version as a tuple/list, e.g. (0, 14, 0)
    :param why: optional reason appended to the error message
    '''
    if not HAS_LIBCLOUD:
        return False

    if not isinstance(reqver, (list, tuple)):
        raise RuntimeError(
            '\'reqver\' needs to passed as a tuple or list, i.e., (0, 14, 0)'
        )
    # HAS_LIBCLOUD being True guarantees the module-level `import libcloud`
    # succeeded, so the previous in-function re-import (and its unreachable
    # ImportError branch) has been removed.
    if LIBCLOUD_VERSION_INFO >= reqver:
        return libcloud.__version__

    errormsg = 'Your version of libcloud is {0}. '.format(libcloud.__version__)
    errormsg += 'salt-cloud requires >= libcloud {0}'.format(
        '.'.join([str(num) for num in reqver])
    )
    if why:
        errormsg += ' for {0}'.format(why)
    errormsg += '. Please upgrade.'
    raise ImportError(errormsg)
def get_node(conn, name):
    '''
    Return the libcloud node whose name matches *name* (caching its data in
    the salt-cloud node cache), or ``None`` when no node matches.
    '''
    for candidate in conn.list_nodes():
        if candidate.name != name:
            continue
        salt.utils.cloud.cache_node(salt.utils.cloud.simple_types_filter(candidate.__dict__), __active_provider_name__, __opts__)
        return candidate
def avail_locations(conn=None, call=None):
    '''
    Return a dict of all available VM locations on the cloud provider with
    relevant data
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_locations function must be called with '
            '-f or --function, or with the --list-locations option'
        )

    if not conn:
        conn = get_conn()  # pylint: disable=E0602

    ret = {}
    for location in conn.list_locations():
        # Keys are the ASCII-forced location names.
        if isinstance(location.name, string_types):
            loc_name = location.name.encode('ascii', 'salt-cloud-force-ascii')
        else:
            loc_name = str(location.name)

        attrs = {}
        for attr in dir(location):
            if attr.startswith('_'):
                continue
            value = getattr(location, attr)
            if isinstance(value, string_types):
                value = value.encode('ascii', 'salt-cloud-force-ascii')
            attrs[attr] = value
        ret[loc_name] = attrs
    return ret
def avail_images(conn=None, call=None):
    '''
    Return a dict of all available VM images on the cloud provider with
    relevant data
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_images function must be called with '
            '-f or --function, or with the --list-images option'
        )

    if not conn:
        conn = get_conn()  # pylint: disable=E0602

    ret = {}
    for image in conn.list_images():
        # Keys are the ASCII-forced image names.
        if isinstance(image.name, string_types):
            image_name = image.name.encode('ascii', 'salt-cloud-force-ascii')
        else:
            image_name = str(image.name)

        attrs = {}
        for attr in dir(image):
            if attr.startswith('_'):
                continue
            value = getattr(image, attr)
            if isinstance(value, string_types):
                value = value.encode('ascii', 'salt-cloud-force-ascii')
            attrs[attr] = value
        ret[image_name] = attrs
    return ret
def avail_sizes(conn=None, call=None):
    '''
    Return a dict of all available VM sizes on the cloud provider with
    relevant data
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_sizes function must be called with '
            '-f or --function, or with the --list-sizes option'
        )

    if not conn:
        conn = get_conn()  # pylint: disable=E0602

    sizes = conn.list_sizes()
    ret = {}
    for size in sizes:
        if isinstance(size.name, string_types):
            size_name = size.name.encode('ascii', 'salt-cloud-force-ascii')
        else:
            size_name = str(size.name)
        ret[size_name] = {}
        for attr in dir(size):
            if attr.startswith('_'):
                continue

            try:
                attr_value = getattr(size, attr)
            except Exception:
                # Bug fix: the exception was previously swallowed with `pass`,
                # after which the stale attr_value from the previous iteration
                # was stored (or NameError on the very first attribute). Skip
                # the unreadable attribute instead.
                continue

            if isinstance(attr_value, string_types):
                attr_value = attr_value.encode(
                    'ascii', 'salt-cloud-force-ascii'
                )
            ret[size_name][attr] = attr_value
    return ret
def get_location(conn, vm_):
    '''
    Return the libcloud location object matching the profile's ``location``
    setting (matched by id or name); raises ``SaltCloudNotFound`` otherwise.
    '''
    vm_location = config.get_cloud_config_value('location', vm_, __opts__).encode(
        'ascii', 'salt-cloud-force-ascii'
    )

    for location in conn.list_locations():
        loc_id = (location.id.encode('ascii', 'salt-cloud-force-ascii')
                  if isinstance(location.id, string_types) else str(location.id))
        loc_name = (location.name.encode('ascii', 'salt-cloud-force-ascii')
                    if isinstance(location.name, string_types) else str(location.name))

        if vm_location and vm_location in (loc_id, loc_name):
            return location

    raise SaltCloudNotFound(
        'The specified location, {0!r}, could not be found.'.format(
            vm_location
        )
    )
def get_image(conn, vm_):
    '''
    Return the libcloud image object matching the profile's ``image`` setting
    (matched by id or name); raises ``SaltCloudNotFound`` otherwise.
    '''
    vm_image = config.get_cloud_config_value('image', vm_, __opts__).encode(
        'ascii', 'salt-cloud-force-ascii'
    )

    for image in conn.list_images():
        image_id = (image.id.encode('ascii', 'salt-cloud-force-ascii')
                    if isinstance(image.id, string_types) else str(image.id))
        image_name = (image.name.encode('ascii', 'salt-cloud-force-ascii')
                      if isinstance(image.name, string_types) else str(image.name))

        if vm_image and vm_image in (image_id, image_name):
            return image

    raise SaltCloudNotFound(
        'The specified image, {0!r}, could not be found.'.format(vm_image)
    )
def get_size(conn, vm_):
    '''
    Return the libcloud size object matching the profile's ``size`` setting
    (matched by id or name).  Falls back to the provider's first size when no
    size is configured; raises ``SaltCloudNotFound`` when nothing matches.
    '''
    sizes = conn.list_sizes()
    vm_size = config.get_cloud_config_value('size', vm_, __opts__)
    if not vm_size:
        return sizes[0]

    wanted = str(vm_size)
    for size in sizes:
        if wanted in (str(size.id), str(size.name)):
            return size
    raise SaltCloudNotFound(
        'The specified size, {0!r}, could not be found.'.format(vm_size)
    )
def script(vm_):
    '''
    Build the ScriptDeployment used to bootstrap the minion on the new VM.
    '''
    os_name = config.get_cloud_config_value('os', vm_, __opts__)
    minion_yaml = salt.utils.cloud.salt_config_to_yaml(
        salt.utils.cloud.minion_config(__opts__, vm_)
    )
    deploy_script = salt.utils.cloud.os_script(
        os_name,
        vm_,
        __opts__,
        minion_yaml,
    )
    return ScriptDeployment(deploy_script)
def destroy(name, conn=None, call=None):
    '''
    Delete a single VM.

    Fires destroying/destroyed events on the salt event bus, optionally
    flushes the minion's Salt Mine (per the profile's
    ``flush_mine_on_destroy``), removes cached SSH keys and the minion cache
    entry.  Returns ``True`` on success, ``False`` otherwise.
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )

    # Announce the destroy on the salt event bus before doing anything.
    salt.utils.cloud.fire_event(
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        {'name': name},
        transport=__opts__['transport']
    )

    if not conn:
        conn = get_conn()   # pylint: disable=E0602

    node = get_node(conn, name)
    profiles = get_configured_provider()['profiles']  # pylint: disable=E0602
    # NOTE(review): only logs when the node is missing; the node.extra access
    # below would then raise AttributeError on None.
    if node is None:
        log.error('Unable to find the VM {0}'.format(name))
    # Honor the profile's flush_mine_on_destroy setting, looked up through
    # the profile name recorded in the node metadata.
    profile = None
    if 'metadata' in node.extra and 'profile' in node.extra['metadata']:
        profile = node.extra['metadata']['profile']
    flush_mine_on_destroy = False
    if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]:
        flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy']
    if flush_mine_on_destroy:
        log.info('Clearing Salt Mine: {0}'.format(name))

        # NOTE(review): .update() mutates salt.config.DEFAULT_MINION_OPTS in
        # place instead of a copy — preserved as-is.
        mopts_ = salt.config.DEFAULT_MINION_OPTS
        conf_path = '/'.join(__opts__['conf_file'].split('/')[:-1])
        mopts_.update(
            salt.config.minion_config(os.path.join(conf_path, 'minion'))
        )
        client = salt.client.get_local_client(mopts_)
        # `minions` is unused; the cmd() call is issued for its side effect.
        minions = client.cmd(name, 'mine.flush')

    # NOTE(review): logged unconditionally, even when no mine flush was
    # requested — looks unintentional but is preserved as-is.
    log.info('Clearing Salt Mine: {0}, {1}'.format(name, flush_mine_on_destroy))
    log.info('Destroying VM: {0}'.format(name))
    ret = conn.destroy_node(node)
    if ret:
        log.info('Destroyed VM: {0}'.format(name))
        # Fire destroy action
        salt.utils.cloud.fire_event(
            'event',
            'destroyed instance',
            'salt/cloud/{0}/destroyed'.format(name),
            {'name': name},
            transport=__opts__['transport']
        )
        # Optionally remove the VM's host key from the local known_hosts.
        if __opts__['delete_sshkeys'] is True:
            public_ips = getattr(node, __opts__.get('ssh_interface', 'public_ips'))
            if public_ips:
                salt.utils.cloud.remove_sshkey(public_ips[0])

            private_ips = getattr(node, __opts__.get('ssh_interface', 'private_ips'))
            if private_ips:
                salt.utils.cloud.remove_sshkey(private_ips[0])

        if __opts__.get('update_cachedir', False) is True:
            salt.utils.cloud.delete_minion_cachedir(name, __active_provider_name__.split(':')[0], __opts__)

        return True

    log.error('Failed to Destroy VM: {0}'.format(name))
    return False
def reboot(name, conn=None):
    '''
    Reboot a single VM.

    name -- name of the VM to reboot
    conn -- optional existing libcloud connection; created when omitted

    Returns True when the reboot succeeded, False otherwise.
    '''
    if not conn:
        conn = get_conn()   # pylint: disable=E0602

    node = get_node(conn, name)
    if node is None:
        log.error('Unable to find the VM {0}'.format(name))
        # BUG FIX: the original fell through and passed None to
        # conn.reboot_node(); bail out instead.
        return False
    log.info('Rebooting VM: {0}'.format(name))
    ret = conn.reboot_node(node)
    if ret:
        log.info('Rebooted VM: {0}'.format(name))
        # Fire reboot action.
        # BUG FIX: a missing comma made 'salt-cloud' implicitly concatenate
        # with the tag string, producing the mangled tag
        # 'salt-cloudsalt/cloud/<name>/rebooting' and shifting fire_event's
        # arguments; pass the tag on its own.
        salt.utils.cloud.fire_event(
            'event',
            '{0} has been rebooted'.format(name),
            'salt/cloud/{0}/rebooting'.format(name),
            {'name': name},
            transport=__opts__['transport']
        )
        return True

    log.error('Failed to reboot VM: {0}'.format(name))
    return False
def list_nodes(conn=None, call=None):
    '''
    Return a list of the VMs that are on the provider
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes function must be called with -f or --function.'
        )

    if not conn:
        conn = get_conn()   # pylint: disable=E0602

    result = {}
    for vm in conn.list_nodes():
        # Expose only the commonly queried, serializable node fields.
        result[vm.name] = {
            'id': vm.id,
            'image': vm.image,
            'name': vm.name,
            'private_ips': vm.private_ips,
            'public_ips': vm.public_ips,
            'size': vm.size,
            'state': node_state(vm.state),
        }
    return result
def list_nodes_full(conn=None, call=None):
    '''
    Return a list of the VMs that are on the provider, with all fields
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_full function must be called with -f or --function.'
        )

    if not conn:
        conn = get_conn()   # pylint: disable=E0602

    ret = {}
    for node in conn.list_nodes():
        # A shallow copy replaces the original's manual
        # zip(keys, itervalues) reconstruction of the same mapping.
        pairs = dict(node.__dict__)
        # The libcloud driver object is not serializable and is useless to
        # callers; drop it before caching.
        del pairs['driver']
        ret[node.name] = pairs

    salt.utils.cloud.cache_node_list(ret, __active_provider_name__.split(':')[0], __opts__)
    return ret
def list_nodes_select(conn=None, call=None):
    '''
    Return a list of the VMs that are on the provider, with select fields
    '''
    if not conn:
        conn = get_conn()   # pylint: disable=E0602

    # Delegate field selection to the shared salt-cloud helper, driven by
    # the user's query.selection configuration.
    full_listing = list_nodes_full(conn, 'function')
    return salt.utils.cloud.list_nodes_select(
        full_listing, __opts__['query.selection'], call,
    )
def show_instance(name, call=None):
    '''
    Show the details from the provider concerning an instance
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The show_instance action must be called with -a or --action.'
        )

    # Look the instance up in the full listing and refresh its cache entry.
    all_nodes = list_nodes_full()
    instance = all_nodes[name]
    salt.utils.cloud.cache_node(instance, __active_provider_name__, __opts__)
    return instance
def conn_has_method(conn, method_name):
    '''
    Find if the provided connection object has a specific method
    '''
    # dir() mirrors the original lookup semantics exactly.
    if method_name in dir(conn):
        return True

    log.error(
        'Method {0!r} not yet supported!'.format(method_name)
    )
    return False
| 28.185547 | 128 | 0.59095 |
from __future__ import absolute_import
import os
import logging
from salt.ext.six import string_types
import salt.ext.six as six
from salt.ext.six.moves import zip
try:
import libcloud
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.compute.deployment import (
MultiStepDeployment,
ScriptDeployment
)
HAS_LIBCLOUD = True
LIBCLOUD_VERSION_INFO = tuple([
int(part) for part in libcloud.__version__.replace('-', '.').split('.')[:3]
])
except ImportError:
HAS_LIBCLOUD = False
LIBCLOUD_VERSION_INFO = (1000,)
import salt.utils.event
import salt.client
import salt.utils
import salt.utils.cloud
import salt.config as config
from salt.exceptions import SaltCloudNotFound, SaltCloudSystemExit
log = logging.getLogger(__name__)
LIBCLOUD_MINIMAL_VERSION = (0, 14, 0)
def node_state(id_):
    '''Map a libcloud numeric node state to its human-readable name.'''
    state_names = {
        0: 'RUNNING',
        1: 'REBOOTING',
        2: 'TERMINATED',
        3: 'PENDING',
        4: 'UNKNOWN',
        5: 'STOPPED',
        6: 'SUSPENDED',
        7: 'ERROR',
        8: 'PAUSED',
    }
    return state_names[id_]
def check_libcloud_version(reqver=LIBCLOUD_MINIMAL_VERSION, why=None):
    # Verify that the installed libcloud meets the minimum version.
    #
    # reqver -- minimum acceptable version, as a tuple/list, e.g. (0, 14, 0)
    # why    -- optional reason appended to the upgrade error message
    #
    # Returns False when libcloud is not importable at module load time,
    # the installed version string when the requirement is satisfied,
    # and raises ImportError (or RuntimeError for a bad reqver) otherwise.
    if not HAS_LIBCLOUD:
        return False
    if not isinstance(reqver, (list, tuple)):
        raise RuntimeError(
            '\'reqver\' needs to passed as a tuple or list, i.e., (0, 14, 0)'
        )
    try:
        # Re-import defensively in case module-level import state changed.
        import libcloud
    except ImportError:
        raise ImportError(
            'salt-cloud requires >= libcloud {0} which is not installed'.format(
                '.'.join([str(num) for num in reqver])
            )
        )
    # LIBCLOUD_VERSION_INFO is the parsed module-level version tuple.
    if LIBCLOUD_VERSION_INFO >= reqver:
        return libcloud.__version__
    errormsg = 'Your version of libcloud is {0}. '.format(libcloud.__version__)
    errormsg += 'salt-cloud requires >= libcloud {0}'.format(
        '.'.join([str(num) for num in reqver])
    )
    if why:
        errormsg += ' for {0}'.format(why)
    errormsg += '. Please upgrade.'
    raise ImportError(errormsg)
def get_node(conn, name):
    # Return the libcloud node whose name matches *name*, refreshing its
    # cache entry with only serializable attributes.
    # NOTE: returns None implicitly when no node matches.
    nodes = conn.list_nodes()
    for node in nodes:
        if node.name == name:
            # Filter the node dict to simple types before caching: the raw
            # dict holds non-serializable driver objects.
            salt.utils.cloud.cache_node(salt.utils.cloud.simple_types_filter(node.__dict__), __active_provider_name__, __opts__)
            return node
def avail_locations(conn=None, call=None):
    # Return a dict of all available provider locations with their
    # public attributes, keyed by ASCII-forced location name.
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_locations function must be called with '
            '-f or --function, or with the --list-locations option'
        )
    if not conn:
        conn = get_conn()
    locations = conn.list_locations()
    ret = {}
    for img in locations:
        # Force names to plain ASCII so the output serializes cleanly.
        if isinstance(img.name, string_types):
            img_name = img.name.encode('ascii', 'salt-cloud-force-ascii')
        else:
            img_name = str(img.name)
        ret[img_name] = {}
        for attr in dir(img):
            # Skip private/dunder attributes.
            if attr.startswith('_'):
                continue
            attr_value = getattr(img, attr)
            if isinstance(attr_value, string_types):
                attr_value = attr_value.encode(
                    'ascii', 'salt-cloud-force-ascii'
                )
            ret[img_name][attr] = attr_value
    return ret
def avail_images(conn=None, call=None):
    # Return a dict of all available provider images with their public
    # attributes, keyed by ASCII-forced image name.
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_images function must be called with '
            '-f or --function, or with the --list-images option'
        )
    if not conn:
        conn = get_conn()
    images = conn.list_images()
    ret = {}
    for img in images:
        # Force names to plain ASCII so the output serializes cleanly.
        if isinstance(img.name, string_types):
            img_name = img.name.encode('ascii', 'salt-cloud-force-ascii')
        else:
            img_name = str(img.name)
        ret[img_name] = {}
        for attr in dir(img):
            # Skip private/dunder attributes.
            if attr.startswith('_'):
                continue
            attr_value = getattr(img, attr)
            if isinstance(attr_value, string_types):
                attr_value = attr_value.encode(
                    'ascii', 'salt-cloud-force-ascii'
                )
            ret[img_name][attr] = attr_value
    return ret
def avail_sizes(conn=None, call=None):
    '''
    Return a dict of all available VM sizes on the provider with their
    public attributes, keyed by ASCII-forced size name.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_sizes function must be called with '
            '-f or --function, or with the --list-sizes option'
        )
    if not conn:
        conn = get_conn()
    sizes = conn.list_sizes()
    ret = {}
    for size in sizes:
        # Force names to plain ASCII so the output serializes cleanly.
        if isinstance(size.name, string_types):
            size_name = size.name.encode('ascii', 'salt-cloud-force-ascii')
        else:
            size_name = str(size.name)
        ret[size_name] = {}
        for attr in dir(size):
            if attr.startswith('_'):
                continue
            try:
                attr_value = getattr(size, attr)
            except Exception:
                # BUG FIX: the original did `pass` and then used attr_value,
                # which was either unbound (NameError on the first attribute)
                # or stale from the previous iteration; skip unreadable
                # attributes instead.
                continue
            if isinstance(attr_value, string_types):
                attr_value = attr_value.encode(
                    'ascii', 'salt-cloud-force-ascii'
                )
            ret[size_name][attr] = attr_value
    return ret
def get_location(conn, vm_):
    # Resolve the 'location' setting from the VM profile to a libcloud
    # location object, matching by either id or name.
    # Raises SaltCloudNotFound when no location matches.
    locations = conn.list_locations()
    vm_location = config.get_cloud_config_value('location', vm_, __opts__).encode(
        'ascii', 'salt-cloud-force-ascii'
    )
    for img in locations:
        # Normalize both id and name to ASCII strings for comparison.
        if isinstance(img.id, string_types):
            img_id = img.id.encode('ascii', 'salt-cloud-force-ascii')
        else:
            img_id = str(img.id)
        if isinstance(img.name, string_types):
            img_name = img.name.encode('ascii', 'salt-cloud-force-ascii')
        else:
            img_name = str(img.name)
        if vm_location and vm_location in (img_id, img_name):
            return img
    raise SaltCloudNotFound(
        'The specified location, {0!r}, could not be found.'.format(
            vm_location
        )
    )
def get_image(conn, vm_):
    # Resolve the 'image' setting from the VM profile to a libcloud image
    # object, matching by either id or name.
    # Raises SaltCloudNotFound when no image matches.
    images = conn.list_images()
    vm_image = config.get_cloud_config_value('image', vm_, __opts__).encode(
        'ascii', 'salt-cloud-force-ascii'
    )
    for img in images:
        # Normalize both id and name to ASCII strings for comparison.
        if isinstance(img.id, string_types):
            img_id = img.id.encode('ascii', 'salt-cloud-force-ascii')
        else:
            img_id = str(img.id)
        if isinstance(img.name, string_types):
            img_name = img.name.encode('ascii', 'salt-cloud-force-ascii')
        else:
            img_name = str(img.name)
        if vm_image and vm_image in (img_id, img_name):
            return img
    raise SaltCloudNotFound(
        'The specified image, {0!r}, could not be found.'.format(vm_image)
    )
def get_size(conn, vm_):
    '''
    Return the VM size matching the profile's 'size' setting by id or name,
    or the provider's first size when none is configured.
    '''
    sizes = conn.list_sizes()
    vm_size = config.get_cloud_config_value('size', vm_, __opts__)
    if not vm_size:
        # No explicit size configured: default to the first available one.
        return sizes[0]

    wanted = str(vm_size)
    for size in sizes:
        if wanted in (str(size.id), str(size.name)):
            return size
    raise SaltCloudNotFound(
        'The specified size, {0!r}, could not be found.'.format(vm_size)
    )
def script(vm_):
    # Build the libcloud ScriptDeployment used to bootstrap the minion:
    # render the OS-specific deploy script with the minion config for this
    # VM serialized to YAML.
    return ScriptDeployment(
        salt.utils.cloud.os_script(
            config.get_cloud_config_value('os', vm_, __opts__),
            vm_,
            __opts__,
            salt.utils.cloud.salt_config_to_yaml(
                salt.utils.cloud.minion_config(__opts__, vm_)
            )
        )
    )
def destroy(name, conn=None, call=None):
    '''
    Destroy a VM, firing the salt-cloud destroying/destroyed events and
    performing the configured cleanup (mine flush, ssh keys, cache dir).

    name -- name of the VM to destroy
    conn -- optional existing libcloud connection; created when omitted
    call -- invocation type; 'function' is rejected

    Returns True when the node was destroyed, False otherwise.
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )
    salt.utils.cloud.fire_event(
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        {'name': name},
        transport=__opts__['transport']
    )
    if not conn:
        conn = get_conn()
    node = get_node(conn, name)
    profiles = get_configured_provider()['profiles']
    if node is None:
        log.error('Unable to find the VM {0}'.format(name))
        # BUG FIX: the original fell through and dereferenced node.extra,
        # raising AttributeError; report failure instead.
        return False
    # Flush this minion's mine data when the profile requests it.
    profile = None
    if 'metadata' in node.extra and 'profile' in node.extra['metadata']:
        profile = node.extra['metadata']['profile']
    flush_mine_on_destroy = False
    if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]:
        flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy']
    if flush_mine_on_destroy:
        log.info('Clearing Salt Mine: {0}'.format(name))
        mopts_ = salt.config.DEFAULT_MINION_OPTS
        conf_path = '/'.join(__opts__['conf_file'].split('/')[:-1])
        mopts_.update(
            salt.config.minion_config(os.path.join(conf_path, 'minion'))
        )
        client = salt.client.get_local_client(mopts_)
        # Fire-and-forget; the command result is not used.
        client.cmd(name, 'mine.flush')
        log.info('Clearing Salt Mine: {0}, {1}'.format(name, flush_mine_on_destroy))
    log.info('Destroying VM: {0}'.format(name))
    ret = conn.destroy_node(node)
    if ret:
        log.info('Destroyed VM: {0}'.format(name))
        salt.utils.cloud.fire_event(
            'event',
            'destroyed instance',
            'salt/cloud/{0}/destroyed'.format(name),
            {'name': name},
            transport=__opts__['transport']
        )
        # Optionally drop cached SSH host keys for the node's addresses.
        if __opts__['delete_sshkeys'] is True:
            public_ips = getattr(node, __opts__.get('ssh_interface', 'public_ips'))
            if public_ips:
                salt.utils.cloud.remove_sshkey(public_ips[0])
            private_ips = getattr(node, __opts__.get('ssh_interface', 'private_ips'))
            if private_ips:
                salt.utils.cloud.remove_sshkey(private_ips[0])
        if __opts__.get('update_cachedir', False) is True:
            salt.utils.cloud.delete_minion_cachedir(name, __active_provider_name__.split(':')[0], __opts__)
        return True
    log.error('Failed to Destroy VM: {0}'.format(name))
    return False
def reboot(name, conn=None):
    '''
    Reboot a single VM.

    Returns True when the reboot succeeded, False otherwise.
    '''
    if not conn:
        conn = get_conn()
    node = get_node(conn, name)
    if node is None:
        log.error('Unable to find the VM {0}'.format(name))
        # BUG FIX: the original fell through and passed None to
        # conn.reboot_node(); bail out instead.
        return False
    log.info('Rebooting VM: {0}'.format(name))
    ret = conn.reboot_node(node)
    if ret:
        log.info('Rebooted VM: {0}'.format(name))
        # BUG FIX: a missing comma made 'salt-cloud' implicitly concatenate
        # with the event tag, corrupting fire_event's arguments; pass the
        # tag as its own argument.
        salt.utils.cloud.fire_event(
            'event',
            '{0} has been rebooted'.format(name),
            'salt/cloud/{0}/rebooting'.format(name),
            {'name': name},
            transport=__opts__['transport']
        )
        return True
    log.error('Failed to reboot VM: {0}'.format(name))
    return False
def list_nodes(conn=None, call=None):
    # Return a dict of the provider's VMs, keyed by name, with the commonly
    # queried serializable fields only.
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes function must be called with -f or --function.'
        )
    if not conn:
        conn = get_conn()
    nodes = conn.list_nodes()
    ret = {}
    for node in nodes:
        ret[node.name] = {
            'id': node.id,
            'image': node.image,
            'name': node.name,
            'private_ips': node.private_ips,
            'public_ips': node.public_ips,
            'size': node.size,
            # Translate the numeric libcloud state to its readable name.
            'state': node_state(node.state)
        }
    return ret
def list_nodes_full(conn=None, call=None):
    '''
    Return a dict of the provider's VMs, keyed by name, with all node
    attributes, and refresh the salt-cloud node-list cache.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_full function must be called with -f or --function.'
        )
    if not conn:
        conn = get_conn()
    nodes = conn.list_nodes()
    ret = {}
    for node in nodes:
        # A shallow copy replaces the original's manual
        # zip(keys, itervalues) reconstruction of the same mapping.
        pairs = dict(node.__dict__)
        # The libcloud driver object is not serializable; drop it.
        del pairs['driver']
        ret[node.name] = pairs
    salt.utils.cloud.cache_node_list(ret, __active_provider_name__.split(':')[0], __opts__)
    return ret
def list_nodes_select(conn=None, call=None):
    '''
    Return the provider's VMs with only the user-selected fields, as
    configured by query.selection.
    '''
    if not conn:
        conn = get_conn()
    full_listing = list_nodes_full(conn, 'function')
    return salt.utils.cloud.list_nodes_select(
        full_listing, __opts__['query.selection'], call,
    )
def show_instance(name, call=None):
    '''
    Show the provider's details for a single instance, refreshing its
    cache entry as a side effect.
    '''
    if call != 'action':
        raise SaltCloudSystemExit(
            'The show_instance action must be called with -a or --action.'
        )
    instance = list_nodes_full()[name]
    salt.utils.cloud.cache_node(instance, __active_provider_name__, __opts__)
    return instance
def conn_has_method(conn, method_name):
    '''Return True when the connection object exposes *method_name*;
    otherwise log an error and return False.'''
    # dir() mirrors the original lookup semantics exactly.
    if method_name in dir(conn):
        return True

    log.error(
        'Method {0!r} not yet supported!'.format(method_name)
    )
    return False
| true | true |
f739a06af2228d7ae09588fa651ee5d70db09994 | 7,994 | py | Python | face_lib/sort.py | Spain-AI/dark_helper | c2a5d774b455b2a374d6ca5e2715f7a560f5fe5b | [
"Apache-2.0"
] | null | null | null | face_lib/sort.py | Spain-AI/dark_helper | c2a5d774b455b2a374d6ca5e2715f7a560f5fe5b | [
"Apache-2.0"
] | 8 | 2020-11-13T18:59:55.000Z | 2022-03-12T00:39:43.000Z | face_lib/sort.py | Spain-AI/dark_helper | c2a5d774b455b2a374d6ca5e2715f7a560f5fe5b | [
"Apache-2.0"
] | 1 | 2020-07-10T19:16:37.000Z | 2020-07-10T19:16:37.000Z | """
SORT: A Simple, Online and Realtime Tracker
Copyright (C) 2016-2020 Alex Bewley alex@bewley.ai
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import print_function
import os
import numpy as np
from filterpy.kalman import KalmanFilter
try:
from numba import jit
except:
def jit(func):
return func
np.random.seed(0)
def linear_assignment(cost_matrix):
    """Solve the linear assignment problem for *cost_matrix*.

    Prefers the fast `lap` solver when installed, otherwise falls back to
    scipy's Hungarian implementation. Returns an array of (row, col) pairs.
    """
    try:
        import lap
        _, x, y = lap.lapjv(cost_matrix, extend_cost=True)
        return np.array([[y[i], i] for i in x if i >= 0])
    except ImportError:
        from scipy.optimize import linear_sum_assignment
        rows, cols = linear_sum_assignment(cost_matrix)
        return np.array(list(zip(rows, cols)))
@jit
def iou(bb_test, bb_gt):
  """
  Computes IUO between two bboxes in the form [x1,y1,x2,y2]
  """
  # Intersection rectangle corners.
  ix1 = np.maximum(bb_test[0], bb_gt[0])
  iy1 = np.maximum(bb_test[1], bb_gt[1])
  ix2 = np.minimum(bb_test[2], bb_gt[2])
  iy2 = np.minimum(bb_test[3], bb_gt[3])
  # Clamp to zero when the boxes do not overlap.
  inter = np.maximum(0., ix2 - ix1) * np.maximum(0., iy2 - iy1)
  area_test = (bb_test[2] - bb_test[0]) * (bb_test[3] - bb_test[1])
  area_gt = (bb_gt[2] - bb_gt[0]) * (bb_gt[3] - bb_gt[1])
  return inter / (area_test + area_gt - inter)
def convert_bbox_to_z(bbox):
  """
  Convert a [x1,y1,x2,y2] box into the [x,y,s,r] measurement vector, where
  (x, y) is the box centre, s the area and r the aspect ratio.
  """
  width = bbox[2] - bbox[0]
  height = bbox[3] - bbox[1]
  centre_x = bbox[0] + width / 2.
  centre_y = bbox[1] + height / 2.
  area = width * height
  aspect = width / float(height)
  return np.array([centre_x, centre_y, area, aspect]).reshape((4, 1))
def convert_x_to_bbox(x, score=None):
  """
  Convert a [x,y,s,r] centre-form state into a corner-form box
  [x1,y1,x2,y2]; when *score* is given a trailing score column is appended.
  """
  w = np.sqrt(x[2] * x[3])
  h = x[2] / w
  # BUG FIX: use an identity check instead of `==`; comparing against None
  # with `==` is unreliable for array-like scores and is flagged by NumPy.
  if score is None:
    return np.array([x[0]-w/2., x[1]-h/2., x[0]+w/2., x[1]+h/2.]).reshape((1, 4))
  else:
    return np.array([x[0]-w/2., x[1]-h/2., x[0]+w/2., x[1]+h/2., score]).reshape((1, 5))
class KalmanBoxTracker(object):
  """
  This class represents the internal state of individual tracked objects observed as bbox.
  """
  # Monotonically increasing counter used to assign unique tracker ids.
  count = 0
  def __init__(self,bbox,emb):
    """
    Initialises a tracker using initial bounding box.
    """
    #define constant velocity model
    # State: [x, y, s, r, dx, dy, ds]; measurement: [x, y, s, r].
    self.kf = KalmanFilter(dim_x=7, dim_z=4)
    # F: transition matrix adding velocities to position/scale each step.
    self.kf.F = np.array([[1,0,0,0,1,0,0],[0,1,0,0,0,1,0],[0,0,1,0,0,0,1],[0,0,0,1,0,0,0],  [0,0,0,0,1,0,0],[0,0,0,0,0,1,0],[0,0,0,0,0,0,1]])
    # H: observe only the position/scale components.
    self.kf.H = np.array([[1,0,0,0,0,0,0],[0,1,0,0,0,0,0],[0,0,1,0,0,0,0],[0,0,0,1,0,0,0]])
    self.kf.R[2:,2:] *= 10.
    self.kf.P[4:,4:] *= 1000. #give high uncertainty to the unobservable initial velocities
    self.kf.P *= 10.
    self.kf.Q[-1,-1] *= 0.01
    self.kf.Q[4:,4:] *= 0.01

    self.kf.x[:4] = convert_bbox_to_z(bbox)
    self.time_since_update = 0
    self.id = KalmanBoxTracker.count
    KalmanBoxTracker.count += 1
    self.history = []   # predictions since the last measurement update
    self.hits = 0
    self.hit_streak = 0  # consecutive frames with a matched detection
    self.age = 0
    # Appearance embedding for this track (updated as a moving average).
    self.emb = emb

  def update(self, bbox, emb):
    """
    Updates the state vector with observed bbox.
    """
    self.time_since_update = 0
    self.history = []
    self.hits += 1
    self.hit_streak += 1
    self.kf.update(convert_bbox_to_z(bbox))
    # Exponential moving average of the appearance embedding.
    self.emb = 0.2 * emb + 0.8 * self.emb

  def predict(self):
    """
    Advances the state vector and returns the predicted bounding box estimate.
    """
    # Keep the predicted scale non-negative by zeroing its velocity.
    if((self.kf.x[6]+self.kf.x[2])<=0):
      self.kf.x[6] *= 0.0
    self.kf.predict()
    self.age += 1
    self.time_since_update += 1
    self.history.append(convert_x_to_bbox(self.kf.x))
    return self.history[-1]

  def get_state(self):
    """
    Returns the current bounding box estimate.
    """
    return convert_x_to_bbox(self.kf.x)
def associate_detections_to_trackers(detections,trackers,iou_threshold = 0.3):
  """
  Assigns detections to tracked object (both represented as bounding boxes)

  Returns 3 lists of matches, unmatched_detections and unmatched_trackers
  """
  if(len(trackers)==0):
    return np.empty((0,2),dtype=int), np.arange(len(detections)), np.empty((0,5),dtype=int)
  # Pairwise IOU between every detection and every tracker prediction.
  iou_matrix = np.zeros((len(detections),len(trackers)),dtype=np.float32)

  for d,det in enumerate(detections):
    for t,trk in enumerate(trackers):
      iou_matrix[d,t] = iou(det, trk)

  if min(iou_matrix.shape) > 0:
    a = (iou_matrix > iou_threshold).astype(np.int32)
    # Fast path: when the thresholded matrix is already a one-to-one
    # assignment, skip the Hungarian solver entirely.
    if a.sum(1).max() == 1 and a.sum(0).max() == 1:
        matched_indices = np.stack(np.where(a), axis=1)
    else:
      # Maximize total IOU (solver minimizes cost, hence the negation).
      matched_indices = linear_assignment(-iou_matrix)
  else:
    matched_indices = np.empty(shape=(0,2))

  unmatched_detections = []
  for d, det in enumerate(detections):
    if(d not in matched_indices[:,0]):
      unmatched_detections.append(d)
  unmatched_trackers = []
  for t, trk in enumerate(trackers):
    if(t not in matched_indices[:,1]):
      unmatched_trackers.append(t)

  #filter out matched with low IOU
  matches = []
  for m in matched_indices:
    if(iou_matrix[m[0], m[1]]<iou_threshold):
      unmatched_detections.append(m[0])
      unmatched_trackers.append(m[1])
    else:
      matches.append(m.reshape(1,2))
  if(len(matches)==0):
    matches = np.empty((0,2),dtype=int)
  else:
    matches = np.concatenate(matches,axis=0)

  return matches, np.array(unmatched_detections), np.array(unmatched_trackers)
class Sort(object):
  # Multi-object tracker: maintains one KalmanBoxTracker per object and
  # associates incoming detections to them frame by frame.
  def __init__(self, max_age=15, min_hits=3):
    """
    Sets key parameters for SORT
    """
    self.max_age = max_age    # frames a track may go unmatched before removal
    self.min_hits = min_hits  # consecutive hits before a track is reported
    self.trackers = []
    self.frame_count = 0

  def update(self, dets=np.empty((0, 4)), embs=None):
    """
    Params:
      dets - a numpy array of detections in the format [[x1,y1,x2,y2],[x1,y1,x2,y2],...]
    Requires: this method must be called once for each frame even with empty detections (use np.empty((0, 5)) for frames without detections).
    Returns the a similar array, where the last column is the object ID.

    NOTE: The number of objects returned may differ from the number of detections provided.
    """
    self.frame_count += 1
    # get predicted locations from existing trackers.
    trks = np.zeros((len(self.trackers), 4))
    to_del = []
    ret = []
    for t, trk in enumerate(trks):
      pos = self.trackers[t].predict()[0]
      trk[:] = [pos[0], pos[1], pos[2], pos[3]]
      if np.any(np.isnan(pos)):
        to_del.append(t)
    # Drop rows that went NaN; reverse order keeps pop() indices valid.
    trks = np.ma.compress_rows(np.ma.masked_invalid(trks))
    for t in reversed(to_del):
      self.trackers.pop(t)
    matched, unmatched_dets, unmatched_trks = associate_detections_to_trackers(dets,trks)

    # update matched trackers with assigned detections
    for m in matched:
      self.trackers[m[1]].update(dets[m[0], :], embs[m[0]])

    # create and initialise new trackers for unmatched detections
    for i in unmatched_dets:
        trk = KalmanBoxTracker(dets[i,:], embs[i])
        self.trackers.append(trk)
    i = len(self.trackers)
    for trk in reversed(self.trackers):
        d = trk.get_state()[0]
        # Report only tracks that are established (or during warm-up frames).
        if (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits):
          ret.append((np.concatenate((d, [trk.id + 1])), trk.emb)) # +1 as MOT benchmark requires positive
        i -= 1
        # remove dead tracklet
        if(trk.time_since_update > self.max_age):
          self.trackers.pop(i)
    if(len(ret)>0):
      return ret
    return []
import os
import numpy as np
from filterpy.kalman import KalmanFilter
try:
from numba import jit
except:
def jit(func):
return func
np.random.seed(0)
def linear_assignment(cost_matrix):
    """Solve the linear assignment problem, preferring the fast `lap`
    solver and falling back to scipy's Hungarian algorithm."""
    try:
        import lap
        _, x, y = lap.lapjv(cost_matrix, extend_cost=True)
        return np.array([[y[i], i] for i in x if i >= 0])
    except ImportError:
        from scipy.optimize import linear_sum_assignment
        row_idx, col_idx = linear_sum_assignment(cost_matrix)
        return np.array(list(zip(row_idx, col_idx)))
@jit
def iou(bb_test, bb_gt):
    # Intersection-over-union of two [x1, y1, x2, y2] boxes.
    # Overlap rectangle corners, clamped to zero width/height when disjoint.
    xx1 = np.maximum(bb_test[0], bb_gt[0])
    yy1 = np.maximum(bb_test[1], bb_gt[1])
    xx2 = np.minimum(bb_test[2], bb_gt[2])
    yy2 = np.minimum(bb_test[3], bb_gt[3])
    w = np.maximum(0., xx2 - xx1)
    h = np.maximum(0., yy2 - yy1)
    wh = w * h
    # intersection / (area_a + area_b - intersection)
    o = wh / ((bb_test[2] - bb_test[0]) * (bb_test[3] - bb_test[1])
        + (bb_gt[2] - bb_gt[0]) * (bb_gt[3] - bb_gt[1]) - wh)
    return(o)
def convert_bbox_to_z(bbox):
    """Convert a [x1,y1,x2,y2] box to the [centre_x, centre_y, area,
    aspect_ratio] measurement vector of shape (4, 1)."""
    width = bbox[2] - bbox[0]
    height = bbox[3] - bbox[1]
    cx = bbox[0] + width / 2.
    cy = bbox[1] + height / 2.
    area = width * height
    ratio = width / float(height)
    return np.array([cx, cy, area, ratio]).reshape((4, 1))
def convert_x_to_bbox(x, score=None):
    """Convert a [x,y,s,r] centre-form state to a corner-form box
    [x1,y1,x2,y2] (with a trailing score column when *score* is given)."""
    w = np.sqrt(x[2] * x[3])
    h = x[2] / w
    # BUG FIX: identity check instead of `==`; equality against None is
    # unreliable for array-like scores and is flagged by NumPy.
    if score is None:
        return np.array([x[0]-w/2., x[1]-h/2., x[0]+w/2., x[1]+h/2.]).reshape((1, 4))
    else:
        return np.array([x[0]-w/2., x[1]-h/2., x[0]+w/2., x[1]+h/2., score]).reshape((1, 5))
class KalmanBoxTracker(object):
    # Internal state of one tracked object, observed as a bounding box and
    # filtered with a constant-velocity Kalman model.
    # State: [x, y, s, r, dx, dy, ds]; measurement: [x, y, s, r].
    count = 0  # class-wide counter used to assign unique track ids
    def __init__(self,bbox,emb):
        # bbox -- initial detection [x1,y1,x2,y2]; emb -- appearance embedding.
        self.kf = KalmanFilter(dim_x=7, dim_z=4)
        # Transition matrix: velocities integrate into position/scale.
        self.kf.F = np.array([[1,0,0,0,1,0,0],[0,1,0,0,0,1,0],[0,0,1,0,0,0,1],[0,0,0,1,0,0,0],  [0,0,0,0,1,0,0],[0,0,0,0,0,1,0],[0,0,0,0,0,0,1]])
        # Measurement matrix: only position/scale components are observed.
        self.kf.H = np.array([[1,0,0,0,0,0,0],[0,1,0,0,0,0,0],[0,0,1,0,0,0,0],[0,0,0,1,0,0,0]])
        self.kf.R[2:,2:] *= 10.
        # High uncertainty on the unobservable initial velocities.
        self.kf.P[4:,4:] *= 1000.
        self.kf.P *= 10.
        self.kf.Q[-1,-1] *= 0.01
        self.kf.Q[4:,4:] *= 0.01
        self.kf.x[:4] = convert_bbox_to_z(bbox)
        self.time_since_update = 0
        self.id = KalmanBoxTracker.count
        KalmanBoxTracker.count += 1
        self.history = []   # predictions since last measurement update
        self.hits = 0
        self.hit_streak = 0  # consecutive frames matched to a detection
        self.age = 0
        self.emb = emb
    def update(self, bbox, emb):
        # Fold an observed detection into the filter state.
        self.time_since_update = 0
        self.history = []
        self.hits += 1
        self.hit_streak += 1
        self.kf.update(convert_bbox_to_z(bbox))
        # Exponential moving average of the appearance embedding.
        self.emb = 0.2 * emb + 0.8 * self.emb
    def predict(self):
        # Advance the state one frame and return the predicted box.
        # Zero the scale velocity rather than let the area go non-positive.
        if((self.kf.x[6]+self.kf.x[2])<=0):
            self.kf.x[6] *= 0.0
        self.kf.predict()
        self.age += 1
        self.time_since_update += 1
        self.history.append(convert_x_to_bbox(self.kf.x))
        return self.history[-1]
    def get_state(self):
        # Current bounding-box estimate in [x1,y1,x2,y2] form.
        return convert_x_to_bbox(self.kf.x)
def associate_detections_to_trackers(detections,trackers,iou_threshold = 0.3):
    # Assign detections to tracker predictions by maximizing IOU.
    # Returns (matches, unmatched_detection_indices, unmatched_tracker_indices).
    if(len(trackers)==0):
        return np.empty((0,2),dtype=int), np.arange(len(detections)), np.empty((0,5),dtype=int)
    # Pairwise IOU between every detection and tracker prediction.
    iou_matrix = np.zeros((len(detections),len(trackers)),dtype=np.float32)
    for d,det in enumerate(detections):
        for t,trk in enumerate(trackers):
            iou_matrix[d,t] = iou(det, trk)
    if min(iou_matrix.shape) > 0:
        a = (iou_matrix > iou_threshold).astype(np.int32)
        # Fast path: thresholded matrix already one-to-one -> skip solver.
        if a.sum(1).max() == 1 and a.sum(0).max() == 1:
            matched_indices = np.stack(np.where(a), axis=1)
        else:
            # Solver minimizes cost, hence the negated IOU.
            matched_indices = linear_assignment(-iou_matrix)
    else:
        matched_indices = np.empty(shape=(0,2))
    unmatched_detections = []
    for d, det in enumerate(detections):
        if(d not in matched_indices[:,0]):
            unmatched_detections.append(d)
    unmatched_trackers = []
    for t, trk in enumerate(trackers):
        if(t not in matched_indices[:,1]):
            unmatched_trackers.append(t)
    # Reject assignments whose IOU falls below the threshold.
    matches = []
    for m in matched_indices:
        if(iou_matrix[m[0], m[1]]<iou_threshold):
            unmatched_detections.append(m[0])
            unmatched_trackers.append(m[1])
        else:
            matches.append(m.reshape(1,2))
    if(len(matches)==0):
        matches = np.empty((0,2),dtype=int)
    else:
        matches = np.concatenate(matches,axis=0)
    return matches, np.array(unmatched_detections), np.array(unmatched_trackers)
class Sort(object):
    # SORT multi-object tracker: one KalmanBoxTracker per object, detections
    # associated to tracks by IOU each frame.
    def __init__(self, max_age=15, min_hits=3):
        # max_age  -- frames a track may go unmatched before removal
        # min_hits -- consecutive hits before a track is reported
        self.max_age = max_age
        self.min_hits = min_hits
        self.trackers = []
        self.frame_count = 0
    def update(self, dets=np.empty((0, 4)), embs=None):
        # Advance all tracks one frame and fold in new detections.
        # dets -- array of [x1,y1,x2,y2] detections; embs -- matching
        # appearance embeddings. Must be called once per frame, even when
        # there are no detections. Returns a list of
        # (box_with_id, embedding) pairs; ids are 1-based.
        self.frame_count += 1
        # Predicted locations from the existing trackers.
        trks = np.zeros((len(self.trackers), 4))
        to_del = []
        ret = []
        for t, trk in enumerate(trks):
            pos = self.trackers[t].predict()[0]
            trk[:] = [pos[0], pos[1], pos[2], pos[3]]
            if np.any(np.isnan(pos)):
                to_del.append(t)
        # Drop NaN rows; reverse order keeps pop() indices valid.
        trks = np.ma.compress_rows(np.ma.masked_invalid(trks))
        for t in reversed(to_del):
            self.trackers.pop(t)
        matched, unmatched_dets, unmatched_trks = associate_detections_to_trackers(dets,trks)
        # Update matched trackers with their assigned detections.
        for m in matched:
            self.trackers[m[1]].update(dets[m[0], :], embs[m[0]])
        # Start new tracks for unmatched detections.
        for i in unmatched_dets:
            trk = KalmanBoxTracker(dets[i,:], embs[i])
            self.trackers.append(trk)
        i = len(self.trackers)
        for trk in reversed(self.trackers):
            d = trk.get_state()[0]
            # Report established tracks (always during warm-up frames);
            # id is offset by +1 so reported ids stay positive.
            if (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits):
                ret.append((np.concatenate((d, [trk.id + 1])), trk.emb))
            i -= 1
            # Remove tracks unmatched for longer than max_age.
            if(trk.time_since_update > self.max_age):
                self.trackers.pop(i)
        if(len(ret)>0):
            return ret
        return []
f739a070c21ba2c681957348bca4279bc2086a5e | 8,998 | py | Python | utils/data_iterators/cifar100.py | ashishgaurav13/cl_safer_classifiers | a3df87a4bc863377485fa58a8a475991a4fc9800 | [
"MIT"
] | 1 | 2020-07-03T06:51:19.000Z | 2020-07-03T06:51:19.000Z | utils/data_iterators/cifar100.py | ashishgaurav13/cl_safer_classifiers | a3df87a4bc863377485fa58a8a475991a4fc9800 | [
"MIT"
] | 3 | 2021-06-08T20:55:17.000Z | 2022-03-12T00:14:23.000Z | utils/data_iterators/cifar100.py | ashishgaurav13/cl_safer_classifiers | a3df87a4bc863377485fa58a8a475991a4fc9800 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import os
from tensorflow.keras.models import load_model
from tensorflow.keras.backend import clear_session
from keras.utils import to_categorical
import tensorflow.keras as keras
from .common import save_pickle, load_pickle
from tqdm import tqdm
# utils/data_iterators/cifar100_ResNet44v1_model.171.h5 => flatten
# utils/data_iterators/cifar100_ResNet44v1_model.171.h5 => activation_42
class CIFAR100_DataIterator:
def __init__(self, train_data, test_data, batch_size = 32,
randomize = True, task_labels = None,
embedding_save_file = 'utils/data_iterators/cifar100_embedding.pkl',
embedding_model_file = 'utils/data_iterators/cifar100_ResNet44v1_model.171.h5',
embedding_model_layer = 'activation_42'): # 'flatten'):
assert(task_labels != None)
self.train_x, self.train_y = train_data
self.n = len(self.train_y)
print('Training examples = %d' % self.n)
self.test_x, self.test_y = test_data
self.tn = len(self.test_y)
print('Test examples = %d' % self.tn)
self.i = 0
self.batch_size = batch_size
print('Batch size = %d' % self.batch_size)
self.randomize = randomize
if randomize:
idx = np.random.permutation(self.n)
self.train_x = self.train_x[idx]
self.train_y = self.train_y[idx]
print('Shuffled training data')
self.orig_data = (np.copy(self.train_x), np.copy(self.train_y),
np.copy(self.test_x), np.copy(self.test_y))
self.embedding_save_file = embedding_save_file
self.embedding_model_file = embedding_model_file
self.embedding_model_layer = embedding_model_layer
self.reshape_dims = (64*8*8,) # (64,)
self.convert_to_embeddings()
self.n_tasks = len(task_labels)
self.task_labels = task_labels
self.n_labels_per_task = len(task_labels[0])
for t in self.task_labels: assert(len(t) == self.n_labels_per_task)
self.get_taskwise_data()
self.switch_task(0)
def img_fn_cifar100(img):
image = np.zeros((32,32,3), dtype=np.uint8)
image[...,0] = np.reshape(img[:1024], (32,32)) # Red channel
image[...,1] = np.reshape(img[1024:2048], (32,32)) # Green channel
image[...,2] = np.reshape(img[2048:], (32,32)) # Blue channel
return image
self.img_fn = img_fn_cifar100
def iterative_fn(self, fn, dataset, batches = 100):
ret = []
n = dataset.shape[0]
per_batch_size = n // batches
for i in tqdm(range(batches)):
if i+1 != batches:
ret += [fn(dataset[i*per_batch_size:(i+1)*per_batch_size])]
else:
ret += [fn(dataset[i*per_batch_size:])]
ret = np.vstack(ret)
return ret
def convert_to_embeddings(self):
if os.path.isfile(self.embedding_save_file):
print('Embedding file %s exists, skipping embedding generation.'
% self.embedding_save_file)
self.etrain_x, self.etest_x = load_pickle(self.embedding_save_file)
else:
assert(os.path.isfile(self.embedding_model_file))
model = load_model(self.embedding_model_file)
print("Loaded model: %s" % self.embedding_model_file)
train_x = self.train_x.astype('float32') / 255
train_x_mean = np.mean(train_x, axis = 0)
train_x -= train_x_mean
test_x = self.test_x.astype('float32') / 255
test_x -= train_x_mean
results = model.evaluate(test_x, to_categorical(self.test_y))
print("Test acc: %s" % results)
intermediate_layer = model.\
get_layer(self.embedding_model_layer).output
embedding_model = keras.Model(
inputs = model.input, outputs = intermediate_layer)
assert(len(self.reshape_dims) == 1)
dim = self.reshape_dims[0]
fn = lambda x: np.reshape(embedding_model.predict(x), [-1, dim])
self.etrain_x = self.iterative_fn(fn, train_x)
self.etest_x = self.iterative_fn(fn, test_x)
save_pickle([self.etrain_x, self.etest_x],
savefile = self.embedding_save_file)
clear_session()
print('Loaded embeddings.')
# Remap class labels eg. 33,2,4 => 0, 1, 2
def remap(self, x, classnums):
# print(x)
x = np.squeeze(x)
# curr_labels = np.unique(x)
# new_labels = {label: i for i, label in enumerate(curr_labels)}
new_labels = {label: i for i, label in enumerate(classnums)}
x_remapped = np.copy(x)
for i in range(x.shape[0]):
x_remapped[i] = new_labels[x[i]]
# print(np.unique(x), np.unique(x_remapped))
return x_remapped, new_labels
def get_taskwise_data(self):
self.tasks = {}
for i in range(self.n_tasks):
self.tasks[i] = {}
class_nums = self.task_labels[i]
tr_indices = np.array([np.where(self.train_y == class_num)[0] for \
class_num in class_nums]).flatten()
test_indices = np.array([np.where(self.test_y == class_num)[0] for \
class_num in class_nums]).flatten()
self.tasks[i]['train_x'] = self.etrain_x[tr_indices]
self.tasks[i]['img_train_x'] = self.train_x[tr_indices]
self.tasks[i]['train_y'], tr_labels = self.remap(self.train_y[tr_indices], class_nums)
self.tasks[i]['n'] = len(tr_indices)
if self.randomize:
idx = np.random.permutation(self.tasks[i]['n'])
self.tasks[i]['train_x'] = self.tasks[i]['train_x'][idx]
self.tasks[i]['img_train_x'] = self.tasks[i]['img_train_x'][idx]
self.tasks[i]['train_y'] = self.tasks[i]['train_y'][idx]
self.tasks[i]['test_x'] = self.etest_x[test_indices]
self.tasks[i]['img_test_x'] = self.test_x[test_indices]
self.tasks[i]['test_y'], test_labels = self.remap(self.test_y[test_indices], class_nums)
self.tasks[i]['tn'] = len(test_indices)
if self.randomize:
idx = np.random.permutation(self.tasks[i]['tn'])
self.tasks[i]['test_x'] = self.tasks[i]['test_x'][idx]
self.tasks[i]['img_test_x'] = self.tasks[i]['img_test_x'][idx]
self.tasks[i]['test_y'] = self.tasks[i]['test_y'][idx]
assert(tr_labels == test_labels)
def switch_task(self, new_task_idx):
assert(0 <= new_task_idx < self.n_tasks)
self.curr_idx = new_task_idx
self.n = self.tasks[self.curr_idx]['n']
self.tn = self.tasks[self.curr_idx]['tn']
self.train_x = self.tasks[self.curr_idx]['train_x']
self.img_train_x = self.tasks[self.curr_idx]['img_train_x']
self.train_y = np.squeeze(self.tasks[self.curr_idx]['train_y'])
self.test_x = self.tasks[self.curr_idx]['test_x']
self.img_test_x = self.tasks[self.curr_idx]['img_test_x']
self.test_y = np.squeeze(self.tasks[self.curr_idx]['test_y'])
# print('switch to %d: %s' % (new_task_idx, np.unique(self.test_y)))
def inspect(self):
    """Plot one random test example per class for every task.

    Produces an n_tasks x n_labels_per_task grid of grayscale images,
    saves it to "inspect.png" and shows it.  Side effect: leaves the
    iterator switched to the last task.
    """
    print('inspect')
    r, c = self.n_tasks, self.n_labels_per_task
    # Figure dimensions scale with the grid shape (capped / floored).
    xw = min(15, c)
    yw = max(1.5*r, 10)
    fig = plt.figure(figsize = (xw, yw))
    subplot_i = 0
    for task in range(self.n_tasks):
        self.switch_task(task)
        classes_to_show = np.unique(self.test_y)
        # Per class: every test index, then one randomly chosen example.
        all_indices = [np.where(self.test_y == class_num)[0] for class_num in classes_to_show]
        n_ex = [len(item) for item in all_indices]  # currently unused
        example_indices = [np.random.choice(item) for item in all_indices]
        examples = self.img_test_x[example_indices]
        for i, img_idx in enumerate(classes_to_show):
            ax = fig.add_subplot(r, c, subplot_i+1)
            ax.set_xticks(())
            ax.set_yticks(())
            label_human_readable = str(img_idx) # TODO: map to a class-name string
            # NOTE(review): indexing `examples` by the label value only works
            # because remapped labels run 0..c-1 in sorted order -- confirm.
            img = examples[img_idx]
            ax.set_xlabel(label_human_readable)
            plt.imshow(img, cmap='gray', interpolation='none')
            subplot_i += 1
    # plt.tight_layout(True)
    plt.savefig("inspect.png")
    plt.show()
def __iter__(self):
    """Iterator protocol: the data iterator is its own iterator."""
    return self
def __next__(self):
    """Return the next (train_x, train_y) minibatch.

    When fewer than batch_size examples remain, the cursor wraps to 0,
    silently skipping the leftover tail; iteration never raises
    StopIteration (infinite stream for training loops).
    """
    if self.i+self.batch_size > self.n:
        self.i = 0
    ret_data = self.train_x[self.i:self.i+self.batch_size]
    ret_labels = self.train_y[self.i:self.i+self.batch_size]
    self.i += self.batch_size
    return ret_data, ret_labels
def test(self, samples = 32):
idx = np.random.choice(self.tn, size = samples, replace = False)
return self.test_x[idx], self.test_y[idx] | 43.892683 | 100 | 0.599355 | import numpy as np
import matplotlib.pyplot as plt
import os
from tensorflow.keras.models import load_model
from tensorflow.keras.backend import clear_session
from keras.utils import to_categorical
import tensorflow.keras as keras
from .common import save_pickle, load_pickle
from tqdm import tqdm
class CIFAR100_DataIterator:
def __init__(self, train_data, test_data, batch_size = 32,
randomize = True, task_labels = None,
embedding_save_file = 'utils/data_iterators/cifar100_embedding.pkl',
embedding_model_file = 'utils/data_iterators/cifar100_ResNet44v1_model.171.h5',
embedding_model_layer = 'activation_42'):
assert(task_labels != None)
self.train_x, self.train_y = train_data
self.n = len(self.train_y)
print('Training examples = %d' % self.n)
self.test_x, self.test_y = test_data
self.tn = len(self.test_y)
print('Test examples = %d' % self.tn)
self.i = 0
self.batch_size = batch_size
print('Batch size = %d' % self.batch_size)
self.randomize = randomize
if randomize:
idx = np.random.permutation(self.n)
self.train_x = self.train_x[idx]
self.train_y = self.train_y[idx]
print('Shuffled training data')
self.orig_data = (np.copy(self.train_x), np.copy(self.train_y),
np.copy(self.test_x), np.copy(self.test_y))
self.embedding_save_file = embedding_save_file
self.embedding_model_file = embedding_model_file
self.embedding_model_layer = embedding_model_layer
self.reshape_dims = (64*8*8,)
self.convert_to_embeddings()
self.n_tasks = len(task_labels)
self.task_labels = task_labels
self.n_labels_per_task = len(task_labels[0])
for t in self.task_labels: assert(len(t) == self.n_labels_per_task)
self.get_taskwise_data()
self.switch_task(0)
def img_fn_cifar100(img):
image = np.zeros((32,32,3), dtype=np.uint8)
image[...,0] = np.reshape(img[:1024], (32,32))
image[...,1] = np.reshape(img[1024:2048], (32,32))
image[...,2] = np.reshape(img[2048:], (32,32))
return image
self.img_fn = img_fn_cifar100
def iterative_fn(self, fn, dataset, batches = 100):
    """Apply `fn` to `dataset` in `batches` chunks and stack the results.

    The final chunk absorbs the remainder when the dataset length is not
    an exact multiple of `batches`.
    """
    per_batch = dataset.shape[0] // batches
    outputs = []
    for batch_idx in tqdm(range(batches)):
        start = batch_idx * per_batch
        # Last batch runs to the end of the dataset (slice end None == ":").
        end = None if batch_idx == batches - 1 else start + per_batch
        outputs.append(fn(dataset[start:end]))
    return np.vstack(outputs)
def convert_to_embeddings(self):
if os.path.isfile(self.embedding_save_file):
print('Embedding file %s exists, skipping embedding generation.'
% self.embedding_save_file)
self.etrain_x, self.etest_x = load_pickle(self.embedding_save_file)
else:
assert(os.path.isfile(self.embedding_model_file))
model = load_model(self.embedding_model_file)
print("Loaded model: %s" % self.embedding_model_file)
train_x = self.train_x.astype('float32') / 255
train_x_mean = np.mean(train_x, axis = 0)
train_x -= train_x_mean
test_x = self.test_x.astype('float32') / 255
test_x -= train_x_mean
results = model.evaluate(test_x, to_categorical(self.test_y))
print("Test acc: %s" % results)
intermediate_layer = model.\
get_layer(self.embedding_model_layer).output
embedding_model = keras.Model(
inputs = model.input, outputs = intermediate_layer)
assert(len(self.reshape_dims) == 1)
dim = self.reshape_dims[0]
fn = lambda x: np.reshape(embedding_model.predict(x), [-1, dim])
self.etrain_x = self.iterative_fn(fn, train_x)
self.etest_x = self.iterative_fn(fn, test_x)
save_pickle([self.etrain_x, self.etest_x],
savefile = self.embedding_save_file)
clear_session()
print('Loaded embeddings.')
def remap(self, x, classnums):
x = np.squeeze(x)
new_labels = {label: i for i, label in enumerate(classnums)}
x_remapped = np.copy(x)
for i in range(x.shape[0]):
x_remapped[i] = new_labels[x[i]]
return x_remapped, new_labels
def get_taskwise_data(self):
self.tasks = {}
for i in range(self.n_tasks):
self.tasks[i] = {}
class_nums = self.task_labels[i]
tr_indices = np.array([np.where(self.train_y == class_num)[0] for \
class_num in class_nums]).flatten()
test_indices = np.array([np.where(self.test_y == class_num)[0] for \
class_num in class_nums]).flatten()
self.tasks[i]['train_x'] = self.etrain_x[tr_indices]
self.tasks[i]['img_train_x'] = self.train_x[tr_indices]
self.tasks[i]['train_y'], tr_labels = self.remap(self.train_y[tr_indices], class_nums)
self.tasks[i]['n'] = len(tr_indices)
if self.randomize:
idx = np.random.permutation(self.tasks[i]['n'])
self.tasks[i]['train_x'] = self.tasks[i]['train_x'][idx]
self.tasks[i]['img_train_x'] = self.tasks[i]['img_train_x'][idx]
self.tasks[i]['train_y'] = self.tasks[i]['train_y'][idx]
self.tasks[i]['test_x'] = self.etest_x[test_indices]
self.tasks[i]['img_test_x'] = self.test_x[test_indices]
self.tasks[i]['test_y'], test_labels = self.remap(self.test_y[test_indices], class_nums)
self.tasks[i]['tn'] = len(test_indices)
if self.randomize:
idx = np.random.permutation(self.tasks[i]['tn'])
self.tasks[i]['test_x'] = self.tasks[i]['test_x'][idx]
self.tasks[i]['img_test_x'] = self.tasks[i]['img_test_x'][idx]
self.tasks[i]['test_y'] = self.tasks[i]['test_y'][idx]
assert(tr_labels == test_labels)
def switch_task(self, new_task_idx):
assert(0 <= new_task_idx < self.n_tasks)
self.curr_idx = new_task_idx
self.n = self.tasks[self.curr_idx]['n']
self.tn = self.tasks[self.curr_idx]['tn']
self.train_x = self.tasks[self.curr_idx]['train_x']
self.img_train_x = self.tasks[self.curr_idx]['img_train_x']
self.train_y = np.squeeze(self.tasks[self.curr_idx]['train_y'])
self.test_x = self.tasks[self.curr_idx]['test_x']
self.img_test_x = self.tasks[self.curr_idx]['img_test_x']
self.test_y = np.squeeze(self.tasks[self.curr_idx]['test_y'])
def inspect(self):
print('inspect')
r, c = self.n_tasks, self.n_labels_per_task
xw = min(15, c)
yw = max(1.5*r, 10)
fig = plt.figure(figsize = (xw, yw))
subplot_i = 0
for task in range(self.n_tasks):
self.switch_task(task)
classes_to_show = np.unique(self.test_y)
all_indices = [np.where(self.test_y == class_num)[0] for class_num in classes_to_show]
n_ex = [len(item) for item in all_indices]
example_indices = [np.random.choice(item) for item in all_indices]
examples = self.img_test_x[example_indices]
for i, img_idx in enumerate(classes_to_show):
ax = fig.add_subplot(r, c, subplot_i+1)
ax.set_xticks(())
ax.set_yticks(())
label_human_readable = str(img_idx)
img = examples[img_idx]
ax.set_xlabel(label_human_readable)
plt.imshow(img, cmap='gray', interpolation='none')
subplot_i += 1
plt.savefig("inspect.png")
plt.show()
def __iter__(self):
return self
def __next__(self):
if self.i+self.batch_size > self.n:
self.i = 0
ret_data = self.train_x[self.i:self.i+self.batch_size]
ret_labels = self.train_y[self.i:self.i+self.batch_size]
self.i += self.batch_size
return ret_data, ret_labels
def test(self, samples = 32):
idx = np.random.choice(self.tn, size = samples, replace = False)
return self.test_x[idx], self.test_y[idx] | true | true |
f739a1518b03b04ccd669f466a9ce6a25ca6b6d6 | 354 | py | Python | b2share/records/__init__.py | hjhsalo/b2share-new | 2a2a961f7cc3a5353850e9a409fd7e879c715b0b | [
"MIT"
] | null | null | null | b2share/records/__init__.py | hjhsalo/b2share-new | 2a2a961f7cc3a5353850e9a409fd7e879c715b0b | [
"MIT"
] | null | null | null | b2share/records/__init__.py | hjhsalo/b2share-new | 2a2a961f7cc3a5353850e9a409fd7e879c715b0b | [
"MIT"
] | 1 | 2020-09-29T10:56:03.000Z | 2020-09-29T10:56:03.000Z | # -*- coding: utf-8 -*-
#
# Copyright (C) 2020 EUDAT.
#
# B2SHARE is free software; you can redistribute it and/or modify it under
# the terms of the MIT License; see LICENSE file for more details.
"""EUDAT Collaborative Data Infrastructure."""
from __future__ import absolute_import, print_function
from .ext import B2SHARE
__all__ = ('B2SHARE', )
| 22.125 | 74 | 0.728814 |
from __future__ import absolute_import, print_function
from .ext import B2SHARE
__all__ = ('B2SHARE', )
| true | true |
f739a270319b2d8d31892c372cc46ea5a17414c5 | 128 | py | Python | cma_es/__init__.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | cma_es/__init__.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | cma_es/__init__.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | from __future__ import division
import boost.python
cma_es_ext = boost.python.import_ext("cma_es_ext")
from cma_es_ext import *
| 25.6 | 50 | 0.828125 | from __future__ import division
import boost.python
cma_es_ext = boost.python.import_ext("cma_es_ext")
from cma_es_ext import *
| true | true |
f739a2dbd892235c33c043932a06a72ce6ceade8 | 1,395 | py | Python | c42_csr2postman/models/interfaces.py | 42c-presales/42c-report-scan-to-postman | 1d9a4965d54d2fd28944367894896f2ace9c74e9 | [
"Apache-2.0"
] | 1 | 2021-12-30T21:20:35.000Z | 2021-12-30T21:20:35.000Z | c42_csr2postman/models/interfaces.py | 42c-presales/42c-report-scan-to-postman | 1d9a4965d54d2fd28944367894896f2ace9c74e9 | [
"Apache-2.0"
] | null | null | null | c42_csr2postman/models/interfaces.py | 42c-presales/42c-report-scan-to-postman | 1d9a4965d54d2fd28944367894896f2ace9c74e9 | [
"Apache-2.0"
] | null | null | null | from enum import Enum
#
# This code was taken from:
# https://gist.github.com/cr0hn/89172938b7ac42c3100f4980ad881a24
#
class Serializable:
    """Mixin that turns an object's __dict__ into a JSON-friendly dict.

    Keys beginning with "raw" are dropped unless raw mode is requested;
    nested Serializable-like objects, lists, dicts, Enums and byte
    strings are converted recursively.
    """

    def _clean_dict_(self,
                     data = None,
                     clean_or_raw: str = "clean") -> dict:
        """Recursively convert `data` into plain JSON-serializable values."""
        if type(data) is dict:
            # Skip "raw*" keys in clean mode; recurse into every kept value.
            return {
                key: self._clean_dict_(value, clean_or_raw=clean_or_raw)
                for key, value in data.items()
                if not (clean_or_raw == "clean" and key.startswith("raw"))
            }
        if type(data) is list:
            return [self._clean_dict_(item, clean_or_raw=clean_or_raw)
                    for item in data]
        if hasattr(data, "clean_dict"):
            # Nested Serializable-like object: delegate to its own converter.
            return data.clean_dict(clean_or_raw=clean_or_raw)
        if isinstance(data, Enum):
            return data.value
        if hasattr(data, "decode"):
            # bytes-like value: decode to str.
            return data.decode()
        return data

    def clean_dict(self,
                   clean_or_raw: str = "clean") -> dict:
        """Serialize self.__dict__, dropping 'raw*' fields in clean mode."""
        return self._clean_dict_(self.__dict__, clean_or_raw=clean_or_raw)

    def raw_dict(self) -> dict:
        """Serialize self.__dict__, keeping the 'raw*' fields."""
        return self.clean_dict(clean_or_raw="raw")
| 23.644068 | 75 | 0.542652 | from enum import Enum
class Serializable:
    """Mixin that turns an object's __dict__ into a JSON-friendly dict."""
    def _clean_dict_(self,
                     data = None,
                     clean_or_raw: str = "clean") -> dict:
        """Recursively convert `data` into plain JSON-serializable values.

        In "clean" mode, dict keys starting with "raw" are dropped.
        """
        if type(data) is dict:
            ret = {}
            for x, y in data.items():
                # Skip "raw*" keys unless raw mode is requested.
                if x.startswith("raw") and clean_or_raw == "clean":
                    continue
                ret[x] = self._clean_dict_(y, clean_or_raw=clean_or_raw)
            return ret
        elif type(data) is list:
            ret = []
            for d in data:
                ret.append(self._clean_dict_(d, clean_or_raw=clean_or_raw))
            return ret
        elif hasattr(data, "clean_dict"):
            # Nested Serializable-like object: delegate to its own converter.
            return data.clean_dict(clean_or_raw=clean_or_raw)
        elif isinstance(data, Enum):
            return data.value
        else:
            # bytes-like values are decoded to str; anything else passes through.
            if hasattr(data, "decode"):
                return data.decode()
            return data
    def clean_dict(self,
                   clean_or_raw: str = "clean") -> dict:
        """Serialize self.__dict__, dropping 'raw*' fields in clean mode."""
        return self._clean_dict_(self.__dict__, clean_or_raw=clean_or_raw)
    def raw_dict(self) -> dict:
        """Serialize self.__dict__, keeping the 'raw*' fields."""
        return self.clean_dict(clean_or_raw="raw")
| true | true |
f739a6c60de2b2fb645d7927730f0bfd084ab556 | 927 | py | Python | app/admin/forms/role_form.py | erics1996/D5-Video | cb07e211c821e805296f24d28c80ac6fb99bfd5d | [
"Apache-2.0"
] | 1 | 2020-09-26T14:03:48.000Z | 2020-09-26T14:03:48.000Z | app/admin/forms/role_form.py | erics1996/d5_video | cb07e211c821e805296f24d28c80ac6fb99bfd5d | [
"Apache-2.0"
] | null | null | null | app/admin/forms/role_form.py | erics1996/d5_video | cb07e211c821e805296f24d28c80ac6fb99bfd5d | [
"Apache-2.0"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, SelectMultipleField
from wtforms.validators import DataRequired
from ...models import Auth
# Evaluated once at import time: the permission choices below are frozen at
# module load, so Auth rows added afterwards will not appear until restart.
# NOTE(review): consider rebuilding choices per-request -- confirm intent.
auth_list = Auth.query.all()


class RoleForm(FlaskForm):
    """Admin form for creating/editing a role and its permission set."""

    # Role name text field (labels/placeholders are user-facing Chinese copy).
    name = StringField(
        label="角色名称",
        validators=[
            DataRequired("请输入角色名称!")
        ],
        description="角色名称",
        render_kw={
            "class": "form-control",
            "id": "input_name",
            "placeholder": "请输入角色名称!",
        }
    )
    # Multi-select of Auth permission ids; choices come from auth_list above.
    auth = SelectMultipleField(
        label="操作权限",
        validators=[
            DataRequired("请选择操作权限!")
        ],
        coerce=int,
        choices=[(v.id, v.name) for v in auth_list],
        description="操作权限",
        render_kw={
            "class": "form-control",
        }
    )
    # Submit button.
    submit = SubmitField(
        '添加',
        render_kw={
            "class": "btn btn-primary"
        }
    )
| 23.175 | 65 | 0.536138 | from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, SelectMultipleField
from wtforms.validators import DataRequired
from ...models import Auth
auth_list = Auth.query.all()
class RoleForm(FlaskForm):
name = StringField(
label="角色名称",
validators=[
DataRequired("请输入角色名称!")
],
description="角色名称",
render_kw={
"class": "form-control",
"id": "input_name",
"placeholder": "请输入角色名称!",
}
)
auth = SelectMultipleField(
label="操作权限",
validators=[
DataRequired("请选择操作权限!")
],
coerce=int,
choices=[(v.id, v.name) for v in auth_list],
description="操作权限",
render_kw={
"class": "form-control",
}
)
submit = SubmitField(
'添加',
render_kw={
"class": "btn btn-primary"
}
)
| true | true |
f739a745aa5a59a1a22b1c74871f419c2106250e | 413 | py | Python | python_modules/libraries/dagster-postgres/dagster_postgres/alembic/versions/b601eb913efa_add_tick_selector_index.py | dehume/dagster | 3b55c4e864775b7a70ed8ff539629317a1202505 | [
"Apache-2.0"
] | null | null | null | python_modules/libraries/dagster-postgres/dagster_postgres/alembic/versions/b601eb913efa_add_tick_selector_index.py | dehume/dagster | 3b55c4e864775b7a70ed8ff539629317a1202505 | [
"Apache-2.0"
] | null | null | null | python_modules/libraries/dagster-postgres/dagster_postgres/alembic/versions/b601eb913efa_add_tick_selector_index.py | dehume/dagster | 3b55c4e864775b7a70ed8ff539629317a1202505 | [
"Apache-2.0"
] | null | null | null | """add tick selector index
Revision ID: b601eb913efa
Revises: 16e3115a602a
Create Date: 2022-03-25 10:28:53.372766
"""
from dagster.core.storage.migration.utils import create_tick_selector_index
# revision identifiers, used by Alembic.
revision = "b601eb913efa"
down_revision = "16e3115a602a"
branch_labels = None
depends_on = None
def upgrade():
create_tick_selector_index()
def downgrade():
pass
| 17.956522 | 75 | 0.772397 | from dagster.core.storage.migration.utils import create_tick_selector_index
revision = "b601eb913efa"
down_revision = "16e3115a602a"
branch_labels = None
depends_on = None
def upgrade():
create_tick_selector_index()
def downgrade():
pass
| true | true |
f739aa00433ed35b99af31f1e5fd4de6fccab999 | 1,142 | py | Python | scripts/addons/animation_nodes/nodes/mesh/bmesh_create.py | Tilapiatsu/blender-custom_conf | 05592fedf74e4b7075a6228b8448a5cda10f7753 | [
"MIT"
] | 2 | 2020-04-16T22:12:40.000Z | 2022-01-22T17:18:45.000Z | scripts/addons/animation_nodes/nodes/mesh/bmesh_create.py | Tilapiatsu/blender-custom_conf | 05592fedf74e4b7075a6228b8448a5cda10f7753 | [
"MIT"
] | null | null | null | scripts/addons/animation_nodes/nodes/mesh/bmesh_create.py | Tilapiatsu/blender-custom_conf | 05592fedf74e4b7075a6228b8448a5cda10f7753 | [
"MIT"
] | 2 | 2019-05-16T04:01:09.000Z | 2020-08-25T11:42:26.000Z | import bpy
import bmesh
from bpy.props import *
from ... base_types import AnimationNode
class CreateBMeshFromMesh(bpy.types.Node, AnimationNode):
bl_idname = "an_CreateBMeshFromMeshNode"
bl_label = "Create BMesh"
errorHandlingType = "EXCEPTION"
def create(self):
self.newInput("Mesh", "Mesh", "meshData")
self.newOutput("BMesh", "BMesh", "bm")
def execute(self, meshData):
try:
return getBMeshFromMesh(meshData)
except IndexError as e:
self.raiseErrorMessage("Missing vertices")
except ValueError as e:
self.raiseErrorMessage("Multiple identical edges or polygons")
def getBMeshFromMesh(meshData):
bm = bmesh.new()
for co in meshData.vertices:
bm.verts.new(co)
# for Blender Version >= 2.73
try: bm.verts.ensure_lookup_table()
except: pass
for edgeIndices in meshData.edges:
bm.edges.new((bm.verts[edgeIndices[0]], bm.verts[edgeIndices[1]]))
for polygonIndices in meshData.polygons:
bm.faces.new(tuple(bm.verts[index] for index in polygonIndices))
bm.normal_update()
return bm
| 28.55 | 74 | 0.669877 | import bpy
import bmesh
from bpy.props import *
from ... base_types import AnimationNode
class CreateBMeshFromMesh(bpy.types.Node, AnimationNode):
bl_idname = "an_CreateBMeshFromMeshNode"
bl_label = "Create BMesh"
errorHandlingType = "EXCEPTION"
def create(self):
self.newInput("Mesh", "Mesh", "meshData")
self.newOutput("BMesh", "BMesh", "bm")
def execute(self, meshData):
try:
return getBMeshFromMesh(meshData)
except IndexError as e:
self.raiseErrorMessage("Missing vertices")
except ValueError as e:
self.raiseErrorMessage("Multiple identical edges or polygons")
def getBMeshFromMesh(meshData):
bm = bmesh.new()
for co in meshData.vertices:
bm.verts.new(co)
try: bm.verts.ensure_lookup_table()
except: pass
for edgeIndices in meshData.edges:
bm.edges.new((bm.verts[edgeIndices[0]], bm.verts[edgeIndices[1]]))
for polygonIndices in meshData.polygons:
bm.faces.new(tuple(bm.verts[index] for index in polygonIndices))
bm.normal_update()
return bm
| true | true |
f739aacd030ba09bfd0eb44145da5d5dbad40ed4 | 4,934 | py | Python | PDF_Generation_and_Text_Summarization/src/main.py | kanishkaa24/Telegram-bots | f91532244e8950a9e9c74dd76cf64e9bd2e23c04 | [
"MIT"
] | null | null | null | PDF_Generation_and_Text_Summarization/src/main.py | kanishkaa24/Telegram-bots | f91532244e8950a9e9c74dd76cf64e9bd2e23c04 | [
"MIT"
] | 7 | 2021-07-01T18:02:45.000Z | 2021-07-14T07:03:10.000Z | PDF_Generation_and_Text_Summarization/src/main.py | kanishkaa24/Telegram-bots | f91532244e8950a9e9c74dd76cf64e9bd2e23c04 | [
"MIT"
] | 9 | 2021-07-01T18:02:03.000Z | 2021-07-11T10:20:34.000Z | from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from sumy.parsers.html import HtmlParser
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lsa import LsaSummarizer as Summarizer
from sumy.nlp.stemmers import Stemmer
from sumy.utils import get_stop_words
import os
import telebot
import convertapi
import requests
import nltk
nltk.download("punkt")
LANGUAGE = "english"
#Set up the environment variables.
api_key = os.environ['API_KEY']
#taken from the website https://www.convertapi.com/
#Set up the environment variables after getting an api key.
file_conversion_api_key = os.environ['CONVERSION_API_KEY']
#Authentication
convertapi.api_secret = file_conversion_api_key
bot = telebot.TeleBot(api_key)
#Function to store number of sentences to be kept in the summary.
def ask_no_of_sentences(message):
    """Store the requested summary length, then ask for the .txt file.

    Parses the user's reply as an int into the module-global
    SENTENCES_COUNT and chains to summarize(); on a non-numeric reply,
    re-prompts the user.
    """
    try:
        global SENTENCES_COUNT
        SENTENCES_COUNT = int(message.text)
        msg = bot.send_message(message.chat.id, "Please send the .txt file containing the text to be summarized.")
        bot.register_next_step_handler(msg, summarize)
    except Exception as e:
        bot.send_message(message.chat.id, "Please only send the number of sentences.")
#Function to summarize text.
def summarize(message):
    """Summarize the document the user attached to `message`.

    Downloads the sent file, runs sumy's LSA summarizer over it and
    replies with SENTENCES_COUNT sentences.

    Bug fix: sentences were previously joined with "" and ran together
    with no separator; join them with a single space instead.
    """
    try:
        file_id = message.document.file_id
        url = bot.get_file_url(file_id)
        parser = HtmlParser.from_url(url, Tokenizer(LANGUAGE))
        stemmer = Stemmer(LANGUAGE)
        summarizer = Summarizer(stemmer)
        summarizer.stop_words = get_stop_words(LANGUAGE)
        summary_sentences = [str(sentence)
                             for sentence in summarizer(parser.document, SENTENCES_COUNT)]
        # Space-separated so consecutive sentences stay readable.
        bot.send_message(message.chat.id, " ".join(summary_sentences))
    except Exception:
        # Keep the bot alive on any bad input; reply generically.
        bot.send_message(message.chat.id, "Something went wrong.")
#Function to convert documents to Pdf.
def convert_to_Pdf(message):
    """Convert the document attached to `message` to PDF and send it back."""
    try:
        # Resolve a downloadable URL for the user's file.
        document_url = bot.get_file_url(message.document.file_id)
        # ConvertAPI performs the actual conversion.
        converted = convertapi.convert('pdf', {'File': document_url})
        bot.send_document(message.chat.id, converted.file.url)
    except Exception:
        bot.send_message(message.chat.id,"Seems like the document format is not supported. Please try converting on this website instead \n https://www.convertapi.com/ Sorry for inconvenience.")
#Function to convert images of format png,jpg and jpeg to pdf files.
def img_to_pdf(message):
    """Convert a photo sent by the user to PDF and send it back.

    Bug fix: `message.photo` is a list of PhotoSize variants ordered
    from smallest to largest; indexing with 0 converted only the
    low-resolution thumbnail.  Use the last entry so the
    full-resolution image is converted.
    """
    try:
        # Largest available rendition of the photo.
        file_id = message.photo[-1].file_id
        # Resolve a downloadable URL for the Telegram file.
        path = bot.get_file_url(file_id)
        # Delegate the actual conversion to ConvertAPI.
        result = convertapi.convert('pdf', {'File': path})
        # Reply with the converted document.
        bot.send_document(message.chat.id,result.file.url)
    except Exception:
        bot.send_message(message.chat.id,"Seems like either the file type doesnot match or the file is to big for conversion. Please try converting on this website instead \n https://www.convertapi.com/ Sorry For inconvenience.")
#Function to select the options for user.
def provide_functionality(message):
    """Route the user's menu choice (text "1"-"4") to the matching feature."""
    try:
        choice = message.text
        if choice == "1":
            prompt = bot.send_message(message.chat.id, "Please send the document you wish to convert.")
            bot.register_next_step_handler(prompt, convert_to_Pdf)
        elif choice == "2":
            prompt = bot.send_message(message.chat.id, "Please send the image you wish to convert.")
            bot.register_next_step_handler(prompt, img_to_pdf)
        elif choice == "3":
            prompt = bot.send_message(message.chat.id, "Please send the number of sentences you wish to have in summary.")
            bot.register_next_step_handler(prompt, ask_no_of_sentences)
        elif choice == "4":
            exit(message)
        else:
            # Unknown input: re-prompt and loop back into this handler.
            prompt = bot.send_message(message.chat.id,"Please select an appropriate option. Thank you!")
            bot.register_next_step_handler(prompt, provide_functionality)
    except Exception:
        bot.reply_to(message, 'Something went wrong!')
#Function for exiting.
@bot.message_handler(commands=['Exit'])
def exit(message):
    """Handle /Exit: send a farewell message and end the conversation.

    NOTE(review): this shadows the builtin `exit`; provide_functionality
    calls this module-level name, so the name is kept for compatibility.
    """
    bot.send_message(message.chat.id, "OK, Thank You very much for your patience. Apologies for any kind of trouble you might have faced. It was great talking to you.")
@bot.message_handler(content_types=['text'])
def markup_eg(message):
    """Entry point for any text message: show the main menu, await a choice."""
    msg = bot.send_message(message.chat.id, "Hey there, Following are the functionalities I can provide you.\n Press the number you prefer...\n 1. Convert Doc to Pdf \n 2. Convert Images to PDF \n 3. Summarize a text \n 4. Exit")
    bot.register_next_step_handler(msg, provide_functionality)
bot.enable_save_next_step_handlers(delay=1)
bot.load_next_step_handlers()
bot.polling()
| 39.790323 | 227 | 0.747669 | from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from sumy.parsers.html import HtmlParser
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lsa import LsaSummarizer as Summarizer
from sumy.nlp.stemmers import Stemmer
from sumy.utils import get_stop_words
import os
import telebot
import convertapi
import requests
import nltk
nltk.download("punkt")
LANGUAGE = "english"
api_key = os.environ['API_KEY']
file_conversion_api_key = os.environ['CONVERSION_API_KEY']
convertapi.api_secret = file_conversion_api_key
bot = telebot.TeleBot(api_key)
def ask_no_of_sentences(message):
try:
global SENTENCES_COUNT
SENTENCES_COUNT = int(message.text)
msg = bot.send_message(message.chat.id, "Please send the .txt file containing the text to be summarized.")
bot.register_next_step_handler(msg, summarize)
except Exception as e:
bot.send_message(message.chat.id, "Please only send the number of sentences.")
def summarize(message):
try:
file_id = message.document.file_id
url = bot.get_file_url(file_id)
parser = HtmlParser.from_url(url, Tokenizer(LANGUAGE))
stemmer = Stemmer(LANGUAGE)
summarizer = Summarizer(stemmer)
summarizer.stop_words = get_stop_words(LANGUAGE)
l=[]
for sentence in summarizer(parser.document, SENTENCES_COUNT):
l.append(str(sentence))
sentences = "".join(l)
bot.send_message(message.chat.id, sentences)
except Exception as e:
bot.send_message(message.chat.id, "Something went wrong.")
def convert_to_Pdf(message):
try:
file_id = message.document.file_id
path = bot.get_file_url(file_id)
result = convertapi.convert('pdf', { 'File': path })
bot.send_document(message.chat.id,result.file.url)
except Exception as e:
bot.send_message(message.chat.id,"Seems like the document format is not supported. Please try converting on this website instead \n https://www.convertapi.com/ Sorry for inconvenience.")
def img_to_pdf(message):
try:
file_id = message.photo[0].file_id
path = bot.get_file_url(file_id)
result = convertapi.convert('pdf', {'File': path})
bot.send_document(message.chat.id,result.file.url)
except Exception as e:
bot.send_message(message.chat.id,"Seems like either the file type doesnot match or the file is to big for conversion. Please try converting on this website instead \n https://www.convertapi.com/ Sorry For inconvenience.")
def provide_functionality(message):
try:
if(message.text == "1"):
msg = bot.send_message(message.chat.id, "Please send the document you wish to convert.")
bot.register_next_step_handler(msg, convert_to_Pdf)
elif(message.text == "2"):
msg = bot.send_message(message.chat.id, "Please send the image you wish to convert.")
bot.register_next_step_handler(msg, img_to_pdf)
elif(message.text == "3"):
msg = bot.send_message(message.chat.id, "Please send the number of sentences you wish to have in summary.")
bot.register_next_step_handler(msg, ask_no_of_sentences)
elif(message.text == "4"):
exit(message)
else:
msg = bot.send_message(message.chat.id,"Please select an appropriate option. Thank you!")
bot.register_next_step_handler(msg, provide_functionality)
except Exception as e:
bot.reply_to(message, 'Something went wrong!')
@bot.message_handler(commands=['Exit'])
def exit(message):
bot.send_message(message.chat.id, "OK, Thank You very much for your patience. Apologies for any kind of trouble you might have faced. It was great talking to you.")
@bot.message_handler(content_types=['text'])
def markup_eg(message):
msg = bot.send_message(message.chat.id, "Hey there, Following are the functionalities I can provide you.\n Press the number you prefer...\n 1. Convert Doc to Pdf \n 2. Convert Images to PDF \n 3. Summarize a text \n 4. Exit")
bot.register_next_step_handler(msg, provide_functionality)
bot.enable_save_next_step_handlers(delay=1)
bot.load_next_step_handlers()
bot.polling()
| true | true |
f739ab0c9b82f092ac736775b4d3e4a637761850 | 2,206 | py | Python | datahub/omis/payment/test/test_utils.py | Staberinde/data-hub-api | 3d0467dbceaf62a47158eea412a3dba827073300 | [
"MIT"
] | 6 | 2019-12-02T16:11:24.000Z | 2022-03-18T10:02:02.000Z | datahub/omis/payment/test/test_utils.py | Staberinde/data-hub-api | 3d0467dbceaf62a47158eea412a3dba827073300 | [
"MIT"
] | 1,696 | 2019-10-31T14:08:37.000Z | 2022-03-29T12:35:57.000Z | datahub/omis/payment/test/test_utils.py | Staberinde/data-hub-api | 3d0467dbceaf62a47158eea412a3dba827073300 | [
"MIT"
] | 9 | 2019-11-22T12:42:03.000Z | 2021-09-03T14:25:05.000Z | from dateutil.parser import parse as dateutil_parse
from datahub.omis.payment.constants import PaymentMethod
from datahub.omis.payment.utils import trasform_govuk_payment_to_omis_payment_data
class TestGetOmisPaymentDataFromGovukPayment:
    """Tests for the `trasform_govuk_payment_to_omis_payment_data` function."""

    def test_with_non_success_response_returns_none(self):
        """A GOV.UK payment whose status is not `success` yields None."""
        payment = {'state': {'status': 'created'}}
        assert not trasform_govuk_payment_to_omis_payment_data(payment)

    def test_data(self):
        """A successful GOV.UK payment is mapped onto OMIS payment fields."""
        payment = {
            'amount': 1234,
            'state': {'status': 'success'},
            'email': 'email@example.com',
            'created_date': '2018-02-13T14:56:56.734Z',
            'reference': '12345',
            'card_details': {
                'last_digits_card_number': '1111',
                'cardholder_name': 'John Doe',
                'expiry_date': '01/20',
                'billing_address': {
                    'line1': 'line 1 address',
                    'line2': 'line 2 address',
                    'postcode': 'SW1A 1AA',
                    'city': 'London',
                    'country': 'GB',
                },
                'card_brand': 'Visa',
            },
        }
        expected = {
            'amount': 1234,
            'method': PaymentMethod.CARD,
            'received_on': dateutil_parse('2018-02-13').date(),
            'transaction_reference': '12345',
            'cardholder_name': 'John Doe',
            'card_brand': 'Visa',
            'billing_email': 'email@example.com',
            'billing_address_1': 'line 1 address',
            'billing_address_2': 'line 2 address',
            'billing_address_town': 'London',
            'billing_address_postcode': 'SW1A 1AA',
            'billing_address_country': 'GB',
        }
        assert trasform_govuk_payment_to_omis_payment_data(payment) == expected
| 35.015873 | 82 | 0.548504 | from dateutil.parser import parse as dateutil_parse
from datahub.omis.payment.constants import PaymentMethod
from datahub.omis.payment.utils import trasform_govuk_payment_to_omis_payment_data
class TestGetOmisPaymentDataFromGovukPayment:
def test_with_non_success_response_returns_none(self):
govuk_payment = {
'state': {
'status': 'created',
},
}
assert not trasform_govuk_payment_to_omis_payment_data(govuk_payment)
def test_data(self):
govuk_payment = {
'amount': 1234,
'state': {'status': 'success'},
'email': 'email@example.com',
'created_date': '2018-02-13T14:56:56.734Z',
'reference': '12345',
'card_details': {
'last_digits_card_number': '1111',
'cardholder_name': 'John Doe',
'expiry_date': '01/20',
'billing_address': {
'line1': 'line 1 address',
'line2': 'line 2 address',
'postcode': 'SW1A 1AA',
'city': 'London',
'country': 'GB',
},
'card_brand': 'Visa',
},
}
payment_data = trasform_govuk_payment_to_omis_payment_data(govuk_payment)
assert payment_data == {
'amount': 1234,
'method': PaymentMethod.CARD,
'received_on': dateutil_parse('2018-02-13').date(),
'transaction_reference': '12345',
'cardholder_name': 'John Doe',
'card_brand': 'Visa',
'billing_email': 'email@example.com',
'billing_address_1': 'line 1 address',
'billing_address_2': 'line 2 address',
'billing_address_town': 'London',
'billing_address_postcode': 'SW1A 1AA',
'billing_address_country': 'GB',
}
| true | true |
f739ab2e7da05df02e8a0bb23f983d4e84291de6 | 7,439 | py | Python | gennav/envs/binaryOccupancyGrid2D_env.py | pranavgo/gennav | fc57707912c6f1c6af208a30b2ab0ad78c2cc798 | [
"MIT"
] | null | null | null | gennav/envs/binaryOccupancyGrid2D_env.py | pranavgo/gennav | fc57707912c6f1c6af208a30b2ab0ad78c2cc798 | [
"MIT"
] | null | null | null | gennav/envs/binaryOccupancyGrid2D_env.py | pranavgo/gennav | fc57707912c6f1c6af208a30b2ab0ad78c2cc798 | [
"MIT"
] | null | null | null | import numpy as np
from gennav.envs.base import Environment
from gennav.utils.common import RobotState
from gennav.utils.geometry import Point
from matplotlib import pyplot as plt
class BinaryOccupancyGrid2DEnv(Environment):
    """Base class for a binary occupancy-grid 2D environment.

    The grid is rebuilt from each laser scan: cells containing a scan
    return are marked 1 (occupied), all others 0 (free).

    Arguments:
        X (int): The number of grid cells in the x-direction.
        Y (int): The number of grid cells in the y-direction.
    """

    def __init__(self, X=10, Y=10):
        super(BinaryOccupancyGrid2DEnv, self).__init__()
        self.X = X
        self.Y = Y
        self.scan = None
        self.robotPose = None
        self.scale = 5  # sub-cells per grid cell
        self.grid = np.zeros((self.X * self.scale, self.Y * self.scale))
        # Homogeneous (3x3) transforms between the map, bot and world frames.
        self.transforms = {}
        self.mapTbot = {
            "from": "map",
            "to": "bot",
            "transform": self.scale
            * np.array(
                [[1, 0, int(self.X / 2)], [0, 1, int(self.Y / 2)], [0, 0, 1]]
            ).reshape(3, 3),
        }
        # Filled in by compute_transforms() once a robot pose is known.
        self.botTworld = {"from": "bot", "to": "world", "transform": np.empty((3, 3))}
        self.mapTworld = {
            "from": "map",
            "to": "world",
            "transform": np.dot(self.mapTbot["transform"], self.botTworld["transform"]),
        }
        self.transforms["mapTbot"] = self.mapTbot
        self.transforms["botTworld"] = self.botTworld
        self.transforms["mapTworld"] = self.mapTworld

    def update(self, scan, robotPose):
        """Update the environment with a new scan and robot pose.

        Args:
            scan (tuple): (ang_min, ang_max, ranges) of the laser scan.
            robotPose (gennav.utils.RobotPose): Current robot pose (world frame).
        """
        self.scan = scan
        self.robotPose = robotPose
        self.compute_transforms()
        self.fillOccupancy()

    def fillOccupancy(self):
        """Rebuild the occupancy grid from the latest laser scan.

        Assumptions:
            1. Scan ranges are relative to the robot, so the robot is treated
               as the origin of the bot frame (the grid centre).
            2. ``self.robotPose`` (world frame) is only used for transforms,
               not here.
        """
        self.grid[:] = 0
        ang_min, ang_max, ranges = self.scan
        angle_step = (ang_max - ang_min) / len(ranges)
        for i, rng in enumerate(ranges):
            # BUGFIX: was ``np.abs(rng) is not np.inf`` -- an identity test
            # that is always True, so "no return" readings were never skipped.
            if not np.isinf(rng):
                angle = ang_min + i * angle_step
                # BUGFIX: the y component previously used ang_max instead of
                # ang_min, skewing every converted scan point.
                x, y = rng * np.cos(angle), rng * np.sin(angle)
                newState = self.transform("bot", "map", RobotState(Point(x, y, 0)))
                x_, y_ = newState.position.x, newState.position.y
                # Mark the cell occupied if it falls inside the grid.
                if 0 <= x_ < self.grid.shape[0] and 0 <= y_ < self.grid.shape[1]:
                    self.grid[int(x_)][int(-y_ - 1)] = 1

    def get_status(self, state):
        """Get whether a given state is valid within the environment.

        Args:
            state (gennav.utils.RobotState): State to be checked (world frame).

        Returns:
            bool: True if state is collision free, otherwise False.
        """
        state = self.transform("world", "map", state)
        # Cast to int so float poses index the grid instead of raising.
        x, y = int(state.position.x), int(state.position.y)
        return self.grid[x][-y - 1] != 1

    def get_traj_status(self, traj):
        """Get whether a given trajectory is valid within the environment.

        Args:
            traj (gennav.utils.Trajectory): Trajectory to be checked.

        Returns:
            bool: True if every segment is collision free, otherwise False.
        """
        for i in range(len(traj.path) - 1):
            if self.check_line_segment(
                self.transform("world", "map", traj.path[i]),
                self.transform("world", "map", traj.path[i + 1]),
            ):
                return False
        return True

    def transform(self, frame1, frame2, rsf1):
        """Transform a robot state from one frame to another.

        Args:
            frame1 (str): Source frame ("world", "bot" or "map").
            frame2 (str): Target frame ("world", "bot" or "map").
            rsf1 (gennav.utils.common.RobotState): RobotState in frame1.

        Returns:
            gennav.utils.common.RobotState: RobotState in frame2.

        Raises:
            Exception: If neither the transform nor its inverse is known.
        """
        # Use the stored transform if available, else invert the reverse one.
        frame = frame2 + "T" + frame1
        frame_inv = frame1 + "T" + frame2
        if frame in self.transforms.keys():
            t_matrix = self.transforms[frame]["transform"]
        elif frame_inv in self.transforms.keys():
            t_matrix = np.linalg.inv(self.transforms[frame_inv]["transform"])
        else:
            raise Exception("Transform for the frames not found")
        # Apply the homogeneous transform to the (x, y, 1) position vector.
        pf2 = np.dot(
            t_matrix, np.array([rsf1.position.x, rsf1.position.y, 1]).reshape(3, 1)
        )
        return RobotState(position=Point(pf2[0].item(), pf2[1].item()))

    def compute_transforms(self):
        """Recompute the pose-dependent transforms from the robot pose.

        Uses the robot pose to compute the transform between the world frame
        and the bot frame (and, from it, map<-world).
        """
        x, y, yaw = (
            self.robotPose.position.x,
            self.robotPose.position.y,
            self.robotPose.orientation.yaw,
        )
        worldTbot = np.array(
            [[np.cos(yaw), -np.sin(yaw), x], [np.sin(yaw), np.cos(yaw), y], [0, 0, 1]]
        ).reshape(3, 3)
        self.botTworld["transform"] = np.linalg.inv(worldTbot)
        self.mapTworld["transform"] = np.dot(
            self.mapTbot["transform"], self.botTworld["transform"]
        )

    def visualise_grid(self):
        """Helper to show the occupancy grid with matplotlib (debug only)."""
        plt.imshow(self.grid, origin="bottom", cmap="binary")
        plt.show()

    def check_line_segment(self, state1, state2):
        """Return True if the segment between two map-frame states is blocked.

        The segment is sampled every 0.5 cells.  Unlike the original, this
        also handles vertical segments (previously ZeroDivisionError) and
        endpoints given in descending x order (previously never sampled).

        Args:
            state1 (gennav.utils.common.RobotState): One end point.
            state2 (gennav.utils.common.RobotState): The other end point.
        """
        x1, y1 = state1.position.x, state1.position.y
        x2, y2 = state2.position.x, state2.position.y
        if x1 > x2:
            # Sample in increasing x order.
            x1, y1, x2, y2 = x2, y2, x1, y1
        if x1 == x2:
            # Vertical segment: walk along y instead.
            for y in np.arange(min(y1, y2), max(y1, y2), 0.5):
                if self.grid[int(x1)][int(-y - 1)] == 1:
                    return True
            return False
        m = (y2 - y1) / (x2 - x1)
        for x in np.arange(x1, x2, 0.5):
            y = m * (x - x1) + y1
            if self.grid[int(x)][int(-y - 1)] == 1:
                return True
        return False
| 37.195 | 119 | 0.56634 | import numpy as np
from gennav.envs.base import Environment
from gennav.utils.common import RobotState
from gennav.utils.geometry import Point
from matplotlib import pyplot as plt
class BinaryOccupancyGrid2DEnv(Environment):
    """Binary occupancy-grid 2D environment.

    The grid is rebuilt from each laser scan: cells containing a scan
    return are marked 1 (occupied), all others 0 (free).

    Arguments:
        X (int): The number of grid cells in the x-direction.
        Y (int): The number of grid cells in the y-direction.
    """

    def __init__(self, X=10, Y=10):
        super(BinaryOccupancyGrid2DEnv, self).__init__()
        self.X = X
        self.Y = Y
        self.scan = None
        self.robotPose = None
        self.scale = 5  # sub-cells per grid cell
        self.grid = np.zeros((self.X * self.scale, self.Y * self.scale))
        # Homogeneous (3x3) transforms between the map, bot and world frames.
        self.transforms = {}
        self.mapTbot = {
            "from": "map",
            "to": "bot",
            "transform": self.scale
            * np.array(
                [[1, 0, int(self.X / 2)], [0, 1, int(self.Y / 2)], [0, 0, 1]]
            ).reshape(3, 3),
        }
        # Filled in by compute_transforms() once a robot pose is known.
        self.botTworld = {"from": "bot", "to": "world", "transform": np.empty((3, 3))}
        self.mapTworld = {
            "from": "map",
            "to": "world",
            "transform": np.dot(self.mapTbot["transform"], self.botTworld["transform"]),
        }
        self.transforms["mapTbot"] = self.mapTbot
        self.transforms["botTworld"] = self.botTworld
        self.transforms["mapTworld"] = self.mapTworld

    def update(self, scan, robotPose):
        """Update the environment with a new scan and robot pose."""
        self.scan = scan
        self.robotPose = robotPose
        self.compute_transforms()
        self.fillOccupancy()

    def fillOccupancy(self):
        """Rebuild the occupancy grid from the latest laser scan."""
        self.grid[:] = 0
        ang_min, ang_max, ranges = self.scan
        angle_step = (ang_max - ang_min) / len(ranges)
        for i, rng in enumerate(ranges):
            # BUGFIX: was ``np.abs(rng) is not np.inf`` -- an identity test
            # that is always True, so "no return" readings were never skipped.
            if not np.isinf(rng):
                angle = ang_min + i * angle_step
                # BUGFIX: the y component previously used ang_max instead of
                # ang_min, skewing every converted scan point.
                x, y = rng * np.cos(angle), rng * np.sin(angle)
                newState = self.transform("bot", "map", RobotState(Point(x, y, 0)))
                x_, y_ = newState.position.x, newState.position.y
                # Mark the cell occupied if it falls inside the grid.
                if 0 <= x_ < self.grid.shape[0] and 0 <= y_ < self.grid.shape[1]:
                    self.grid[int(x_)][int(-y_ - 1)] = 1

    def get_status(self, state):
        """Return True if the given world-frame state is collision free."""
        state = self.transform("world", "map", state)
        # Cast to int so float poses index the grid instead of raising.
        x, y = int(state.position.x), int(state.position.y)
        return self.grid[x][-y - 1] != 1

    def get_traj_status(self, traj):
        """Return True if every segment of the trajectory is collision free."""
        for i in range(len(traj.path) - 1):
            if self.check_line_segment(
                self.transform("world", "map", traj.path[i]),
                self.transform("world", "map", traj.path[i + 1]),
            ):
                return False
        return True

    def transform(self, frame1, frame2, rsf1):
        """Transform a robot state from frame1 to frame2.

        Raises:
            Exception: If neither the transform nor its inverse is known.
        """
        # Use the stored transform if available, else invert the reverse one.
        frame = frame2 + "T" + frame1
        frame_inv = frame1 + "T" + frame2
        if frame in self.transforms.keys():
            t_matrix = self.transforms[frame]["transform"]
        elif frame_inv in self.transforms.keys():
            t_matrix = np.linalg.inv(self.transforms[frame_inv]["transform"])
        else:
            raise Exception("Transform for the frames not found")
        # Apply the homogeneous transform to the (x, y, 1) position vector.
        pf2 = np.dot(
            t_matrix, np.array([rsf1.position.x, rsf1.position.y, 1]).reshape(3, 1)
        )
        return RobotState(position=Point(pf2[0].item(), pf2[1].item()))

    def compute_transforms(self):
        """Recompute the pose-dependent transforms from the robot pose."""
        x, y, yaw = (
            self.robotPose.position.x,
            self.robotPose.position.y,
            self.robotPose.orientation.yaw,
        )
        worldTbot = np.array(
            [[np.cos(yaw), -np.sin(yaw), x], [np.sin(yaw), np.cos(yaw), y], [0, 0, 1]]
        ).reshape(3, 3)
        self.botTworld["transform"] = np.linalg.inv(worldTbot)
        self.mapTworld["transform"] = np.dot(
            self.mapTbot["transform"], self.botTworld["transform"]
        )

    def visualise_grid(self):
        """Helper to show the occupancy grid with matplotlib (debug only)."""
        plt.imshow(self.grid, origin="bottom", cmap="binary")
        plt.show()

    def check_line_segment(self, state1, state2):
        """Return True if the segment between two map-frame states is blocked.

        The segment is sampled every 0.5 cells.  Vertical segments
        (previously ZeroDivisionError) and endpoints in descending x order
        (previously never sampled) are now handled.
        """
        x1, y1 = state1.position.x, state1.position.y
        x2, y2 = state2.position.x, state2.position.y
        if x1 > x2:
            # Sample in increasing x order.
            x1, y1, x2, y2 = x2, y2, x1, y1
        if x1 == x2:
            # Vertical segment: walk along y instead.
            for y in np.arange(min(y1, y2), max(y1, y2), 0.5):
                if self.grid[int(x1)][int(-y - 1)] == 1:
                    return True
            return False
        m = (y2 - y1) / (x2 - x1)
        for x in np.arange(x1, x2, 0.5):
            y = m * (x - x1) + y1
            if self.grid[int(x)][int(-y - 1)] == 1:
                return True
        return False
| true | true |
f739adbe6fbb5a4d0d1b1b218dbbca4bcaeca0af | 1,379 | py | Python | api/utils/m3u.py | StoneMoe/Anime-API | cef7186a198c8af35bc642a756dd40d88544d07c | [
"MIT"
] | null | null | null | api/utils/m3u.py | StoneMoe/Anime-API | cef7186a198c8af35bc642a756dd40d88544d07c | [
"MIT"
] | null | null | null | api/utils/m3u.py | StoneMoe/Anime-API | cef7186a198c8af35bc642a756dd40d88544d07c | [
"MIT"
] | null | null | null | import re
from dataclasses import dataclass
from logging import getLogger
from typing import List
logger = getLogger('M3U')
@dataclass
class M3UMedia:
    """A single media entry parsed from an #EXTINF line of an M3U playlist."""
    title: str
    tvg_name: str   # not populated by the parser; kept for API compatibility
    tvg_ID: str
    tvg_logo: str
    tvg_group: str
    link: str


class M3UParser:
    """Parser for extended M3U playlists.

    Mod from https://github.com/Timmy93/M3uParser/blob/master/M3uParser.py

    Parsed entries are collected in ``self.data`` as M3UMedia objects.
    """

    def __init__(self, content: str = None):
        self.lines: List[str] = []
        self.data: List[M3UMedia] = []
        if content is not None:
            self.read_data(content)
            if self.lines:
                self.scan_all()

    def read_data(self, content: str):
        """Split raw playlist text into lines (trailing newlines stripped)."""
        self.lines = [line.rstrip('\n') for line in content.splitlines()]

    def scan_all(self):
        """Scan every line and parse each #EXTINF entry found."""
        for index, line in enumerate(self.lines):
            if line.startswith('#EXTINF'):
                self.process_ext_inf(index)

    @staticmethod
    def _attr(pattern: str, line: str) -> str:
        """Return the first regex group of *pattern* in *line*, or '' if absent."""
        match = re.search(pattern, line)
        return match.group(1) if match else ''

    def process_ext_inf(self, n: int):
        """Parse the #EXTINF line at index *n*; the following line is the URL.

        BUGFIX: the original called ``.group(1)`` on the raw ``re.search``
        result, raising AttributeError on lines missing an attribute; missing
        attributes now yield '' instead.
        """
        line_info = self.lines[n]
        line_link = self.lines[n + 1]
        tid = self._attr("tvg-id=\"(.*?)\"", line_info)
        logo = self._attr("tvg-logo=\"(.*?)\"", line_info)
        group = self._attr("group-title=\"(.*?)\"", line_info)
        # The title is everything after the last comma on the line.
        title = self._attr("[,](?!.*[,])(.*?)$", line_info)
        self.data.append(M3UMedia(title, '', tid, logo, group, line_link))
| 26.519231 | 80 | 0.57578 | import re
from dataclasses import dataclass
from logging import getLogger
from typing import List
logger = getLogger('M3U')
@dataclass
class M3UMedia:
    """A single media entry parsed from an #EXTINF line of an M3U playlist."""
    title: str
    tvg_name: str   # not populated by the parser; kept for API compatibility
    tvg_ID: str
    tvg_logo: str
    tvg_group: str
    link: str


class M3UParser:
    """Parser for extended M3U playlists.

    Parsed entries are collected in ``self.data`` as M3UMedia objects.
    """

    def __init__(self, content: str = None):
        self.lines: List[str] = []
        self.data: List[M3UMedia] = []
        if content is not None:
            self.read_data(content)
            if self.lines:
                self.scan_all()

    def read_data(self, content: str):
        """Split raw playlist text into lines (trailing newlines stripped)."""
        self.lines = [line.rstrip('\n') for line in content.splitlines()]

    def scan_all(self):
        """Scan every line and parse each #EXTINF entry found."""
        for index, line in enumerate(self.lines):
            if line.startswith('#EXTINF'):
                self.process_ext_inf(index)

    @staticmethod
    def _attr(pattern: str, line: str) -> str:
        """Return the first regex group of *pattern* in *line*, or '' if absent."""
        match = re.search(pattern, line)
        return match.group(1) if match else ''

    def process_ext_inf(self, n: int):
        """Parse the #EXTINF line at index *n*; the following line is the URL.

        BUGFIX: the original called ``.group(1)`` on the raw ``re.search``
        result, raising AttributeError on lines missing an attribute; missing
        attributes now yield '' instead.
        """
        line_info = self.lines[n]
        line_link = self.lines[n + 1]
        tid = self._attr("tvg-id=\"(.*?)\"", line_info)
        logo = self._attr("tvg-logo=\"(.*?)\"", line_info)
        group = self._attr("group-title=\"(.*?)\"", line_info)
        # The title is everything after the last comma on the line.
        title = self._attr("[,](?!.*[,])(.*?)$", line_info)
        self.data.append(M3UMedia(title, '', tid, logo, group, line_link))
| true | true |
f739b0de39ba522d0f0921439324218d5be5adb7 | 8,835 | py | Python | magic_button_a.py | ContinuumBridge/magic_button | ecc42e333b28c93a19197300bcfd95a05a9b995f | [
"MIT"
] | null | null | null | magic_button_a.py | ContinuumBridge/magic_button | ecc42e333b28c93a19197300bcfd95a05a9b995f | [
"MIT"
] | null | null | null | magic_button_a.py | ContinuumBridge/magic_button | ecc42e333b28c93a19197300bcfd95a05a9b995f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# magic_button_.py
"""
Copyright (c) 2015 ContinuumBridge Limited
Written by Peter Claydon
"""
import sys
import os.path
import time
import json
from twisted.internet import reactor
from cbcommslib import CbApp, CbClient
from cbconfig import *
configFile = CB_CONFIG_DIR + "magic_button.config"
CHECK_INTERVAL = 30
WATCHDOG_INTERVAL = 70
MAX_SEND_INTERVAL = 60*30 # Ensure we have sent a message to client within this time
CID = "CID157" # Client ID
config = {
"uuids": [ ]
}
def nicetime(timeStamp):
    """Format a POSIX timestamp as 'HH:MM:SS, DD-MM-YYYY' in local time.

    The original also computed a ``milliseconds`` string that was never
    used; that dead code has been removed.
    """
    localtime = time.localtime(timeStamp)
    return time.strftime('%H:%M:%S, %d-%m-%Y', localtime)
class App(CbApp):
    """ContinuumBridge app that tracks BLE "magic buttons" reported by a
    beacon adaptor and relays state/RSSI changes to a client.
    """
    def __init__(self, argv):
        self.state = "stopped"
        self.devices = []
        self.idToName = {}
        self.buttonStates = {}      # per-button state, keyed on beacon "major" id
        self.beaconAdaptor = None   # id of the adaptor providing ble_beacon data
        self.lastSent = 0           # when a message was last sent to the client
        # Super-class init must be called
        CbApp.__init__(self, argv)

    def setState(self, action):
        """Record the app state and report it to the bridge manager."""
        self.state = action
        msg = {"id": self.id,
               "status": "state",
               "state": self.state}
        self.sendManagerMessage(msg)

    def onConcMessage(self, message):
        """Forward concentrator messages to the client connection."""
        self.client.receive(message)

    def checkConnected(self):
        """Watchdog, re-scheduled every CHECK_INTERVAL seconds.

        Buttons not seen for WATCHDOG_INTERVAL are reported as disconnected
        and dropped; a keep-alive "init" message is sent if nothing has been
        sent to the client within MAX_SEND_INTERVAL.
        """
        now = time.time()
        if self.buttonStates != {}:
            delkeys = []
            for b in self.buttonStates:
                if now - self.buttonStates[b]["connectTime"] > WATCHDOG_INTERVAL:
                    self.buttonStates[b]["rssi"] = -200
                    toClient = {"b": b,
                                "p": self.buttonStates[b]["rssi"],
                                "c": False}
                    self.client.send(toClient)
                    self.cbLog("debug", "checkConnected, button no longer connected: " + str(json.dumps(toClient, indent=4)))
                    self.lastSent = now
                    delkeys.append(b)
                    self.cbLog("debug", "checkConnected, buttonStates after del: " + str(self.buttonStates))
            # Delete outside the loop: a dict must not change size while iterated.
            for d in delkeys:
                del self.buttonStates[d]
        if now - self.lastSent > MAX_SEND_INTERVAL:
            self.cbLog("debug", "Exceeded MAX_SEND_INTERVAL")
            self.lastSent = now
            toClient = {"status": "init"}
            self.client.send(toClient)
        reactor.callLater(CHECK_INTERVAL, self.checkConnected)

    def onClientMessage(self, message):
        """Handle a client message; persist and apply any new UUID list."""
        self.cbLog("debug", "onClientMessage, message: " + str(json.dumps(message, indent=4)))
        global config
        if "uuids" in message:
            config["uuids"] = message["uuids"]
            try:
                with open(configFile, 'w') as f:
                    json.dump(config, f)
            except Exception as ex:
                self.cbLog("warning", "onClientMessage, could not write to file. Type: " + str(type(ex)) + ", exception: " + str(ex.args))
            self.readLocalConfig()
            self.requestUUIDs(self.beaconAdaptor)

    def requestUUIDs(self, adaptor):
        """Ask the beacon adaptor to report beacons matching our UUID list."""
        req = {"id": self.id,
               "request": "service",
               "service": [
                   {"characteristic": "ble_beacon",
                    "interval": 1.0,
                    "uuids": config["uuids"]}
               ]}
        self.sendMessage(req, adaptor)

    def onAdaptorService(self, message):
        """Find the ble_beacon service among those offered and subscribe."""
        for p in message["service"]:
            if p["characteristic"] == "ble_beacon":
                self.beaconAdaptor = message["id"]
                self.requestUUIDs(self.beaconAdaptor)

    def onAdaptorData(self, message):
        """Process a ble_beacon report: track button state and RSSI.

        A message is forwarded to the client when a button's press state
        changes or when its RSSI moves outside the hysteresis thresholds.
        (The original wrapped this in a no-op ``if True:`` left over from a
        commented-out try/except; the wrapper has been removed.)
        """
        if self.state != "running":
            self.setState("running")
        if message["characteristic"] == "ble_beacon":
            if message["data"]["uuid"] in config["uuids"]:
                changed = False
                buttonID = message["data"]["major"]
                buttonState = message["data"]["minor"] & 0x01  # bit 0 = pressed
                rxPower = message["data"]["rx_power"]
                if buttonID in self.buttonStates:
                    self.buttonStates[buttonID]["connectTime"] = time.time()
                    self.cbLog("debug", "Seen: " + str(buttonID))
                else:
                    # First sighting: state -1 forces a report below.
                    self.buttonStates[buttonID] = {
                        "connectTime": time.time(),
                        "state": -1
                    }
                    self.cbLog("info", "New button: " + str(buttonID))
                if buttonState != self.buttonStates[buttonID]["state"]:
                    # Press state change: always report and refresh the RSSI.
                    self.buttonStates[buttonID]["state"] = buttonState
                    self.buttonStates[buttonID]["rssi"] = rxPower
                    self.buttonStates[buttonID]["rssi_time"] = time.time()
                    changed = True
                elif (abs(self.buttonStates[buttonID]["rssi"] - rxPower) > 6 and rxPower < -80) \
                        or abs(self.buttonStates[buttonID]["rssi"] - rxPower) > 15:
                    # Large RSSI swing (6 dB when weak, 15 dB otherwise).
                    self.buttonStates[buttonID]["rssi"] = rxPower
                    self.buttonStates[buttonID]["rssi_time"] = time.time()
                    changed = True
                elif abs(self.buttonStates[buttonID]["rssi"] - message["data"]["rx_power"]) > 5:
                    # Small drift: report at most every 15 minutes.
                    if time.time() - self.buttonStates[buttonID]["rssi_time"] > 60 * 15:
                        self.buttonStates[buttonID]["rssi"] = rxPower
                        self.buttonStates[buttonID]["rssi_time"] = time.time()
                        changed = True
                if changed:
                    toClient = {"b": buttonID,
                                "s": self.buttonStates[buttonID]["state"],
                                "p": self.buttonStates[buttonID]["rssi"],
                                "c": True}
                    self.client.send(toClient)
                    self.cbLog("debug", "Sent to client: " + str(json.dumps(toClient, indent=4)))

    def readLocalConfig(self):
        """Load magic_button.config, coerce booleans and upper-case UUIDs."""
        global config
        try:
            with open(configFile, 'r') as f:
                newConfig = json.load(f)
                self.cbLog("debug", "Read local config")
                config.update(newConfig)
        except Exception as ex:
            self.cbLog("warning", "Problem reading magic_button.config. Type: " + str(type(ex)) + ", exception: " + str(ex.args))
        # BUGFIX: the original tested ``c.lower`` (a bound method, never equal
        # to a string), so this coercion never ran.  Coerce string values to
        # booleans (presumed intent -- confirm).
        for c in config:
            if isinstance(config[c], str):
                if config[c].lower() in ("true", "t", "1"):
                    config[c] = True
                elif config[c].lower() in ("false", "f", "0"):
                    config[c] = False
        try:
            config["uuids"] = [u.upper() for u in config["uuids"]]
        except Exception as ex:
            self.cbLog("warning", "Problem upper-casing uuids. Type: " + str(type(ex)) + ", exception: " + str(ex.args))
        self.cbLog("debug", "Config: " + str(json.dumps(config, indent=4)))

    def onConfigureMessage(self, managerConfig):
        """Register adaptors from the manager config and start the app."""
        self.readLocalConfig()
        for adaptor in managerConfig["adaptors"]:
            adtID = adaptor["id"]
            if adtID not in self.devices:
                # Because managerConfigure may be re-called if devices are added
                friendly_name = adaptor["friendly_name"]
                self.idToName[adtID] = friendly_name.replace(" ", "_")
                self.devices.append(adtID)
        self.client = CbClient(self.id, CID, 3)
        self.client.onClientMessage = self.onClientMessage
        self.client.sendMessage = self.sendMessage
        self.client.cbLog = self.cbLog
        reactor.callLater(CHECK_INTERVAL, self.checkConnected)
        self.setState("starting")
self.setState("starting")
# Script entry point: hand the command-line arguments to the app framework.
if __name__ == '__main__':
    App(sys.argv)
| 44.175 | 139 | 0.519977 |
import sys
import os.path
import time
import json
from twisted.internet import reactor
from cbcommslib import CbApp, CbClient
from cbconfig import *
configFile = CB_CONFIG_DIR + "magic_button.config"
CHECK_INTERVAL = 30
WATCHDOG_INTERVAL = 70
MAX_SEND_INTERVAL = 60*30
CID = "CID157"
config = {
"uuids": [ ]
}
def nicetime(timeStamp):
    """Format a POSIX timestamp as 'HH:MM:SS, DD-MM-YYYY' in local time.

    The original also computed a ``milliseconds`` string that was never
    used; that dead code has been removed.
    """
    localtime = time.localtime(timeStamp)
    return time.strftime('%H:%M:%S, %d-%m-%Y', localtime)
class App(CbApp):
    """ContinuumBridge app that tracks BLE "magic buttons" reported by a
    beacon adaptor and relays state/RSSI changes to a client.
    """
    def __init__(self, argv):
        self.state = "stopped"
        self.devices = []
        self.idToName = {}
        self.buttonStates = {}      # per-button state, keyed on beacon "major" id
        self.beaconAdaptor = None   # id of the adaptor providing ble_beacon data
        self.lastSent = 0           # when a message was last sent to the client
        # Super-class init must be called
        CbApp.__init__(self, argv)

    def setState(self, action):
        """Record the app state and report it to the bridge manager."""
        self.state = action
        msg = {"id": self.id,
               "status": "state",
               "state": self.state}
        self.sendManagerMessage(msg)

    def onConcMessage(self, message):
        """Forward concentrator messages to the client connection."""
        self.client.receive(message)

    def checkConnected(self):
        """Watchdog, re-scheduled every CHECK_INTERVAL seconds.

        Buttons not seen for WATCHDOG_INTERVAL are reported as disconnected
        and dropped; a keep-alive "init" message is sent if nothing has been
        sent to the client within MAX_SEND_INTERVAL.
        """
        now = time.time()
        if self.buttonStates != {}:
            delkeys = []
            for b in self.buttonStates:
                if now - self.buttonStates[b]["connectTime"] > WATCHDOG_INTERVAL:
                    self.buttonStates[b]["rssi"] = -200
                    toClient = {"b": b,
                                "p": self.buttonStates[b]["rssi"],
                                "c": False}
                    self.client.send(toClient)
                    self.cbLog("debug", "checkConnected, button no longer connected: " + str(json.dumps(toClient, indent=4)))
                    self.lastSent = now
                    delkeys.append(b)
                    self.cbLog("debug", "checkConnected, buttonStates after del: " + str(self.buttonStates))
            # Delete outside the loop: a dict must not change size while iterated.
            for d in delkeys:
                del self.buttonStates[d]
        if now - self.lastSent > MAX_SEND_INTERVAL:
            self.cbLog("debug", "Exceeded MAX_SEND_INTERVAL")
            self.lastSent = now
            toClient = {"status": "init"}
            self.client.send(toClient)
        reactor.callLater(CHECK_INTERVAL, self.checkConnected)

    def onClientMessage(self, message):
        """Handle a client message; persist and apply any new UUID list."""
        self.cbLog("debug", "onClientMessage, message: " + str(json.dumps(message, indent=4)))
        global config
        if "uuids" in message:
            config["uuids"] = message["uuids"]
            try:
                with open(configFile, 'w') as f:
                    json.dump(config, f)
            except Exception as ex:
                self.cbLog("warning", "onClientMessage, could not write to file. Type: " + str(type(ex)) + ", exception: " + str(ex.args))
            self.readLocalConfig()
            self.requestUUIDs(self.beaconAdaptor)

    def requestUUIDs(self, adaptor):
        """Ask the beacon adaptor to report beacons matching our UUID list."""
        req = {"id": self.id,
               "request": "service",
               "service": [
                   {"characteristic": "ble_beacon",
                    "interval": 1.0,
                    "uuids": config["uuids"]}
               ]}
        self.sendMessage(req, adaptor)

    def onAdaptorService(self, message):
        """Find the ble_beacon service among those offered and subscribe."""
        for p in message["service"]:
            if p["characteristic"] == "ble_beacon":
                self.beaconAdaptor = message["id"]
                self.requestUUIDs(self.beaconAdaptor)

    def onAdaptorData(self, message):
        """Process a ble_beacon report: track button state and RSSI.

        A message is forwarded to the client when a button's press state
        changes or when its RSSI moves outside the hysteresis thresholds.
        (The original wrapped this in a no-op ``if True:``; removed.)
        """
        if self.state != "running":
            self.setState("running")
        if message["characteristic"] == "ble_beacon":
            if message["data"]["uuid"] in config["uuids"]:
                changed = False
                buttonID = message["data"]["major"]
                buttonState = message["data"]["minor"] & 0x01  # bit 0 = pressed
                rxPower = message["data"]["rx_power"]
                if buttonID in self.buttonStates:
                    self.buttonStates[buttonID]["connectTime"] = time.time()
                    self.cbLog("debug", "Seen: " + str(buttonID))
                else:
                    # First sighting: state -1 forces a report below.
                    self.buttonStates[buttonID] = {
                        "connectTime": time.time(),
                        "state": -1
                    }
                    self.cbLog("info", "New button: " + str(buttonID))
                if buttonState != self.buttonStates[buttonID]["state"]:
                    # Press state change: always report and refresh the RSSI.
                    self.buttonStates[buttonID]["state"] = buttonState
                    self.buttonStates[buttonID]["rssi"] = rxPower
                    self.buttonStates[buttonID]["rssi_time"] = time.time()
                    changed = True
                elif (abs(self.buttonStates[buttonID]["rssi"] - rxPower) > 6 and rxPower < -80) \
                        or abs(self.buttonStates[buttonID]["rssi"] - rxPower) > 15:
                    # Large RSSI swing (6 dB when weak, 15 dB otherwise).
                    self.buttonStates[buttonID]["rssi"] = rxPower
                    self.buttonStates[buttonID]["rssi_time"] = time.time()
                    changed = True
                elif abs(self.buttonStates[buttonID]["rssi"] - message["data"]["rx_power"]) > 5:
                    # Small drift: report at most every 15 minutes.
                    if time.time() - self.buttonStates[buttonID]["rssi_time"] > 60 * 15:
                        self.buttonStates[buttonID]["rssi"] = rxPower
                        self.buttonStates[buttonID]["rssi_time"] = time.time()
                        changed = True
                if changed:
                    toClient = {"b": buttonID,
                                "s": self.buttonStates[buttonID]["state"],
                                "p": self.buttonStates[buttonID]["rssi"],
                                "c": True}
                    self.client.send(toClient)
                    self.cbLog("debug", "Sent to client: " + str(json.dumps(toClient, indent=4)))

    def readLocalConfig(self):
        """Load magic_button.config, coerce booleans and upper-case UUIDs."""
        global config
        try:
            with open(configFile, 'r') as f:
                newConfig = json.load(f)
                self.cbLog("debug", "Read local config")
                config.update(newConfig)
        except Exception as ex:
            self.cbLog("warning", "Problem reading magic_button.config. Type: " + str(type(ex)) + ", exception: " + str(ex.args))
        # BUGFIX: the original tested ``c.lower`` (a bound method, never equal
        # to a string), so this coercion never ran.  Coerce string values to
        # booleans (presumed intent -- confirm).
        for c in config:
            if isinstance(config[c], str):
                if config[c].lower() in ("true", "t", "1"):
                    config[c] = True
                elif config[c].lower() in ("false", "f", "0"):
                    config[c] = False
        try:
            config["uuids"] = [u.upper() for u in config["uuids"]]
        except Exception as ex:
            self.cbLog("warning", "Problem upper-casing uuids. Type: " + str(type(ex)) + ", exception: " + str(ex.args))
        self.cbLog("debug", "Config: " + str(json.dumps(config, indent=4)))

    def onConfigureMessage(self, managerConfig):
        """Register adaptors from the manager config and start the app."""
        self.readLocalConfig()
        for adaptor in managerConfig["adaptors"]:
            adtID = adaptor["id"]
            if adtID not in self.devices:
                # Because managerConfigure may be re-called if devices are added
                friendly_name = adaptor["friendly_name"]
                self.idToName[adtID] = friendly_name.replace(" ", "_")
                self.devices.append(adtID)
        self.client = CbClient(self.id, CID, 3)
        self.client.onClientMessage = self.onClientMessage
        self.client.sendMessage = self.sendMessage
        self.client.cbLog = self.cbLog
        reactor.callLater(CHECK_INTERVAL, self.checkConnected)
        self.setState("starting")
self.setState("starting")
if __name__ == '__main__':
App(sys.argv)
| true | true |
f739b1b19515993ce66190bb30755066f04b1c71 | 5,545 | py | Python | lowfat/management/commands/load2019applications.py | elena-kolomeets/lowfat | f7647f5cd12519f722e41808157a96cc3e37b6ce | [
"BSD-3-Clause"
] | 6 | 2017-02-23T16:44:36.000Z | 2019-03-18T11:39:03.000Z | lowfat/management/commands/load2019applications.py | elena-kolomeets/lowfat | f7647f5cd12519f722e41808157a96cc3e37b6ce | [
"BSD-3-Clause"
] | 286 | 2017-02-07T15:00:41.000Z | 2022-03-08T12:56:09.000Z | lowfat/management/commands/load2019applications.py | elena-kolomeets/lowfat | f7647f5cd12519f722e41808157a96cc3e37b6ce | [
"BSD-3-Clause"
] | 2 | 2018-06-19T12:38:08.000Z | 2020-11-23T12:15:08.000Z | import pandas as pd
from django.contrib.auth import get_user_model
from django.contrib.auth.models import BaseUserManager
from django.core.management.base import BaseCommand
from django.db import IntegrityError
from lowfat.models import Claimant
class Command(BaseCommand):
    """Import the CSV of 2019 fellowship applications into Claimant records."""
    help = "Import CSV with 2019 applications."

    def add_arguments(self, parser):
        # Optional positional path to the CSV export; defaults to "2019.csv".
        parser.add_argument('csv', nargs='?', default='2019.csv')

    # pylint: disable=too-many-branches,too-many-locals
    def handle(self, *args, **options):
        """Create or update one Claimant per CSV row.

        Rows whose email already exists update the existing Claimant
        instead.  Applicants that received an offer also get a Django auth
        user with a random password.  Successes and failures are collected
        by row index and printed at the end.
        """
        fail_list = []
        success_list = []
        user_manager = BaseUserManager()
        data = pd.read_csv(options['csv'])
        for index, line in data.iterrows():  # pylint: disable=no-member,unused-variable
            received_offer = line['Invited'] == 'YES'
            # JACS research-area code: chars 1-2 of the classification,
            # or "Y0" for the explicit non-research answer.
            if line["Research Classification"] == "N/A - I do not do research":
                jacs = "Y0"
            else:
                jacs = line["Research Classification"][1:3]
            # NOTE(review): application_year is 2018 for the 2019 round --
            # confirm this is intended.
            applicants_dict = {
                "application_year": 2018,
                "fellow": False,
                "received_offer": received_offer,
                "forenames": line["First name"],
                "surname": line["Surname"],
                "affiliation": line["Home Institution"],
                "department": line["Department"] if pd.notnull(line["Department"]) else "",
                "group": line["Group within Department"] if pd.notnull(line["Group within Department"]) else "",
                "career_stage_when_apply": line["Career stage"][6],
                "job_title_when_apply": line["Job Title"],
                "research_area": line["Area of work"],
                "research_area_code": jacs,
                "email": line["Email Address"],
                "phone": line["Telephone number"],
                "gender": line["Gender"][0] if pd.notnull(line["Gender"]) else 'R',
                "home_country": "GB",
                "home_city": "Unknow",
                "funding": line["Which primary funding body/charity/organisation would you normally turn to if seeking financial support for your research/work"],
                "funding_notes": line["Which additional funding body/charity/organisation would you probably turn to if seeking financial support for your research/work"] if pd.notnull(line["Which additional funding body/charity/organisation would you probably turn to if seeking financial support for your research/work"]) else "",
                "claimantship_grant": 3000 if received_offer else 0,
                "institutional_website": line["Please specify your Institutional webpage"] if pd.notnull(line["Please specify your Institutional webpage"]) else "",
                "website": line["Please specify your blog"] if pd.notnull(line["Please specify your blog"]) else "",
                "orcid": line["Please specify your ORCID"] if pd.notnull(line["Please specify your ORCID"]) else "",
                "google_scholar": line["Please specify your Google Scholar"] if pd.notnull(line["Please specify your Google Scholar"]) else "",
                "twitter": line["Please specify your Twitter handle"] if pd.notnull(line["Please specify your Twitter handle"]) else "",
                "screencast_url": line["Application Screencast URL"] if pd.notnull(line["Application Screencast URL"]) else "",
                "example_of_writing_url": line["Example of writing"] if pd.notnull(line["Example of writing"]) else "",
            }
            try:
                applicant = Claimant(**applicants_dict)
                applicant.save()
                success_list.append(index)
                if received_offer:
                    self._create_user(user_manager, applicant, line)
            except IntegrityError:
                # A Claimant with this email already exists: update it.
                try:
                    applicant = Claimant.objects.get(
                        email=applicants_dict["email"]
                    )
                    for key, value in applicants_dict.items():
                        # BUGFIX: was ``applicant[key] = value`` -- Django
                        # model instances do not support item assignment, so
                        # the update path always raised TypeError.
                        setattr(applicant, key, value)
                    applicant.save()
                    success_list.append(index)
                    if received_offer:
                        self._create_user(user_manager, applicant, line)
                except BaseException as exception:
                    print("Error: {}\n{}\n{}".format(exception, line, 80 * "-"))
                    fail_list.append(index)
            except BaseException as exception:
                print("Error: {}\n{}\n{}".format(exception, line, 80 * "-"))
                fail_list.append(index)
        print(80 * "-")
        print("Success: {}".format(success_list))
        print("Fail: {}".format(fail_list))

    @staticmethod
    def _create_user(user_manager, applicant, line):
        """Create and attach a Django auth user for an offered applicant."""
        new_user = get_user_model().objects.create_user(
            username=applicant.slug,
            email=applicant.email,
            password=user_manager.make_random_password(),
            first_name=line["First name"],
            last_name=line["Surname"]
        )
        applicant.user = new_user
        applicant.save()
| 50.409091 | 332 | 0.555275 | import pandas as pd
from django.contrib.auth import get_user_model
from django.contrib.auth.models import BaseUserManager
from django.core.management.base import BaseCommand
from django.db import IntegrityError
from lowfat.models import Claimant
class Command(BaseCommand):
    """Import the CSV of 2019 fellowship applications into Claimant records."""
    help = "Import CSV with 2019 applications."

    def add_arguments(self, parser):
        # Optional positional path to the CSV export; defaults to "2019.csv".
        parser.add_argument('csv', nargs='?', default='2019.csv')

    def handle(self, *args, **options):
        """Create or update one Claimant per CSV row.

        Rows whose email already exists update the existing Claimant
        instead.  Applicants that received an offer also get a Django auth
        user with a random password.  Successes and failures are collected
        by row index and printed at the end.
        """
        fail_list = []
        success_list = []
        user_manager = BaseUserManager()
        data = pd.read_csv(options['csv'])
        for index, line in data.iterrows():
            received_offer = line['Invited'] == 'YES'
            # JACS research-area code: chars 1-2 of the classification,
            # or "Y0" for the explicit non-research answer.
            if line["Research Classification"] == "N/A - I do not do research":
                jacs = "Y0"
            else:
                jacs = line["Research Classification"][1:3]
            # NOTE(review): application_year is 2018 for the 2019 round --
            # confirm this is intended.
            applicants_dict = {
                "application_year": 2018,
                "fellow": False,
                "received_offer": received_offer,
                "forenames": line["First name"],
                "surname": line["Surname"],
                "affiliation": line["Home Institution"],
                "department": line["Department"] if pd.notnull(line["Department"]) else "",
                "group": line["Group within Department"] if pd.notnull(line["Group within Department"]) else "",
                "career_stage_when_apply": line["Career stage"][6],
                "job_title_when_apply": line["Job Title"],
                "research_area": line["Area of work"],
                "research_area_code": jacs,
                "email": line["Email Address"],
                "phone": line["Telephone number"],
                "gender": line["Gender"][0] if pd.notnull(line["Gender"]) else 'R',
                "home_country": "GB",
                "home_city": "Unknow",
                "funding": line["Which primary funding body/charity/organisation would you normally turn to if seeking financial support for your research/work"],
                "funding_notes": line["Which additional funding body/charity/organisation would you probably turn to if seeking financial support for your research/work"] if pd.notnull(line["Which additional funding body/charity/organisation would you probably turn to if seeking financial support for your research/work"]) else "",
                "claimantship_grant": 3000 if received_offer else 0,
                "institutional_website": line["Please specify your Institutional webpage"] if pd.notnull(line["Please specify your Institutional webpage"]) else "",
                "website": line["Please specify your blog"] if pd.notnull(line["Please specify your blog"]) else "",
                "orcid": line["Please specify your ORCID"] if pd.notnull(line["Please specify your ORCID"]) else "",
                "google_scholar": line["Please specify your Google Scholar"] if pd.notnull(line["Please specify your Google Scholar"]) else "",
                "twitter": line["Please specify your Twitter handle"] if pd.notnull(line["Please specify your Twitter handle"]) else "",
                "screencast_url": line["Application Screencast URL"] if pd.notnull(line["Application Screencast URL"]) else "",
                "example_of_writing_url": line["Example of writing"] if pd.notnull(line["Example of writing"]) else "",
            }
            try:
                applicant = Claimant(**applicants_dict)
                applicant.save()
                success_list.append(index)
                if received_offer:
                    self._create_user(user_manager, applicant, line)
            except IntegrityError:
                # A Claimant with this email already exists: update it.
                try:
                    applicant = Claimant.objects.get(
                        email=applicants_dict["email"]
                    )
                    for key, value in applicants_dict.items():
                        # BUGFIX: was ``applicant[key] = value`` -- Django
                        # model instances do not support item assignment, so
                        # the update path always raised TypeError.
                        setattr(applicant, key, value)
                    applicant.save()
                    success_list.append(index)
                    if received_offer:
                        self._create_user(user_manager, applicant, line)
                except BaseException as exception:
                    print("Error: {}\n{}\n{}".format(exception, line, 80 * "-"))
                    fail_list.append(index)
            except BaseException as exception:
                print("Error: {}\n{}\n{}".format(exception, line, 80 * "-"))
                fail_list.append(index)
        print(80 * "-")
        print("Success: {}".format(success_list))
        print("Fail: {}".format(fail_list))

    @staticmethod
    def _create_user(user_manager, applicant, line):
        """Create and attach a Django auth user for an offered applicant."""
        new_user = get_user_model().objects.create_user(
            username=applicant.slug,
            email=applicant.email,
            password=user_manager.make_random_password(),
            first_name=line["First name"],
            last_name=line["Surname"]
        )
        applicant.user = new_user
        applicant.save()
| true | true |
f739b1ea57fb45b466c82f6f7931188c2c068e75 | 2,294 | py | Python | test/other/dataTest.old.py | meracan/s3-netcdf-api | 920d09ef7b1a205230ea2c76eabcb4853616992c | [
"MIT"
] | 1 | 2020-08-30T01:47:45.000Z | 2020-08-30T01:47:45.000Z | test/other/dataTest.old.py | meracan/s3-netcdf-api | 920d09ef7b1a205230ea2c76eabcb4853616992c | [
"MIT"
] | null | null | null | test/other/dataTest.old.py | meracan/s3-netcdf-api | 920d09ef7b1a205230ea2c76eabcb4853616992c | [
"MIT"
] | null | null | null | import numpy as np
from datetime import datetime
# Synthetic fixture dimensions for the test mesh / model output.
npe=3  # entries per element row; presumably nodes per element (triangles) -- elem is (nelem, npe)
nelem=20  # number of mesh elements
nnode=10  # number of mesh nodes
nstation=27  # number of output stations. NOTE(review): the stations dict below has 28 entries -- confirm intended.
nsnode=3  # nodes per station (axis 1 of the spectra array)
ntime=8760  # hourly steps (8760 = 365 * 24, one year at 1-hour resolution)
nfreq=3  # spectral frequencies
ndir=5  # spectral directions
# Mesh connectivity, timestamps and coordinates, all filled with
# deterministic np.arange test values.
elem=np.arange(nelem*npe,dtype="i4").reshape((nelem,npe))
time=np.datetime64(datetime(2000,1,1))+np.arange((ntime))*np.timedelta64(1, 'h')
lat=np.arange((nnode),dtype="f8")
lon=np.arange((nnode),dtype="f8")
nodes=np.column_stack((lon,lat))  # (nnode, 2) as (lon, lat) pairs
bed=np.arange((nnode),dtype="f4")
slat=np.arange((nstation),dtype="f8")
slon=np.arange((nstation),dtype="f8")
freq=np.arange((nfreq),dtype="f8")
dir=np.arange((ndir),dtype="f8")  # NOTE(review): shadows the builtin `dir`
# Per-node time series for each output variable group, shaped (ntime, nnode).
# NOTE(review): names look like SWAN wave-model outputs -- confirm against
# the consumer of this fixture.
nshape=ntime*nnode
shape=(ntime,nnode)
variables={
  "WIND":{
    "Windv_x":np.arange(nshape,dtype="f4").reshape(shape),
    "Windv_y":np.arange(nshape,dtype="f4").reshape(shape),
  },
  "HS":{"Hsig":np.arange(nshape,dtype="f4").reshape(shape),},
  "DIR":{ "Dir":np.arange(nshape,dtype="f4").reshape(shape),},
  "TPS":{"TPsmoo":np.arange(nshape,dtype="f4").reshape(shape),},
  "TMM10":{"Tm_10":np.arange(nshape,dtype="f4").reshape(shape),},
  "TM01":{"Tm01":np.arange(nshape,dtype="f4").reshape(shape),},
  "TM02":{"Tm02":np.arange(nshape,dtype="f4").reshape(shape),},
  "PDIR":{"PkDir":np.arange(nshape,dtype="f4").reshape(shape),},
  "DSPR":{"Dspr":np.arange(nshape,dtype="f4").reshape(shape),},
  "QP":{"Qp":np.arange(nshape,dtype="f4").reshape(shape),},
  "TRANSP":{"Transp_x":np.arange(nshape,dtype="f4").reshape(shape),"Transp_y":np.arange(nshape,dtype="f4").reshape(shape),}
}
# Spectral output, shaped (station, station-node, time, freq, dir).
nshape=nstation*nsnode*ntime*nfreq*ndir
shape=(nstation,nsnode,ntime,nfreq,ndir)
spcgroup={
  "spectra":(np.arange(nshape,dtype="f8")).reshape(shape)
}
# Station name -> number of station nodes; each entry is replaced with a
# full record (id / nsnodes / latlng) by the loop that follows.
stations={
  "beverly": 1,
  "brooks": 1,
  "c_dixon": 1,
  "c_eliz": 1,
  "campbell": 1,
  "e_dell": 1,
  "hotspots": 2,
  "line_n": 2,
  "line_w": 3,
  "line_s": 2,
  "m_nomad": 1,
  "n_hecat": 1,
  "ne_isle": 1,
  "neah": 2,
  "p_renf": 1,
  "perouse": 1,
  "s_hecat": 1,
  "s_morsb": 1,
  "s_nomad": 1,
  "sombrio": 1,
  "sooke": 1,
  "tarbotn": 1,
  "tillamk": 1,
  "tofino": 1,
  "w_dixon": 1,
  "w_morsb": 1,
  "w_otter": 1,
  "w_washn": 1
}
# Create lat lng for each station
for i,id in enumerate(stations):
c=np.array([[1.0,1.0]])
stations[id]={"id":i,"nsnodes":stations[id],"latlng":((np.arange(stations[id])+1)*i)[:,np.newaxis]*c} | 26.367816 | 123 | 0.624237 | import numpy as np
from datetime import datetime
npe=3
nelem=20
nnode=10
nstation=27
nsnode=3
ntime=8760
nfreq=3
ndir=5
elem=np.arange(nelem*npe,dtype="i4").reshape((nelem,npe))
time=np.datetime64(datetime(2000,1,1))+np.arange((ntime))*np.timedelta64(1, 'h')
lat=np.arange((nnode),dtype="f8")
lon=np.arange((nnode),dtype="f8")
nodes=np.column_stack((lon,lat))
bed=np.arange((nnode),dtype="f4")
slat=np.arange((nstation),dtype="f8")
slon=np.arange((nstation),dtype="f8")
freq=np.arange((nfreq),dtype="f8")
dir=np.arange((ndir),dtype="f8")
nshape=ntime*nnode
shape=(ntime,nnode)
variables={
"WIND":{
"Windv_x":np.arange(nshape,dtype="f4").reshape(shape),
"Windv_y":np.arange(nshape,dtype="f4").reshape(shape),
},
"HS":{"Hsig":np.arange(nshape,dtype="f4").reshape(shape),},
"DIR":{ "Dir":np.arange(nshape,dtype="f4").reshape(shape),},
"TPS":{"TPsmoo":np.arange(nshape,dtype="f4").reshape(shape),},
"TMM10":{"Tm_10":np.arange(nshape,dtype="f4").reshape(shape),},
"TM01":{"Tm01":np.arange(nshape,dtype="f4").reshape(shape),},
"TM02":{"Tm02":np.arange(nshape,dtype="f4").reshape(shape),},
"PDIR":{"PkDir":np.arange(nshape,dtype="f4").reshape(shape),},
"DSPR":{"Dspr":np.arange(nshape,dtype="f4").reshape(shape),},
"QP":{"Qp":np.arange(nshape,dtype="f4").reshape(shape),},
"TRANSP":{"Transp_x":np.arange(nshape,dtype="f4").reshape(shape),"Transp_y":np.arange(nshape,dtype="f4").reshape(shape),}
}
nshape=nstation*nsnode*ntime*nfreq*ndir
shape=(nstation,nsnode,ntime,nfreq,ndir)
spcgroup={
"spectra":(np.arange(nshape,dtype="f8")).reshape(shape)
}
stations={
"beverly": 1,
"brooks": 1,
"c_dixon": 1,
"c_eliz": 1,
"campbell": 1,
"e_dell": 1,
"hotspots": 2,
"line_n": 2,
"line_w": 3,
"line_s": 2,
"m_nomad": 1,
"n_hecat": 1,
"ne_isle": 1,
"neah": 2,
"p_renf": 1,
"perouse": 1,
"s_hecat": 1,
"s_morsb": 1,
"s_nomad": 1,
"sombrio": 1,
"sooke": 1,
"tarbotn": 1,
"tillamk": 1,
"tofino": 1,
"w_dixon": 1,
"w_morsb": 1,
"w_otter": 1,
"w_washn": 1
}
for i,id in enumerate(stations):
c=np.array([[1.0,1.0]])
stations[id]={"id":i,"nsnodes":stations[id],"latlng":((np.arange(stations[id])+1)*i)[:,np.newaxis]*c} | true | true |
f739b2a8f3b93c7ecc928486e5d8e852d554f5b4 | 2,140 | py | Python | tests/test_packer.py | asyaf/speech-packer | 5c576ef8922f2ec1279a53cb7ebbc6e5fd51157c | [
"MIT"
] | null | null | null | tests/test_packer.py | asyaf/speech-packer | 5c576ef8922f2ec1279a53cb7ebbc6e5fd51157c | [
"MIT"
] | null | null | null | tests/test_packer.py | asyaf/speech-packer | 5c576ef8922f2ec1279a53cb7ebbc6e5fd51157c | [
"MIT"
] | null | null | null | import mock
import pytest
import random
from speech_packer import Packer, SpeechAnalyzer
# Sample packing items used as fixtures by the tests below.
ITEMS = ['t-shirt', 'jeans', 'raincoat', 'sneakers', 'hiking boots']
@mock.patch("speech_packer.SpeechAnalyzer")
def test__add_item(mock_analyzer):
    """A recognized item should end up in the to-pack set."""
    spoken_item = random.choice(ITEMS)
    mock_analyzer.process_single_phrase.return_value = spoken_item
    packer = Packer(mock_analyzer)
    packer._add_item()
    assert packer._to_pack == {spoken_item}
@mock.patch("speech_packer.SpeechAnalyzer")
def test__pack_item(mock_analyzer):
    """Packing a previously added item moves it from to-pack to packed."""
    spoken_item = random.choice(ITEMS)
    mock_analyzer.process_single_phrase.return_value = spoken_item
    packer = Packer(mock_analyzer)
    packer._add_item()
    packer._pack_item()
    assert packer._to_pack == set()
    assert packer._packed == {spoken_item}
@mock.patch("speech_packer.SpeechAnalyzer")
def test__pack_item_failure(mock_analyzer):
    """Packing an item that was never added leaves both sets empty."""
    spoken_item = random.choice(ITEMS)
    mock_analyzer.process_single_phrase.return_value = spoken_item
    packer = Packer(mock_analyzer)
    packer._pack_item()
    assert packer._to_pack == set()
    assert packer._packed == set()
@mock.patch("speech_packer.SpeechAnalyzer")
def test__delete_item(mock_analyzer):
    """Deleting removes an item from whichever set holds it, if any."""
    first, second, third = ITEMS[0], ITEMS[1], ITEMS[2]
    mock_analyzer.process_single_phrase.return_value = first
    packer = Packer(mock_analyzer)
    packer._add_item()
    mock_analyzer.process_single_phrase.return_value = second
    packer._add_item()
    assert packer._to_pack == {first, second}
    packer._pack_item()
    assert packer._to_pack == {first}
    assert packer._packed == {second}
    # Deleting a name that was never added is a no-op.
    mock_analyzer.process_single_phrase.return_value = third
    packer._delete_item()
    assert packer._to_pack == {first}
    assert packer._packed == {second}
    # Deleting an item that was already packed clears it from packed.
    mock_analyzer.process_single_phrase.return_value = second
    packer._delete_item()
    assert packer._to_pack == {first}
    assert packer._packed == set()
    # Deleting an item that is still pending clears it from to-pack.
    mock_analyzer.process_single_phrase.return_value = first
    packer._delete_item()
    assert packer._to_pack == set()
    assert packer._packed == set()
| 30.140845 | 68 | 0.719626 | import mock
import pytest
import random
from speech_packer import Packer, SpeechAnalyzer
ITEMS = ['t-shirt', 'jeans', 'raincoat', 'sneakers', 'hiking boots']
@mock.patch("speech_packer.SpeechAnalyzer")
def test__add_item(mock_analyzer):
item = random.choice(ITEMS)
mock_analyzer.process_single_phrase.return_value = item
pckr = Packer(mock_analyzer)
pckr._add_item()
assert pckr._to_pack == set([item])
@mock.patch("speech_packer.SpeechAnalyzer")
def test__pack_item(mock_analyzer):
item = random.choice(ITEMS)
mock_analyzer.process_single_phrase.return_value = item
pckr = Packer(mock_analyzer)
pckr._add_item()
pckr._pack_item()
assert pckr._to_pack == set()
assert pckr._packed == set([item])
@mock.patch("speech_packer.SpeechAnalyzer")
def test__pack_item_failure(mock_analyzer):
item = random.choice(ITEMS)
mock_analyzer.process_single_phrase.return_value = item
pckr = Packer(mock_analyzer)
pckr._pack_item()
assert pckr._to_pack == set()
assert pckr._packed == set()
@mock.patch("speech_packer.SpeechAnalyzer")
def test__delete_item(mock_analyzer):
item1 = ITEMS[0]
item2 = ITEMS[1]
item3 = ITEMS[2]
mock_analyzer.process_single_phrase.return_value = item1
pckr = Packer(mock_analyzer)
pckr._add_item()
mock_analyzer.process_single_phrase.return_value = item2
pckr._add_item()
assert pckr._to_pack == set([item1, item2])
pckr._pack_item()
assert pckr._to_pack == set([item1])
assert pckr._packed == set([item2])
mock_analyzer.process_single_phrase.return_value = item3
pckr._delete_item()
assert pckr._to_pack == set([item1])
assert pckr._packed == set([item2])
mock_analyzer.process_single_phrase.return_value = item2
pckr._delete_item()
assert pckr._to_pack == set([item1])
assert pckr._packed == set()
mock_analyzer.process_single_phrase.return_value = item1
pckr._delete_item()
assert pckr._to_pack == set()
assert pckr._packed == set()
| true | true |
f739b304d34d719447a7561c7c99fb4cab2d7bb8 | 32,375 | py | Python | seq2act/models/input.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 23,901 | 2018-10-04T19:48:53.000Z | 2022-03-31T21:27:42.000Z | seq2act/models/input.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 891 | 2018-11-10T06:16:13.000Z | 2022-03-31T10:42:34.000Z | seq2act/models/input.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 6,047 | 2018-10-12T06:31:02.000Z | 2022-03-31T13:59:28.000Z | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The input function of seq2act models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from enum import Enum
from tensor2tensor.layers import common_layers
import tensorflow.compat.v1 as tf # tf
# Fixed token widths used when reshaping flat sequences: each UI object's
# text and each synthetic instruction phrase occupy exactly this many tokens.
NUM_TOKENS_PER_OBJ = 30
NUM_TOKENS_PER_SYN = 30
class DataSource(Enum):
  """The class that represents word2act data source."""
  RICO_SCA = 'rico_sca'
  ANDROID_HOWTO = 'android_howto'
  PIXEL_HELP = 'pixel_help'

  @staticmethod
  def from_str(label):
    """Maps a source-name string to the matching DataSource member.

    Args:
      label: one of 'rico_sca', 'android_howto' or 'pixel_help'.

    Returns:
      The corresponding DataSource member.

    Raises:
      ValueError: if `label` is not a recognized source name.
    """
    name_to_source = {
        'rico_sca': DataSource.RICO_SCA,
        'android_howto': DataSource.ANDROID_HOWTO,
        'pixel_help': DataSource.PIXEL_HELP,
    }
    try:
      return name_to_source[label]
    except KeyError:
      raise ValueError('Unrecognized source %s' % label)
# Maximum number of UI objects on a screen, per data source.
MAX_UI_OBJECT_NUM = {
    DataSource.PIXEL_HELP: 93,
}
# Maximum number of tokens per instruction, per data source.
MAX_TOKEN_NUM = {
    DataSource.ANDROID_HOWTO: 30,
    DataSource.RICO_SCA: 30,
    DataSource.PIXEL_HELP: 153,
}
# Token-id triples (zero-padded to width 3) used to join single-step
# instructions into multi-step ones during stitching.
# ['connect_str', token_id(connector_str)]
# token id based on all_source_lower_case_vocab_59429
PADDED_CONCATENATORS = [
    [5, 0, 0],
    [115, 0, 0],
    [8, 32, 0],
    [115, 8, 32],
    [12, 0, 0],
]
# String forms of the connectors above; index-aligned with
# PADDED_CONCATENATORS (the stitching code gathers both with the same
# random indices).
CONCATENATORS_STR = [
    ', ',
    ' , ',
    ' and then ',
    ' , and then ',
    '. '
]
def _construct_padding_info(data_source, load_dom_dist, load_extra):
  """Builds the shape/value dictionaries used for padded batching.

  Args:
    data_source: a DataSource member; selects the per-source token budget.
    load_dom_dist: whether the dom-distance feature is present.
    load_extra: whether raw-string debugging features are present.

  Returns:
    A (padded_shapes, padded_values) pair of dicts keyed by feature name,
    suitable for tf.data padded_batch.
  """
  token_num = MAX_TOKEN_NUM[data_source]
  # The model masks out padded object-type entries via this -1 anchor value.
  anchor_padding_value_int = tf.cast(-1, tf.int32)
  padding_value_int = tf.cast(0, tf.int32)
  padding_value_str = tf.cast('', tf.string)
  # Tuples of (feature name, padded shape, padding value).
  padding_info = [
      ('task', [None], padding_value_int),
      ('rule', [], padding_value_int),
      ('verbs', [None], padding_value_int),
      ('input_refs', [None, 2], padding_value_int),
      ('obj_refs', [None, 2], padding_value_int),
      ('verb_refs', [None, 2], padding_value_int),
      ('objects', [None], padding_value_int),
      ('obj_text', [None, None, token_num], padding_value_int),
      ('obj_type', [None, None], anchor_padding_value_int),
      ('obj_clickable', [None, None], padding_value_int),
      ('obj_screen_pos', [None, None, 4], tf.cast(0, tf.int32)),
      ('obj_dom_pos', [None, None, 3], padding_value_int),
      ('agreement_count', [], padding_value_int),
      ('data_source', [], padding_value_int),
  ]
  if load_dom_dist:
    padding_info.append(('obj_dom_dist', [None, None, None], padding_value_int))
  if load_extra:
    padding_info.extend([
        ('task_id', [], padding_value_str),
        ('raw_task', [], padding_value_str),
        ('obj_raw_text', [None, None], padding_value_str),
    ])
  padded_shapes = {name: shape for name, shape, _ in padding_info}
  padded_values = {name: value for name, _, value in padding_info}
  return padded_shapes, padded_values
def input_fn(data_files,
             batch_size,
             repeat=-1,
             data_source=DataSource.RICO_SCA,
             required_agreement=2,
             max_range=1000,
             max_dom_pos=2000,
             max_pixel_pos=100,
             load_dom_dist=False,
             load_extra=False,
             buffer_size=8 * 1024,
             shuffle_size=8 * 1024,
             required_rule_id_list=None,
             shuffle_repeat=True,
             mean_synthetic_length=1.0,
             stddev_synthetic_length=0.0,
             load_screen=True,
             shuffle_files=True):
  """Retrieves batches of data for training.

  Adds padding to ensure all dimension in one batch are always same.

  Args:
    data_files: A list of file names to initialize the TFRecordDataset
    batch_size: Number for the size of the batch.
    repeat: the number of times to repeat the input data.
    data_source: A DataSource instance.
    required_agreement: the minimum agreement required.
    max_range: the max range.
    max_dom_pos: the max dom pos.
    max_pixel_pos: the max screen pixels.
    load_dom_dist: whether to load the dom distance feature.
    load_extra: whether to load the raw text data.
    buffer_size: the buffer size for prefetching.
    shuffle_size: the shuffle size.
    required_rule_id_list: the list of required rule ids.
    shuffle_repeat: whether to shuffle and repeat.
    mean_synthetic_length: the mean length for synthetic sequence.
    stddev_synthetic_length: the stddev length for synthetic sequence.
    load_screen: whether to load screen features.
    shuffle_files: shuffling file names.
  Returns:
    a tf.dataset.Dateset object.
  Raises:
    ValueError: The data_format is neither 'recordio' nor 'tfrecord'.
  """
  if not isinstance(data_source, DataSource):
    assert False, 'data_source %s unsupported' % str(data_source)
  padded_shapes, padded_values = _construct_padding_info(
      data_source, load_dom_dist, load_extra)
  if not isinstance(data_files, (list,)):
    data_files = [data_files]
  # Expand glob patterns into a flat list of concrete file names.
  all_files = tf.concat(
      values=[tf.matching_files(f) for f in data_files], axis=0)
  # repeat == -1 means endless training; randomize file order in that case.
  if repeat == -1 and shuffle_files:
    all_files = tf.random.shuffle(all_files)
  if data_files[0].endswith('.recordio'):
    dataset = tf.data.RecordIODataset(all_files)
  elif data_files[0].endswith('.tfrecord'):
    dataset = tf.data.TFRecordDataset(
        all_files, num_parallel_reads=10 if repeat == -1 else None)
  else:
    assert False, 'Data_format %s is not supported.' % data_files[0]
  # Parse serialized examples into feature dicts. EOS is appended here
  # unless RICO_SCA examples will later be stitched into longer sequences
  # (mean_synthetic_length > 1), in which case _stitch appends EOS itself.
  def _map_fn(x):
    return parse_tf_example(x, data_source, max_range, max_dom_pos,
                            max_pixel_pos, load_dom_dist=load_dom_dist,
                            load_extra=load_extra,
                            append_eos=(data_source != DataSource.RICO_SCA or
                                        mean_synthetic_length == 1.0),
                            load_screen=load_screen)
  dataset = dataset.map(_map_fn)
  # Keep only examples whose annotator agreement meets the threshold.
  def _is_enough_agreement(example):
    return tf.greater_equal(example['agreement_count'], required_agreement)
  dataset = dataset.filter(_is_enough_agreement)
  # Drop overly long action sequences (>= 20 object references).
  def _length_filter(example):
    return tf.less(tf.shape(example['obj_refs'])[0], 20)
  dataset = dataset.filter(_length_filter)
  # Optionally restrict RICO_SCA examples to a whitelist of grammar rules.
  def _filter_data_by_rule(example, rule_id_list):
    return tf.reduce_any(
        [tf.equal(example['rule'], rule_id) for rule_id in rule_id_list])
  if data_source == DataSource.RICO_SCA and required_rule_id_list is not None:
    dataset = dataset.filter(
        lambda x: _filter_data_by_rule(x, required_rule_id_list))
  # (TODO: liyang) tf.data.experimental.bucket_by_sequence_length
  if shuffle_repeat:
    dataset = dataset.apply(tf.data.experimental.shuffle_and_repeat(
        shuffle_size, count=repeat))
  dataset = dataset.padded_batch(
      batch_size, padded_shapes=padded_shapes, padding_values=padded_values)
  # Stitch batches of single-step RICO_SCA examples into multi-step ones.
  if data_source == DataSource.RICO_SCA and mean_synthetic_length > 1.0:
    def _stitch_fn(x):
      return _batch_stitch(x, mean_length=mean_synthetic_length,
                           stddev=stddev_synthetic_length)
    dataset = dataset.map(_stitch_fn)
  dataset = dataset.prefetch(buffer_size=buffer_size)
  return dataset
def hybrid_input_fn(data_files_list,
                    data_source_list,
                    batch_size_list,
                    max_range=1000,
                    max_dom_pos=2000,
                    max_pixel_pos=100,
                    load_dom_dist=False,
                    load_extra=False,
                    buffer_size=8 * 1024,
                    mean_synthetic_length=1.0,
                    stddev_synthetic_length=0.0,
                    hybrid_batch_size=128,
                    boost_input=False,
                    load_screen=True,
                    shuffle_size=1024):
  """Combines multiple data sources into one mixed, re-batched dataset.

  Each source is loaded with `input_fn`, the per-source batches are
  concatenated and unbatched into a single example stream, optionally
  filtered to over-sample examples that contain a text-input action, then
  reshuffled and re-batched to `hybrid_batch_size`.

  Args:
    data_files_list: a list of data-file lists/patterns, one per source.
    data_source_list: a list of DataSource instances, parallel to
      `data_files_list`.
    batch_size_list: per-source batch sizes used when loading each source.
    max_range: the max range.
    max_dom_pos: the max dom pos.
    max_pixel_pos: the max screen pixels.
    load_dom_dist: whether to load the dom distance feature.
    load_extra: whether to load the raw text data.
    buffer_size: the buffer size for prefetching.
    mean_synthetic_length: the mean length for synthetic sequences.
    stddev_synthetic_length: the stddev length for synthetic sequences.
    hybrid_batch_size: the batch size of the mixed output dataset.
    boost_input: whether to over-sample examples with text input.
    load_screen: whether to load screen features.
    shuffle_size: multiplier of `hybrid_batch_size` for the remix shuffle.

  Returns:
    a tf.data.Dataset object.
  """
  mixed_dataset = None
  for data_files, data_source, batch_size in zip(
      data_files_list, data_source_list, batch_size_list):
    dataset = input_fn(data_files, batch_size, repeat=-1,
                       data_source=data_source,
                       required_agreement=-1,
                       max_range=max_range, max_dom_pos=max_dom_pos,
                       max_pixel_pos=max_pixel_pos,
                       load_dom_dist=load_dom_dist,
                       load_extra=load_extra,
                       buffer_size=0,
                       mean_synthetic_length=mean_synthetic_length,
                       stddev_synthetic_length=stddev_synthetic_length,
                       shuffle_repeat=False,
                       load_screen=load_screen)
    if mixed_dataset is None:
      mixed_dataset = dataset
    else:
      mixed_dataset = dataset.concatenate(mixed_dataset)
  # Flatten the per-source batches into a single stream of examples.
  mixed_dataset = mixed_dataset.unbatch()
  # Boost input examples: keep every example that has a text-input action
  # (input_refs end > start for some step) plus a random 10% of the rest.
  if boost_input:
    def _input_booster(example):
      # NOTE(review): the bare `2` in this control-dependency list looks
      # like a leftover from an intended rank assertion -- confirm.
      with tf.control_dependencies([tf.rank(example['input_refs']), 2]):
        has_input = tf.reduce_any(
            tf.greater(example['input_refs'][:, 1],
                       example['input_refs'][:, 0]))
        return tf.logical_or(has_input, tf.less(tf.random_uniform([]), 0.1))
    # Bug fix: apply the filter to the unbatched mixed stream. The previous
    # code filtered the stale per-source `dataset` variable and discarded
    # the result, which made boost_input a no-op.
    mixed_dataset = mixed_dataset.filter(_input_booster)
  # Remix single examples
  mixed_dataset = mixed_dataset.shuffle(hybrid_batch_size * shuffle_size)
  # Batch again
  padded_shapes, padded_values = _construct_padding_info(
      data_source_list[0], load_dom_dist, load_extra)
  mixed_dataset = mixed_dataset.padded_batch(
      hybrid_batch_size, padded_shapes=padded_shapes,
      padding_values=padded_values)
  mixed_dataset = mixed_dataset.repeat()
  mixed_dataset = mixed_dataset.prefetch(buffer_size=buffer_size)
  return mixed_dataset
def parse_tf_example(example_proto,
                     data_source,
                     max_range=100,
                     max_dom_pos=2000,
                     max_pixel_pos=100,
                     load_dom_dist=False,
                     load_extra=False,
                     append_eos=True,
                     load_screen=True):
  """Parses an example TFRecord proto into dictionary of tensors.

  Args:
    example_proto: TFRecord format proto that contains screen information.
    data_source: A DataSource instance.
    max_range: the max range.
    max_dom_pos: the maximum dom positoin.
    max_pixel_pos: the max dom position.
    load_dom_dist: whether to load the feature.
    load_extra: whether to load the extra data for debugging.
    append_eos: whether to append eos.
    load_screen: whether to load screen features.
  Returns:
    feature: The parsed tensor dictionary with the input feature data
    label: The parsed label tensor with the input label for the feature
  """
  # Base spec: instruction tokens, reference spans and annotation metadata.
  feature_spec = {
      'instruction_word_id_seq':
          tf.FixedLenSequenceFeature([], tf.int64, allow_missing=True),
      'input_str_position_seq':
          tf.FixedLenSequenceFeature([], tf.int64, allow_missing=True),
      'obj_desc_position_seq':
          tf.FixedLenSequenceFeature([], tf.int64, allow_missing=True),
      'verb_str_position_seq':
          tf.FixedLenSequenceFeature([], tf.int64, allow_missing=True),
      'agreement_count':
          tf.FixedLenSequenceFeature([], tf.int64, allow_missing=True),
      'instruction_rule_id':
          tf.FixedLenSequenceFeature([], tf.int64, allow_missing=True)
  }
  # Optional groups of features, enabled by the load_* flags.
  if load_screen:
    feature_spec['verb_id_seq'] = tf.FixedLenSequenceFeature(
        [], tf.int64, allow_missing=True)
    feature_spec['ui_target_id_seq'] = tf.FixedLenSequenceFeature(
        [], tf.int64, allow_missing=True)
    feature_spec['ui_obj_word_id_seq'] = tf.FixedLenSequenceFeature(
        [], tf.int64, allow_missing=True)
    feature_spec['ui_obj_type_id_seq'] = tf.FixedLenSequenceFeature(
        [], tf.int64, allow_missing=True)
    feature_spec['ui_obj_clickable_seq'] = tf.FixedLenSequenceFeature(
        [], tf.int64, allow_missing=True)
    feature_spec['ui_obj_cord_x_seq'] = tf.FixedLenSequenceFeature(
        [], tf.float32, allow_missing=True)
    feature_spec['ui_obj_cord_y_seq'] = tf.FixedLenSequenceFeature(
        [], tf.float32, allow_missing=True)
    feature_spec['ui_obj_dom_location_seq'] = tf.FixedLenSequenceFeature(
        [], tf.int64, allow_missing=True)
  if load_dom_dist:
    feature_spec['ui_obj_dom_distance'] = tf.FixedLenSequenceFeature(
        [], tf.int64, allow_missing=True)
  if load_extra:
    feature_spec['instruction_str'] = tf.FixedLenSequenceFeature(
        [], tf.string, allow_missing=True)
    feature_spec['task_id'] = tf.FixedLenSequenceFeature(
        [], tf.string, allow_missing=True)
    feature_spec['ui_obj_str_seq'] = tf.FixedLenSequenceFeature(
        [], tf.string, allow_missing=True)
  feature_dict = tf.parse_single_example(example_proto, feature_spec)
  # Downcast all integer features to int32 for the model.
  for key in feature_dict:
    if feature_dict[key].dtype == tf.int64:
      feature_dict[key] = tf.cast(feature_dict[key], tf.int32)
  # Dispatch to the source-specific feature builder.
  if data_source == DataSource.ANDROID_HOWTO:
    tf.logging.info('Parsing android_howto dataset')
    feature = _process_android_howto(feature_dict, max_range=max_range,
                                     load_dom_dist=load_dom_dist,
                                     load_extra=load_extra)
  elif data_source == DataSource.RICO_SCA:
    tf.logging.info('Parsing synthetic dataset')
    feature = _process_rico_sca(
        feature_dict, max_range=max_range, max_dom_pos=max_dom_pos,
        load_dom_dist=load_dom_dist,
        load_extra=load_extra,
        load_screen=load_screen)
  elif data_source == DataSource.PIXEL_HELP:
    tf.logging.info('Parsing test dataset')
    feature = _process_pixel_help(feature_dict, data_source,
                                  load_dom_dist=load_dom_dist,
                                  load_extra=load_extra)
  else:
    raise ValueError('Unsupported datasource %s' % str(data_source))
  # Remove padding from "task"
  feature['task'] = tf.boolean_mask(feature['task'],
                                    tf.not_equal(feature['task'], 0))
  # Discretize normalized screen coordinates into [0, max_pixel_pos - 1].
  feature['obj_screen_pos'] = tf.to_int32(
      feature['obj_screen_pos'] * (max_pixel_pos - 1))
  # Appending EOS and padding to match the appended length
  if append_eos:
    # input/obj refs get an empty span; verb_refs points at the appended
    # EOS token (id 1) added to 'task' below. All screen features grow one
    # step of padding so their step dimension stays aligned.
    feature['input_refs'] = tf.pad(feature['input_refs'], [[0, 1], [0, 0]])
    feature['obj_refs'] = tf.pad(feature['obj_refs'], [[0, 1], [0, 0]])
    step_num = tf.size(feature['task'])
    feature['verb_refs'] = tf.concat(
        [feature['verb_refs'], [[step_num, step_num + 1]]], axis=0)
    feature['task'] = tf.pad(feature['task'], [[0, 1]], constant_values=1)
    feature['obj_text'] = tf.pad(feature['obj_text'], [[0, 1], [0, 0], [0, 0]])
    feature['obj_clickable'] = tf.pad(feature['obj_clickable'],
                                      [[0, 1], [0, 0]])
    # obj_type uses -1 padding so the model's anchor value masks it out.
    feature['obj_type'] = tf.pad(
        feature['obj_type'], [[0, 1], [0, 0]], constant_values=-1)
    feature['obj_screen_pos'] = tf.pad(feature['obj_screen_pos'],
                                       [[0, 1], [0, 0], [0, 0]])
    feature['obj_dom_pos'] = tf.pad(feature['obj_dom_pos'],
                                    [[0, 1], [0, 0], [0, 0]])
    if load_dom_dist:
      feature['obj_dom_dist'] = tf.pad(feature['obj_dom_dist'],
                                       [[0, 1], [0, 0], [0, 0]])
    feature['objects'] = tf.pad(feature['objects'], [[0, 1]])
    feature['verbs'] = tf.pad(feature['verbs'], [[0, 1]])
  return feature
def _bound_refs(feature, max_range):
  """Clips every '*_refs' span in `feature` (in place) to `max_range`.

  A span [start, end] with end - start > max_range is replaced by
  [start, start + max_range]; other spans are left untouched.
  """
  ref_keys = [key for key in feature if key.endswith('_refs')]
  for key in ref_keys:
    refs = feature[key]
    starts = refs[:, 0]
    too_long = tf.greater(refs[:, 1] - starts, max_range)
    clipped = tf.stack([starts, starts + max_range], axis=1)
    feature[key] = tf.where(too_long, clipped, refs)
def _process_android_howto(feature_dict, max_range, load_dom_dist=False,
                           load_extra=False):
  """Builds the model feature dict for an AndroidHowTo example.

  Args:
    feature_dict: the parsed tf.Example feature dictionary.
    max_range: the max reference span length.
    load_dom_dist: whether to load the dom distance feature.
    load_extra: whether to also keep raw strings for debugging.

  Returns:
    A processed feature dictionary.
  """
  reshape_spec = [
      ('task', 'instruction_word_id_seq', [-1]),
      ('input_refs', 'input_str_position_seq', [-1, 2]),
      ('obj_refs', 'obj_desc_position_seq', [-1, 2]),
      ('verb_refs', 'verb_str_position_seq', [-1, 2]),
      ('agreement_count', 'agreement_count', []),
  ]
  feature = {
      key: tf.reshape(feature_dict[src], shape)
      for key, src, shape in reshape_spec
  }
  if load_extra:
    feature['task_id'] = tf.constant('empty_task_id', dtype=tf.string)
    feature['raw_task'] = tf.reshape(feature_dict['instruction_str'], [])
  # Clip over-long reference spans, then attach placeholder screen
  # features (this source carries no real screens).
  _bound_refs(feature, max_range)
  _load_fake_screen(feature, load_extra, load_dom_dist)
  return feature
def _load_fake_screen(feature, load_extra, load_dom_dist):
  """Fills `feature` in place with placeholder (all-zero) screen features.

  Used for data sources that carry instructions but no real UI screens;
  every step is given a single dummy UI object.
  """
  num_steps = tf.shape(feature['verb_refs'])[0]
  num_objs = 1
  if load_extra:
    feature['obj_raw_text'] = tf.fill([num_steps, num_objs], '')
  feature['data_source'] = tf.constant(1, dtype=tf.int32)
  feature['rule'] = tf.constant(5, dtype=tf.int32)
  # Object type is -1 so the model's anchor padding value masks it out.
  feature['obj_type'] = tf.cast(tf.fill([num_steps, num_objs], -1), tf.int32)
  feature['obj_text'] = tf.zeros([num_steps, num_objs, NUM_TOKENS_PER_OBJ],
                                 tf.int32)
  feature['obj_clickable'] = tf.zeros([num_steps, num_objs], tf.int32)
  feature['obj_screen_pos'] = tf.zeros([num_steps, num_objs, 4], tf.float32)
  feature['obj_dom_pos'] = tf.zeros([num_steps, num_objs, 3], tf.int32)
  if load_dom_dist:
    feature['obj_dom_dist'] = tf.zeros([num_steps, num_objs, num_objs],
                                       tf.int32)
  feature['objects'] = tf.zeros([num_steps], tf.int32)
  feature['verbs'] = tf.zeros([num_steps], tf.int32)
def _batch_stitch(features, mean_length=4.0, stddev=2.0):
  """Stitches a batch of single-step data to a batch of multi-step data.

  Randomly partitions the incoming batch of single-step examples into
  `num_sequences` rows of up to `max_length` steps each (target lengths
  drawn from a truncated normal with the given mean/stddev), drops rows
  that end up all-padding, compacts real steps to the front of each row,
  and finally delegates to `_stitch` to concatenate the steps.

  Args:
    features: dict of batched single-step feature tensors (modified in
      place before being stitched).
    mean_length: mean number of steps per synthetic sequence.
    stddev: stddev of the number of steps per synthetic sequence.

  Returns:
    The stitched feature dictionary produced by `_stitch`.
  """
  batch_size = common_layers.shape_list(features['task'])[0]
  num_sequences = tf.maximum(
      tf.to_int32(tf.to_float(batch_size) / mean_length), 1)
  lengths = tf.random.truncated_normal(shape=[num_sequences],
                                       mean=mean_length, stddev=stddev)
  # Scale so that num_sequences * max_length >= batch_size.
  max_length = tf.reduce_max(lengths) * (
      tf.to_float(batch_size) / tf.reduce_sum(lengths))
  max_length = tf.to_int32(tf.ceil(max_length))
  total_items = max_length * num_sequences
  num_paddings = total_items - batch_size
  indices = tf.random.shuffle(tf.range(total_items))
  # Pad every feature to total_items, shuffle the items with one shared
  # permutation, and fold into [num_sequences, max_length, ...].
  for key in features:
    shape_list = common_layers.shape_list(features[key])
    assert len(shape_list) >= 1
    with tf.control_dependencies([
        tf.assert_greater_equal(num_paddings, 0,
                                name='num_paddings_positive')]):
      paddings = [[0, num_paddings]] + [[0, 0]] * (len(shape_list) - 1)
    features[key] = tf.pad(features[key], paddings,
                           constant_values=-1 if key == 'obj_type' else 0)
    features[key] = tf.gather(features[key], indices)
    shape = [num_sequences, max_length]
    if len(shape_list) >= 2:
      shape += shape_list[1:]
    features[key] = tf.reshape(features[key], shape)
  # Remove all-padding seqs
  step_mask = tf.reduce_any(tf.greater(features['task'], 1), axis=-1)
  mask = tf.reduce_any(step_mask, axis=-1)
  step_mask = tf.boolean_mask(step_mask, mask)
  for key in features:
    features[key] = tf.boolean_mask(features[key], mask=mask)
  num_sequences = tf.shape(features['task'])[0]
  # Sort steps within each seq
  # top_k on the 0/1 step mask moves real steps to the front; ties keep
  # their original index order, so relative step order is preserved.
  _, step_indices = tf.math.top_k(tf.to_int32(step_mask), k=max_length)
  step_indices = step_indices + tf.expand_dims(
      tf.range(num_sequences) * max_length, 1)
  step_indices = tf.reshape(step_indices, [-1])
  for key in features:
    shape_list = common_layers.shape_list(features[key])
    features[key] = tf.gather(tf.reshape(features[key], [-1] + shape_list[2:]),
                              step_indices)
    features[key] = tf.reshape(features[key], shape_list)
  features = _stitch(features)
  return features
def _stitch(features):
  """Stitch features on the first dimension.

  Concatenates the per-step features of each row into one multi-step
  example: step instructions are joined with randomly chosen connector
  phrases, reference spans are shifted by the accumulated token offsets,
  and EOS entries are appended to 'task', 'verbs' and 'verb_refs'.
  Token-id convention in this file: 0 is padding, 1 is EOS, >1 are real
  tokens.

  Args:
    features: dict of [num_sequences, num_steps, ...] tensors (modified in
      place).

  Returns:
    A new dict of stitched feature tensors.
  """
  full_mask = tf.greater(features['task'], 1)
  step_mask = tf.reduce_any(full_mask, axis=-1)
  # True for every non-empty step except the last one in each row --
  # connectors are only inserted *between* steps.
  step_mask_exclude_last = tf.pad(step_mask,
                                  [[0, 0], [0, 1]],
                                  constant_values=False)[:, 1:]
  num_sequences = common_layers.shape_list(features['task'])[0]
  num_steps = common_layers.shape_list(features['task'])[1]
  connectors = tf.constant(PADDED_CONCATENATORS)
  # Select connectors
  connector_indices = tf.random.uniform(
      [num_sequences * num_steps], minval=0,
      maxval=len(PADDED_CONCATENATORS), dtype=tf.int32)
  selected_connectors = tf.reshape(
      tf.gather(connectors, connector_indices),
      [num_sequences, num_steps, len(PADDED_CONCATENATORS[0])])
  # Zero out connectors after the last step of each row.
  selected_connectors = tf.multiply(
      selected_connectors,
      tf.expand_dims(tf.to_int32(step_mask_exclude_last), 2),
      name='connector_mask')
  features['task'] = tf.concat([features['task'], selected_connectors], axis=-1)
  # Per-step token offsets: exclusive cumulative count of real tokens,
  # used below to shift reference spans into the stitched sequence.
  ref_offsets = tf.expand_dims(
      tf.cumsum(tf.reduce_sum(tf.to_int32(tf.greater(features['task'], 1)), -1),
                exclusive=True, axis=-1), 2)
  features['task'] = tf.reshape(features['task'], [num_sequences, -1])
  full_mask = tf.greater(features['task'], 1)
  full_mask_int = tf.to_int32(full_mask)
  # Compact the real tokens of each row to the left via a sparse tensor.
  indices = tf.where(tf.sequence_mask(lengths=tf.reduce_sum(full_mask_int, -1)))
  values = tf.boolean_mask(tf.reshape(features['task'], [-1]),
                           tf.reshape(full_mask, [-1]))
  sparse_task = tf.sparse.SparseTensor(
      indices=indices, values=values,
      dense_shape=tf.to_int64(tf.shape(features['task'])))
  # Stitch task and raw_task
  stitched_features = {}
  stitched_features['task'] = tf.sparse_tensor_to_dense(sparse_task)
  max_len = tf.reduce_max(
      tf.reduce_sum(tf.to_int32(tf.greater(stitched_features['task'], 1)), -1))
  stitched_features['task'] = stitched_features['task'][:, :max_len]
  if 'raw_task' in features:
    # Join the raw instruction strings with the string form of the same
    # connectors (CONCATENATORS_STR is index-aligned with
    # PADDED_CONCATENATORS).
    connector_strs = tf.reshape(
        tf.gather(tf.constant(CONCATENATORS_STR), connector_indices),
        [num_sequences, num_steps])
    masked_connector_strs = tf.where(
        step_mask_exclude_last,
        connector_strs, tf.fill(tf.shape(connector_strs), ''))
    stitched_features['raw_task'] = tf.strings.reduce_join(
        tf.strings.reduce_join(tf.concat([
            tf.expand_dims(features['raw_task'], 2),
            tf.expand_dims(masked_connector_strs, 2)], axis=2), axis=-1), -1)
  # Stitch screen sequences
  # A step is a real action when its verb_refs span is non-empty.
  action_lengths = tf.reduce_sum(tf.to_int32(
      tf.greater(features['verb_refs'][:, :, 0, 1],
                 features['verb_refs'][:, :, 0, 0])), -1)
  max_action_length = tf.reduce_max(action_lengths)
  def _pad(tensor, padding_value=0):
    # Trims to max_action_length steps and appends one padded step (the
    # slot used for the EOS action below).
    shape_list = common_layers.shape_list(tensor)
    assert len(shape_list) >= 2
    padding_list = [[0, 0], [0, 1]] + [[0, 0]] * (len(shape_list) - 2)
    return tf.pad(tensor[:, :max_action_length],
                  padding_list, constant_values=padding_value)
  for key in features.keys():
    if key.endswith('_refs'):
      features[key] = tf.squeeze(features[key], 2)
      # Shift non-empty spans by the per-step token offset; empty spans
      # stay [0, 0].
      ref_mask = tf.expand_dims(tf.to_int32(
          tf.not_equal(features[key][:, :, 0],
                       features[key][:, :, 1])), 2)
      stitched_features[key] = tf.multiply(
          (features[key] + ref_offsets), ref_mask, name='ref_mask')
      stitched_features[key] = _pad(stitched_features[key])
    elif key in ['verbs', 'objects', 'consumed', 'obj_dom_pos',
                 'obj_text', 'obj_type', 'obj_clickable', 'obj_screen_pos',
                 'verb_refs', 'obj_refs', 'input_refs', 'obj_dom_dist']:
      features[key] = tf.squeeze(features[key], 2)
      stitched_features[key] = features[key]
      stitched_features[key] = _pad(
          stitched_features[key],
          padding_value=-1 if key == 'obj_type' else 0)
    elif key not in ['task', 'raw_task']:
      # Scalar-per-sequence features: keep the first step's value.
      stitched_features[key] = features[key][:, 0]
  # Append eos to 'task'
  stitched_features['task'] = tf.pad(stitched_features['task'],
                                     [[0, 0], [0, 1]])
  task_mask = tf.to_int32(tf.greater(stitched_features['task'], 1))
  task_eos_mask = tf.pad(task_mask, [[0, 0], [1, 0]], constant_values=1)[:, :-1]
  # The first padding position after the real tokens becomes EOS (id 1).
  stitched_features['task'] = stitched_features['task'] + (
      task_eos_mask - task_mask)
  # Append eos
  verb_mask = tf.to_int32(tf.greater(stitched_features['verbs'], 1))
  verb_eos_mask = tf.pad(verb_mask, [[0, 0], [1, 0]], constant_values=1)[:, :-1]
  verb_eos = verb_eos_mask - verb_mask
  stitched_features['verbs'] = stitched_features['verbs'] + verb_eos
  # Append last step refs to 'verb_refs'
  # The final verb_refs span points at the EOS token appended to 'task'.
  task_lengths = tf.where(tf.equal(stitched_features['task'], 1))[:, 1]
  eos_pos = tf.to_int32(tf.stack([task_lengths, task_lengths + 1], axis=1))
  action_mask = tf.to_int32(
      tf.sequence_mask(action_lengths, max_action_length + 1))
  action_and_eos_mask = tf.pad(action_mask, [[0, 0], [1, 0]],
                               constant_values=1)[:, :-1]
  verb_ref_eos = action_and_eos_mask - action_mask
  eos_refs = tf.multiply(
      tf.tile(tf.expand_dims(eos_pos, 1), [1, max_action_length + 1, 1]),
      tf.expand_dims(verb_ref_eos, 2), name='verb_ref_eos')
  stitched_features['verb_refs'] += eos_refs
  return stitched_features
def _process_rico_sca(feature_dict, max_range, max_dom_pos,
                      load_dom_dist=False, load_extra=False, load_screen=True):
  """Processes one_shot feature dictionary.

  Args:
    feature_dict: feature dictionary
    max_range: the max range (maximum reference span length, in tokens).
    max_dom_pos: the max dom pos (DOM coordinates are clipped below this).
    load_dom_dist: whether to load the dom distance feature.
    load_extra: whether to load the extra data for debugging.
    load_screen: whether to load the screen features.
  Returns:
    A processed feature dictionary.
  """
  # Each example packs several synthetic phrases; obj_desc_position_seq holds
  # one (start, end) pair per phrase, hence the division by 2.
  phrase_count = tf.size(feature_dict['obj_desc_position_seq']) // 2
  feature = {
      'task':
          tf.reshape(feature_dict['instruction_word_id_seq'],
                     [phrase_count, NUM_TOKENS_PER_SYN]),
      'input_refs':
          tf.reshape(feature_dict['input_str_position_seq'],
                     [phrase_count, 1, 2]),
      'obj_refs':
          tf.reshape(feature_dict['obj_desc_position_seq'],
                     [phrase_count, 1, 2]),
      'verb_refs':
          tf.reshape(feature_dict['verb_str_position_seq'],
                     [phrase_count, 1, 2]),
      'rule':
          tf.reshape(feature_dict['instruction_rule_id'], [phrase_count]),
  }
  # Sample exactly one phrase (action) uniformly from this example.
  selected_synthetic_action_idx = tf.random_uniform(
      shape=(), minval=0, maxval=phrase_count, dtype=tf.int32)
  for key in feature:
    feature[key] = feature[key][selected_synthetic_action_idx]
  if load_extra:
    feature['raw_task'] = tf.reshape(
        feature_dict['instruction_str'],
        [phrase_count])[selected_synthetic_action_idx]
    feature['task_id'] = tf.constant('empty_task_id', dtype=tf.string)
  if load_screen:
    feature['verbs'] = tf.reshape(
        feature_dict['verb_id_seq'],
        [phrase_count, 1])[selected_synthetic_action_idx]
    feature['objects'] = tf.reshape(
        feature_dict['ui_target_id_seq'],
        [phrase_count, 1])[selected_synthetic_action_idx]
    # Screen features are shared across phrases: one step, variable object num.
    feature['obj_text'] = tf.reshape(feature_dict['ui_obj_word_id_seq'],
                                     [1, -1, NUM_TOKENS_PER_OBJ])
    feature['obj_type'] = tf.reshape(
        feature_dict['ui_obj_type_id_seq'], [1, -1])
    feature['obj_clickable'] = tf.reshape(feature_dict['ui_obj_clickable_seq'],
                                          [1, -1])
    def _make_obj_screen_pos():
      # [1, num_obj, 4]: (x0, x1, y0, y1) per object.
      return tf.concat([
          tf.reshape(feature_dict['ui_obj_cord_x_seq'], [1, -1, 2]),
          tf.reshape(feature_dict['ui_obj_cord_y_seq'], [1, -1, 2])
      ], 2)
    # Guard against examples with no coordinate data: fall back to zeros.
    feature['obj_screen_pos'] = tf.cond(
        tf.equal(
            tf.size(feature_dict['ui_obj_cord_x_seq']),
            0), lambda: tf.fill([1, tf.shape(feature['obj_type'])[1], 4], 0.),
        _make_obj_screen_pos)
    feature['obj_dom_pos'] = tf.reshape(feature_dict['ui_obj_dom_location_seq'],
                                        [1, -1, 3])
    # Clip DOM coordinates so downstream embedding lookups stay in range.
    feature['obj_dom_pos'] = tf.minimum(feature['obj_dom_pos'], max_dom_pos - 1)
    if load_dom_dist:
      # The distance matrix is stored flattened; recover its square side.
      num_ui_obj = tf.to_int32(
          tf.sqrt(tf.to_float(tf.size(feature_dict['ui_obj_dom_distance']))))
      feature['obj_dom_dist'] = tf.reshape(feature_dict['ui_obj_dom_distance'],
                                           [1, num_ui_obj, num_ui_obj])
    if load_extra:
      feature['obj_raw_text'] = tf.reshape(feature_dict['ui_obj_str_seq'],
                                           [1, -1])
  else:
    _load_fake_screen(feature, load_extra, load_dom_dist)
  _bound_refs(feature, max_range)
  # 0 identifies RICO_SCA in the 'data_source' feature.
  feature['data_source'] = tf.constant(0, dtype=tf.int32)
  feature['agreement_count'] = tf.constant(100, dtype=tf.int32)
  return feature
def _process_pixel_help(feature_dict, data_source, load_dom_dist=False,
                        load_extra=False):
  """Processes testing data feature dictionary.

  Args:
    feature_dict: feature dictionary
    data_source: TEST_PIXEL_HELP
    load_dom_dist: whether to load the dom distance feature.
    load_extra: whether to load the extra data for debugging.
  Returns:
    A processed feature dictionary.
  """
  # One step per action in the episode.
  step_num = tf.size(feature_dict['verb_id_seq'])
  feature = {
      'task':
          tf.reshape(feature_dict['instruction_word_id_seq'], [-1]),
      'obj_text':
          tf.reshape(feature_dict['ui_obj_word_id_seq'], [
              step_num, MAX_UI_OBJECT_NUM[data_source],
              MAX_TOKEN_NUM[data_source]
          ]),
      'obj_type':
          tf.reshape(feature_dict['ui_obj_type_id_seq'],
                     [step_num, MAX_UI_OBJECT_NUM[data_source]]),
      'obj_clickable':
          tf.reshape(feature_dict['ui_obj_clickable_seq'],
                     [step_num, MAX_UI_OBJECT_NUM[data_source]]),
      # pylint: disable=g-long-ternary
      # (x0, x1, y0, y1) per object, per step.
      'obj_screen_pos': (
          tf.reshape(tf.concat([
              tf.reshape(feature_dict['ui_obj_cord_x_seq'], [step_num, -1, 2]),
              tf.reshape(feature_dict['ui_obj_cord_y_seq'], [step_num, -1, 2])
          ], axis=2), [step_num, MAX_UI_OBJECT_NUM[data_source], 4])),
      'obj_dom_pos':
          tf.reshape(feature_dict['ui_obj_dom_location_seq'],
                     [step_num, MAX_UI_OBJECT_NUM[data_source], 3]),
      'verbs':
          tf.reshape(feature_dict['verb_id_seq'], [step_num]),
      'objects':
          tf.reshape(feature_dict['ui_target_id_seq'], [step_num]),
      'input_refs':
          tf.reshape(feature_dict['input_str_position_seq'], [step_num, 2]),
      'obj_refs':
          tf.reshape(feature_dict['obj_desc_position_seq'], [step_num, 2]),
      'verb_refs':  # No data for Pixel on the field
          tf.zeros([step_num, 2], tf.int32),
      'agreement_count':
          tf.constant(100, dtype=tf.int32),
  }
  if load_dom_dist:
    feature['obj_dom_dist'] = tf.reshape(
        feature_dict['ui_obj_dom_distance'],
        [step_num, MAX_UI_OBJECT_NUM[data_source],
         MAX_UI_OBJECT_NUM[data_source]])
  # NOTE(review): rule id 5 presumably marks real (non-synthetic)
  # instructions, matching _load_fake_screen — confirm against rule vocab.
  feature['rule'] = tf.constant(5, dtype=tf.int32)
  if load_extra:
    feature['task_id'] = tf.reshape(feature_dict['task_id'], [])
    feature['raw_task'] = tf.reshape(feature_dict['instruction_str'], [])
    feature['obj_raw_text'] = tf.reshape(
        feature_dict['ui_obj_str_seq'],
        [step_num, MAX_UI_OBJECT_NUM[data_source]])
  # 2 identifies PIXEL_HELP in the 'data_source' feature.
  feature['data_source'] = tf.constant(2, dtype=tf.int32)
  return feature
| 42.486877 | 80 | 0.652942 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from enum import Enum
from tensor2tensor.layers import common_layers
import tensorflow.compat.v1 as tf
# Number of word-id tokens reserved per UI-object description.
NUM_TOKENS_PER_OBJ = 30
# Number of word-id tokens reserved per synthetic instruction phrase.
NUM_TOKENS_PER_SYN = 30
class DataSource(Enum):
  """Supported dataset sources; the value is the on-disk label string."""

  RICO_SCA = 'rico_sca'
  ANDROID_HOWTO = 'android_howto'
  PIXEL_HELP = 'pixel_help'

  @staticmethod
  def from_str(label):
    """Returns the member whose value equals `label`.

    Raises:
      ValueError: if `label` is not one of the known source labels.
    """
    for member in DataSource:
      if member.value == label:
        return member
    raise ValueError('Unrecognized source %s' % label)
# Maximum number of UI objects per screen, keyed by data source.
MAX_UI_OBJECT_NUM = {
    DataSource.PIXEL_HELP: 93,
}
# Maximum number of tokens per instruction, keyed by data source.
MAX_TOKEN_NUM = {
    DataSource.ANDROID_HOWTO: 30,
    DataSource.RICO_SCA: 30,
    DataSource.PIXEL_HELP: 153,
}
# Token-id triples used to join consecutive synthetic phrases (0 = padding);
# parallel to CONCATENATORS_STR below.
PADDED_CONCATENATORS = [
    [5, 0, 0],
    [115, 0, 0],
    [8, 32, 0],
    [115, 8, 32],
    [12, 0, 0],
]
# String counterparts of PADDED_CONCATENATORS, used to stitch raw task text.
CONCATENATORS_STR = [
    ', ',
    ' , ',
    ' and then ',
    ' , and then ',
    '. '
]
def _construct_padding_info(data_source, load_dom_dist, load_extra):
  """Builds the (padded_shapes, padded_values) dicts for padded batching.

  Mirrors the feature set produced by the per-source processing functions:
  each feature name maps to its padded shape and its padding value.
  """
  token_num = MAX_TOKEN_NUM[data_source]
  anchor_pad = tf.cast(-1, tf.int32)  # -1 marks "no object" for obj_type.
  int_pad = tf.cast(0, tf.int32)
  str_pad = tf.cast('', tf.string)
  padding_info = [
      ('task', [None], int_pad),
      ('rule', [], int_pad),
      ('verbs', [None], int_pad),
      ('input_refs', [None, 2], int_pad),
      ('obj_refs', [None, 2], int_pad),
      ('verb_refs', [None, 2], int_pad),
      ('objects', [None], int_pad),
      ('obj_text', [None, None, token_num], int_pad),
      ('obj_type', [None, None], anchor_pad),
      ('obj_clickable', [None, None], int_pad),
      ('obj_screen_pos', [None, None, 4], int_pad),
      ('obj_dom_pos', [None, None, 3], int_pad),
      ('agreement_count', [], int_pad),
      ('data_source', [], int_pad),
  ]
  if load_dom_dist:
    padding_info.append(('obj_dom_dist', [None, None, None], int_pad))
  if load_extra:
    padding_info.extend([
        ('task_id', [], str_pad),
        ('raw_task', [], str_pad),
        ('obj_raw_text', [None, None], str_pad),
    ])
  padded_shapes = {name: shape for name, shape, _ in padding_info}
  padded_values = {name: value for name, _, value in padding_info}
  return padded_shapes, padded_values
def input_fn(data_files,
             batch_size,
             repeat=-1,
             data_source=DataSource.RICO_SCA,
             required_agreement=2,
             max_range=1000,
             max_dom_pos=2000,
             max_pixel_pos=100,
             load_dom_dist=False,
             load_extra=False,
             buffer_size=8 * 1024,
             shuffle_size=8 * 1024,
             required_rule_id_list=None,
             shuffle_repeat=True,
             mean_synthetic_length=1.0,
             stddev_synthetic_length=0.0,
             load_screen=True,
             shuffle_files=True):
  """Builds the tf.data pipeline for one data source.

  Reads serialized tf.Examples from `data_files`, parses them with
  `parse_tf_example`, filters by annotator agreement / length / rule id,
  shuffle-repeats, pads into batches and (for RICO_SCA with
  mean_synthetic_length > 1) stitches several one-step examples into longer
  multi-step sequences.  Returns a tf.data.Dataset of padded feature dicts.
  """
  if not isinstance(data_source, DataSource):
    assert False, 'data_source %s unsupported' % str(data_source)
  padded_shapes, padded_values = _construct_padding_info(
      data_source, load_dom_dist, load_extra)
  if not isinstance(data_files, (list,)):
    data_files = [data_files]
  # Expand glob patterns into a flat file list.
  all_files = tf.concat(
      values=[tf.matching_files(f) for f in data_files], axis=0)
  # Only shuffle files when training (infinite repeat).
  if repeat == -1 and shuffle_files:
    all_files = tf.random.shuffle(all_files)
  if data_files[0].endswith('.recordio'):
    dataset = tf.data.RecordIODataset(all_files)
  elif data_files[0].endswith('.tfrecord'):
    dataset = tf.data.TFRecordDataset(
        all_files, num_parallel_reads=10 if repeat == -1 else None)
  else:
    assert False, 'Data_format %s is not supported.' % data_files[0]

  def _map_fn(x):
    # EOS is appended here only when no stitching will happen later;
    # otherwise _batch_stitch/_stitch append it after concatenation.
    return parse_tf_example(x, data_source, max_range, max_dom_pos,
                            max_pixel_pos, load_dom_dist=load_dom_dist,
                            load_extra=load_extra,
                            append_eos=(data_source != DataSource.RICO_SCA or
                                        mean_synthetic_length == 1.0),
                            load_screen=load_screen)
  dataset = dataset.map(_map_fn)

  def _is_enough_agreement(example):
    # Keep only examples annotated consistently by enough raters.
    return tf.greater_equal(example['agreement_count'], required_agreement)
  dataset = dataset.filter(_is_enough_agreement)

  def _length_filter(example):
    # Drop pathologically long episodes (>= 20 steps).
    return tf.less(tf.shape(example['obj_refs'])[0], 20)
  dataset = dataset.filter(_length_filter)

  def _filter_data_by_rule(example, rule_id_list):
    return tf.reduce_any(
        [tf.equal(example['rule'], rule_id) for rule_id in rule_id_list])
  if data_source == DataSource.RICO_SCA and required_rule_id_list is not None:
    dataset = dataset.filter(
        lambda x: _filter_data_by_rule(x, required_rule_id_list))
  if shuffle_repeat:
    dataset = dataset.apply(tf.data.experimental.shuffle_and_repeat(
        shuffle_size, count=repeat))
  dataset = dataset.padded_batch(
      batch_size, padded_shapes=padded_shapes, padding_values=padded_values)
  if data_source == DataSource.RICO_SCA and mean_synthetic_length > 1.0:
    # Combine several single-action examples into longer synthetic episodes.
    def _stitch_fn(x):
      return _batch_stitch(x, mean_length=mean_synthetic_length,
                           stddev=stddev_synthetic_length)
    dataset = dataset.map(_stitch_fn)
  dataset = dataset.prefetch(buffer_size=buffer_size)
  return dataset
def hybrid_input_fn(data_files_list,
                    data_source_list,
                    batch_size_list,
                    max_range=1000,
                    max_dom_pos=2000,
                    max_pixel_pos=100,
                    load_dom_dist=False,
                    load_extra=False,
                    buffer_size=8 * 1024,
                    mean_synthetic_length=1.0,
                    stddev_synthetic_length=0.0,
                    hybrid_batch_size=128,
                    boost_input=False,
                    load_screen=True,
                    shuffle_size=1024):
  """Builds a tf.data pipeline that mixes examples from several sources.

  Each (data_files, data_source, batch_size) triple is turned into its own
  `input_fn` pipeline; the per-source batches are concatenated, unbatched to
  single examples, optionally filtered to over-sample examples that contain
  text input (`boost_input`), then shuffled and re-batched into hybrid
  batches of size `hybrid_batch_size`.  Returns an infinitely repeating
  tf.data.Dataset of padded feature dicts.
  """
  mixed_dataset = None
  for data_files, data_source, batch_size in zip(
      data_files_list, data_source_list, batch_size_list):
    dataset = input_fn(data_files, batch_size, repeat=-1,
                       data_source=data_source,
                       required_agreement=-1,
                       max_range=max_range, max_dom_pos=max_dom_pos,
                       max_pixel_pos=max_pixel_pos,
                       load_dom_dist=load_dom_dist,
                       load_extra=load_extra,
                       buffer_size=0,
                       mean_synthetic_length=mean_synthetic_length,
                       stddev_synthetic_length=stddev_synthetic_length,
                       shuffle_repeat=False,
                       load_screen=load_screen)
    if mixed_dataset is None:
      mixed_dataset = dataset
    else:
      mixed_dataset = dataset.concatenate(mixed_dataset)
  # Flatten the per-source batches back into single examples before mixing.
  mixed_dataset = mixed_dataset.unbatch()
  if boost_input:
    def _input_booster(example):
      # NOTE(review): the literal 2 inside control_dependencies is converted
      # to a constant tensor and is a no-op; presumably a rank-2 assertion
      # was intended — confirm upstream.
      with tf.control_dependencies([tf.rank(example['input_refs']), 2]):
        has_input = tf.reduce_any(
            tf.greater(example['input_refs'][:, 1],
                       example['input_refs'][:, 0]))
      # Keep all examples with text input, plus a 10% sample of the rest.
      return tf.logical_or(has_input, tf.less(tf.random_uniform([]), 0.1))
    # Bug fix: filter the mixed stream.  The original applied the filter to
    # the last per-source `dataset` and discarded the result, so
    # boost_input=True had no effect.
    mixed_dataset = mixed_dataset.filter(_input_booster)
  mixed_dataset = mixed_dataset.shuffle(hybrid_batch_size * shuffle_size)
  # All sources share one padding schema; use the first source's token count.
  padded_shapes, padded_values = _construct_padding_info(
      data_source_list[0], load_dom_dist, load_extra)
  mixed_dataset = mixed_dataset.padded_batch(
      hybrid_batch_size, padded_shapes=padded_shapes,
      padding_values=padded_values)
  mixed_dataset = mixed_dataset.repeat()
  mixed_dataset = mixed_dataset.prefetch(buffer_size=buffer_size)
  return mixed_dataset
def parse_tf_example(example_proto,
                     data_source,
                     max_range=100,
                     max_dom_pos=2000,
                     max_pixel_pos=100,
                     load_dom_dist=False,
                     load_extra=False,
                     append_eos=True,
                     load_screen=True):
  """Parses one serialized tf.Example into a feature dictionary.

  Args:
    example_proto: a serialized tf.Example proto.
    data_source: DataSource enum value selecting the parsing branch.
    max_range: maximum reference span length, in tokens.
    max_dom_pos: DOM coordinates are clipped below this value.
    max_pixel_pos: number of buckets for screen-pixel positions.
    load_dom_dist: whether to load the dom distance feature.
    load_extra: whether to load extra (debugging) string features.
    append_eos: whether to append an EOS step to task/refs features.
    load_screen: whether to load the screen features.
  Returns:
    A processed feature dictionary.
  Raises:
    ValueError: if `data_source` is not a known DataSource.
  """
  feature_spec = {
      'instruction_word_id_seq':
          tf.FixedLenSequenceFeature([], tf.int64, allow_missing=True),
      'input_str_position_seq':
          tf.FixedLenSequenceFeature([], tf.int64, allow_missing=True),
      'obj_desc_position_seq':
          tf.FixedLenSequenceFeature([], tf.int64, allow_missing=True),
      'verb_str_position_seq':
          tf.FixedLenSequenceFeature([], tf.int64, allow_missing=True),
      'agreement_count':
          tf.FixedLenSequenceFeature([], tf.int64, allow_missing=True),
      'instruction_rule_id':
          tf.FixedLenSequenceFeature([], tf.int64, allow_missing=True)
  }
  if load_screen:
    feature_spec['verb_id_seq'] = tf.FixedLenSequenceFeature(
        [], tf.int64, allow_missing=True)
    feature_spec['ui_target_id_seq'] = tf.FixedLenSequenceFeature(
        [], tf.int64, allow_missing=True)
    feature_spec['ui_obj_word_id_seq'] = tf.FixedLenSequenceFeature(
        [], tf.int64, allow_missing=True)
    feature_spec['ui_obj_type_id_seq'] = tf.FixedLenSequenceFeature(
        [], tf.int64, allow_missing=True)
    feature_spec['ui_obj_clickable_seq'] = tf.FixedLenSequenceFeature(
        [], tf.int64, allow_missing=True)
    feature_spec['ui_obj_cord_x_seq'] = tf.FixedLenSequenceFeature(
        [], tf.float32, allow_missing=True)
    feature_spec['ui_obj_cord_y_seq'] = tf.FixedLenSequenceFeature(
        [], tf.float32, allow_missing=True)
    feature_spec['ui_obj_dom_location_seq'] = tf.FixedLenSequenceFeature(
        [], tf.int64, allow_missing=True)
  if load_dom_dist:
    feature_spec['ui_obj_dom_distance'] = tf.FixedLenSequenceFeature(
        [], tf.int64, allow_missing=True)
  if load_extra:
    feature_spec['instruction_str'] = tf.FixedLenSequenceFeature(
        [], tf.string, allow_missing=True)
    feature_spec['task_id'] = tf.FixedLenSequenceFeature(
        [], tf.string, allow_missing=True)
    feature_spec['ui_obj_str_seq'] = tf.FixedLenSequenceFeature(
        [], tf.string, allow_missing=True)
  feature_dict = tf.parse_single_example(example_proto, feature_spec)
  # Downcast all integer features to int32 for the rest of the pipeline.
  for key in feature_dict:
    if feature_dict[key].dtype == tf.int64:
      feature_dict[key] = tf.cast(feature_dict[key], tf.int32)
  # Dispatch to the per-source processing function.
  if data_source == DataSource.ANDROID_HOWTO:
    tf.logging.info('Parsing android_howto dataset')
    feature = _process_android_howto(feature_dict, max_range=max_range,
                                     load_dom_dist=load_dom_dist,
                                     load_extra=load_extra)
  elif data_source == DataSource.RICO_SCA:
    tf.logging.info('Parsing synthetic dataset')
    feature = _process_rico_sca(
        feature_dict, max_range=max_range, max_dom_pos=max_dom_pos,
        load_dom_dist=load_dom_dist,
        load_extra=load_extra,
        load_screen=load_screen)
  elif data_source == DataSource.PIXEL_HELP:
    tf.logging.info('Parsing test dataset')
    feature = _process_pixel_help(feature_dict, data_source,
                                  load_dom_dist=load_dom_dist,
                                  load_extra=load_extra)
  else:
    raise ValueError('Unsupported datasource %s' % str(data_source))
  # Strip padding tokens (id 0) from the instruction.
  feature['task'] = tf.boolean_mask(feature['task'],
                                    tf.not_equal(feature['task'], 0))
  # Bucketize normalized screen coordinates into [0, max_pixel_pos - 1].
  feature['obj_screen_pos'] = tf.to_int32(
      feature['obj_screen_pos'] * (max_pixel_pos - 1))
  if append_eos:
    # Add one EOS step: empty refs, task token 1, and an empty screen.
    feature['input_refs'] = tf.pad(feature['input_refs'], [[0, 1], [0, 0]])
    feature['obj_refs'] = tf.pad(feature['obj_refs'], [[0, 1], [0, 0]])
    step_num = tf.size(feature['task'])
    feature['verb_refs'] = tf.concat(
        [feature['verb_refs'], [[step_num, step_num + 1]]], axis=0)
    feature['task'] = tf.pad(feature['task'], [[0, 1]], constant_values=1)
    feature['obj_text'] = tf.pad(feature['obj_text'], [[0, 1], [0, 0], [0, 0]])
    feature['obj_clickable'] = tf.pad(feature['obj_clickable'],
                                      [[0, 1], [0, 0]])
    feature['obj_type'] = tf.pad(
        feature['obj_type'], [[0, 1], [0, 0]], constant_values=-1)
    feature['obj_screen_pos'] = tf.pad(feature['obj_screen_pos'],
                                       [[0, 1], [0, 0], [0, 0]])
    feature['obj_dom_pos'] = tf.pad(feature['obj_dom_pos'],
                                    [[0, 1], [0, 0], [0, 0]])
    if load_dom_dist:
      feature['obj_dom_dist'] = tf.pad(feature['obj_dom_dist'],
                                       [[0, 1], [0, 0], [0, 0]])
    feature['objects'] = tf.pad(feature['objects'], [[0, 1]])
    feature['verbs'] = tf.pad(feature['verbs'], [[0, 1]])
  return feature
def _bound_refs(feature, max_range):
  """Clamps every `*_refs` (start, end) span to at most `max_range` tokens.

  Mutates `feature` in place: spans longer than `max_range` are replaced by
  (start, start + max_range); shorter spans are left untouched.
  """
  ref_keys = [name for name in feature if name.endswith('_refs')]
  for ref_key in ref_keys:
    spans = feature[ref_key]
    starts = spans[:, 0]
    ends = spans[:, 1]
    clipped = tf.stack([starts, starts + max_range], axis=1)
    feature[ref_key] = tf.where(
        tf.greater(ends - starts, max_range), clipped, spans)
def _process_android_howto(feature_dict, max_range, load_dom_dist=False,
                           load_extra=False):
  """Processes an AndroidHowTo feature dictionary.

  AndroidHowTo examples carry instruction text and reference spans but no
  real screen, so a fake one-object screen is attached.

  Args:
    feature_dict: parsed feature dictionary.
    max_range: maximum reference span length, in tokens.
    load_dom_dist: whether to load the dom distance feature.
    load_extra: whether to load extra (debugging) string features.
  Returns:
    A processed feature dictionary.
  """
  feature = {
      'task': tf.reshape(feature_dict['instruction_word_id_seq'], [-1]),
      'input_refs': tf.reshape(feature_dict['input_str_position_seq'], [-1, 2]),
      'obj_refs': tf.reshape(feature_dict['obj_desc_position_seq'], [-1, 2]),
      'verb_refs': tf.reshape(feature_dict['verb_str_position_seq'], [-1, 2]),
      'agreement_count': tf.reshape(feature_dict['agreement_count'], [])
  }
  if load_extra:
    feature['task_id'] = tf.constant('empty_task_id', dtype=tf.string)
    feature['raw_task'] = tf.reshape(feature_dict['instruction_str'], [])
  _bound_refs(feature, max_range)
  # No screen data for this source: attach placeholder screen features.
  _load_fake_screen(feature, load_extra, load_dom_dist)
  return feature
def _load_fake_screen(feature, load_extra, load_dom_dist):
  """Attaches placeholder screen features for sources without screen data.

  Mutates `feature` in place, adding one empty object per step so the
  feature dictionary matches the schema of screen-bearing sources.
  """
  # One step per verb reference; a single dummy object per step.
  step_num = tf.shape(feature['verb_refs'])[0]
  obj_num = 1
  if load_extra:
    feature['obj_raw_text'] = tf.fill([step_num, obj_num], '')
  # 1 identifies ANDROID_HOWTO in the 'data_source' feature.
  feature['data_source'] = tf.constant(1, dtype=tf.int32)
  feature['obj_text'] = tf.zeros([step_num, obj_num, NUM_TOKENS_PER_OBJ],
                                 tf.int32)
  # -1 = "no object", matching the obj_type padding value.
  feature['obj_type'] = tf.cast(tf.fill([step_num, obj_num], -1), tf.int32)
  feature['obj_clickable'] = tf.zeros([step_num, obj_num], tf.int32)
  feature['obj_screen_pos'] = tf.zeros([step_num, obj_num, 4], tf.float32)
  feature['obj_dom_pos'] = tf.zeros([step_num, obj_num, 3], tf.int32)
  if load_dom_dist:
    feature['obj_dom_dist'] = tf.zeros([step_num, obj_num, obj_num], tf.int32)
  feature['objects'] = tf.zeros([step_num], tf.int32)
  feature['verbs'] = tf.zeros([step_num], tf.int32)
  # NOTE(review): rule id 5 presumably marks real (non-synthetic)
  # instructions — confirm against the synthetic rule vocabulary.
  feature['rule'] = tf.constant(5, dtype=tf.int32)
def _batch_stitch(features, mean_length=4.0, stddev=2.0):
  """Stitches a batch of single-step examples into longer sequences.

  Randomly groups the `batch_size` one-step examples into `num_sequences`
  synthetic episodes whose lengths are drawn from a truncated normal with
  the given mean and stddev, then delegates to `_stitch` to concatenate the
  instruction text and re-offset the reference spans.

  Args:
    features: batched feature dictionary from `input_fn`.
    mean_length: mean number of steps per stitched sequence.
    stddev: standard deviation of the sequence length.
  Returns:
    The stitched feature dictionary.
  """
  batch_size = common_layers.shape_list(features['task'])[0]
  num_sequences = tf.maximum(
      tf.to_int32(tf.to_float(batch_size) / mean_length), 1)
  lengths = tf.random.truncated_normal(shape=[num_sequences],
                                       mean=mean_length, stddev=stddev)
  # Rescale so that num_sequences * max_length >= batch_size.
  max_length = tf.reduce_max(lengths) * (
      tf.to_float(batch_size) / tf.reduce_sum(lengths))
  max_length = tf.to_int32(tf.ceil(max_length))
  total_items = max_length * num_sequences
  num_paddings = total_items - batch_size
  # Random permutation assigns each (padded) item to a sequence slot.
  indices = tf.random.shuffle(tf.range(total_items))
  for key in features:
    shape_list = common_layers.shape_list(features[key])
    assert len(shape_list) >= 1
    with tf.control_dependencies([
        tf.assert_greater_equal(num_paddings, 0,
                                name='num_paddings_positive')]):
      paddings = [[0, num_paddings]] + [[0, 0]] * (len(shape_list) - 1)
    features[key] = tf.pad(features[key], paddings,
                           constant_values=-1 if key == 'obj_type' else 0)
    features[key] = tf.gather(features[key], indices)
    shape = [num_sequences, max_length]
    if len(shape_list) >= 2:
      shape += shape_list[1:]
    features[key] = tf.reshape(features[key], shape)
  # A step is real iff its task has any non-pad/non-EOS token (> 1).
  step_mask = tf.reduce_any(tf.greater(features['task'], 1), axis=-1)
  # Drop sequences that received only padding items.
  mask = tf.reduce_any(step_mask, axis=-1)
  step_mask = tf.boolean_mask(step_mask, mask)
  for key in features:
    features[key] = tf.boolean_mask(features[key], mask=mask)
  num_sequences = tf.shape(features['task'])[0]
  # Compact real steps to the front of each sequence (top_k is stable).
  _, step_indices = tf.math.top_k(tf.to_int32(step_mask), k=max_length)
  step_indices = step_indices + tf.expand_dims(
      tf.range(num_sequences) * max_length, 1)
  step_indices = tf.reshape(step_indices, [-1])
  for key in features:
    shape_list = common_layers.shape_list(features[key])
    features[key] = tf.gather(tf.reshape(features[key], [-1] + shape_list[2:]),
                              step_indices)
    features[key] = tf.reshape(features[key], shape_list)
  features = _stitch(features)
  return features
def _stitch(features):
  """Concatenates grouped one-step examples into single stitched examples.

  Joins the per-step instructions with randomly chosen connector phrases,
  re-offsets every `*_refs` span into the concatenated instruction, and
  appends an EOS token/step.  Returns a new feature dictionary.
  """
  # Real tokens are > 1 (0 = pad, 1 = EOS).
  full_mask = tf.greater(features['task'], 1)
  step_mask = tf.reduce_any(full_mask, axis=-1)
  # True for every real step except the last one of each sequence;
  # connectors are only inserted *between* steps.
  step_mask_exclude_last = tf.pad(step_mask,
                                  [[0, 0], [0, 1]],
                                  constant_values=False)[:, 1:]
  num_sequences = common_layers.shape_list(features['task'])[0]
  num_steps = common_layers.shape_list(features['task'])[1]
  # Pick a random connector phrase for each step position.
  connectors = tf.constant(PADDED_CONCATENATORS)
  connector_indices = tf.random.uniform(
      [num_sequences * num_steps], minval=0,
      maxval=len(PADDED_CONCATENATORS), dtype=tf.int32)
  selected_connectors = tf.reshape(
      tf.gather(connectors, connector_indices),
      [num_sequences, num_steps, len(PADDED_CONCATENATORS[0])])
  # Zero out connectors after last steps so they vanish when compacted.
  selected_connectors = tf.multiply(
      selected_connectors,
      tf.expand_dims(tf.to_int32(step_mask_exclude_last), 2),
      name='connector_mask')
  features['task'] = tf.concat([features['task'], selected_connectors], axis=-1)
  # Cumulative token counts: offset to add to each step's reference spans.
  ref_offsets = tf.expand_dims(
      tf.cumsum(tf.reduce_sum(tf.to_int32(tf.greater(features['task'], 1)), -1),
                exclusive=True, axis=-1), 2)
  features['task'] = tf.reshape(features['task'], [num_sequences, -1])
  full_mask = tf.greater(features['task'], 1)
  full_mask_int = tf.to_int32(full_mask)
  # Compact real tokens to the front via a sparse->dense round trip.
  indices = tf.where(tf.sequence_mask(lengths=tf.reduce_sum(full_mask_int, -1)))
  values = tf.boolean_mask(tf.reshape(features['task'], [-1]),
                           tf.reshape(full_mask, [-1]))
  sparse_task = tf.sparse.SparseTensor(
      indices=indices, values=values,
      dense_shape=tf.to_int64(tf.shape(features['task'])))
  stitched_features = {}
  stitched_features['task'] = tf.sparse_tensor_to_dense(sparse_task)
  max_len = tf.reduce_max(
      tf.reduce_sum(tf.to_int32(tf.greater(stitched_features['task'], 1)), -1))
  stitched_features['task'] = stitched_features['task'][:, :max_len]
  if 'raw_task' in features:
    # Mirror the stitching on the raw instruction strings for debugging.
    connector_strs = tf.reshape(
        tf.gather(tf.constant(CONCATENATORS_STR), connector_indices),
        [num_sequences, num_steps])
    masked_connector_strs = tf.where(
        step_mask_exclude_last,
        connector_strs, tf.fill(tf.shape(connector_strs), ''))
    stitched_features['raw_task'] = tf.strings.reduce_join(
        tf.strings.reduce_join(tf.concat([
            tf.expand_dims(features['raw_task'], 2),
            tf.expand_dims(masked_connector_strs, 2)], axis=2), axis=-1), -1)
  # Number of real actions per stitched sequence (non-empty verb spans).
  action_lengths = tf.reduce_sum(tf.to_int32(
      tf.greater(features['verb_refs'][:, :, 0, 1],
                 features['verb_refs'][:, :, 0, 0])), -1)
  max_action_length = tf.reduce_max(action_lengths)
  def _pad(tensor, padding_value=0):
    # Truncate to max_action_length and append one slot for the EOS step.
    shape_list = common_layers.shape_list(tensor)
    assert len(shape_list) >= 2
    padding_list = [[0, 0], [0, 1]] + [[0, 0]] * (len(shape_list) - 2)
    return tf.pad(tensor[:, :max_action_length],
                  padding_list, constant_values=padding_value)
  for key in features.keys():
    if key.endswith('_refs'):
      features[key] = tf.squeeze(features[key], 2)
      # Only shift non-empty spans; empty spans (start == end) stay zero.
      ref_mask = tf.expand_dims(tf.to_int32(
          tf.not_equal(features[key][:, :, 0],
                       features[key][:, :, 1])), 2)
      stitched_features[key] = tf.multiply(
          (features[key] + ref_offsets), ref_mask, name='ref_mask')
      stitched_features[key] = _pad(stitched_features[key])
    elif key in ['verbs', 'objects', 'consumed', 'obj_dom_pos',
                 'obj_text', 'obj_type', 'obj_clickable', 'obj_screen_pos',
                 'verb_refs', 'obj_refs', 'input_refs', 'obj_dom_dist']:
      features[key] = tf.squeeze(features[key], 2)
      stitched_features[key] = features[key]
      stitched_features[key] = _pad(
          stitched_features[key],
          padding_value=-1 if key == 'obj_type' else 0)
    elif key not in ['task', 'raw_task']:
      # Scalar per-sequence features: keep the first step's value.
      stitched_features[key] = features[key][:, 0]
  # Append eos to 'task'.
  stitched_features['task'] = tf.pad(stitched_features['task'],
                                     [[0, 0], [0, 1]])
  task_mask = tf.to_int32(tf.greater(stitched_features['task'], 1))
  task_eos_mask = tf.pad(task_mask, [[0, 0], [1, 0]], constant_values=1)[:, :-1]
  stitched_features['task'] = stitched_features['task'] + (
      task_eos_mask - task_mask)
  # Append eos to 'verbs'.
  verb_mask = tf.to_int32(tf.greater(stitched_features['verbs'], 1))
  verb_eos_mask = tf.pad(verb_mask, [[0, 0], [1, 0]], constant_values=1)[:, :-1]
  verb_eos = verb_eos_mask - verb_mask
  stitched_features['verbs'] = stitched_features['verbs'] + verb_eos
  # Append the EOS step's reference span (pointing at the task EOS token).
  task_lengths = tf.where(tf.equal(stitched_features['task'], 1))[:, 1]
  eos_pos = tf.to_int32(tf.stack([task_lengths, task_lengths + 1], axis=1))
  action_mask = tf.to_int32(
      tf.sequence_mask(action_lengths, max_action_length + 1))
  action_and_eos_mask = tf.pad(action_mask, [[0, 0], [1, 0]],
                               constant_values=1)[:, :-1]
  verb_ref_eos = action_and_eos_mask - action_mask
  eos_refs = tf.multiply(
      tf.tile(tf.expand_dims(eos_pos, 1), [1, max_action_length + 1, 1]),
      tf.expand_dims(verb_ref_eos, 2), name='verb_ref_eos')
  stitched_features['verb_refs'] += eos_refs
  return stitched_features
def _process_rico_sca(feature_dict, max_range, max_dom_pos,
                      load_dom_dist=False, load_extra=False, load_screen=True):
  """Processes a rico_sca (synthetic) feature dictionary.

  Samples one synthetic phrase/action uniformly from the example and builds
  the per-example feature dictionary, optionally attaching screen features.

  Args:
    feature_dict: parsed feature dictionary.
    max_range: maximum reference span length, in tokens.
    max_dom_pos: DOM coordinates are clipped below this value.
    load_dom_dist: whether to load the dom distance feature.
    load_extra: whether to load extra (debugging) string features.
    load_screen: whether to load the screen features.
  Returns:
    A processed feature dictionary.
  """
  # One (start, end) pair per phrase, hence the division by 2.
  phrase_count = tf.size(feature_dict['obj_desc_position_seq']) // 2
  feature = {
      'task':
          tf.reshape(feature_dict['instruction_word_id_seq'],
                     [phrase_count, NUM_TOKENS_PER_SYN]),
      'input_refs':
          tf.reshape(feature_dict['input_str_position_seq'],
                     [phrase_count, 1, 2]),
      'obj_refs':
          tf.reshape(feature_dict['obj_desc_position_seq'],
                     [phrase_count, 1, 2]),
      'verb_refs':
          tf.reshape(feature_dict['verb_str_position_seq'],
                     [phrase_count, 1, 2]),
      'rule':
          tf.reshape(feature_dict['instruction_rule_id'], [phrase_count]),
  }
  # Sample exactly one phrase (action) uniformly from this example.
  selected_synthetic_action_idx = tf.random_uniform(
      shape=(), minval=0, maxval=phrase_count, dtype=tf.int32)
  for key in feature:
    feature[key] = feature[key][selected_synthetic_action_idx]
  if load_extra:
    feature['raw_task'] = tf.reshape(
        feature_dict['instruction_str'],
        [phrase_count])[selected_synthetic_action_idx]
    feature['task_id'] = tf.constant('empty_task_id', dtype=tf.string)
  if load_screen:
    feature['verbs'] = tf.reshape(
        feature_dict['verb_id_seq'],
        [phrase_count, 1])[selected_synthetic_action_idx]
    feature['objects'] = tf.reshape(
        feature_dict['ui_target_id_seq'],
        [phrase_count, 1])[selected_synthetic_action_idx]
    # Screen features are shared across phrases: one step, variable obj num.
    feature['obj_text'] = tf.reshape(feature_dict['ui_obj_word_id_seq'],
                                     [1, -1, NUM_TOKENS_PER_OBJ])
    feature['obj_type'] = tf.reshape(
        feature_dict['ui_obj_type_id_seq'], [1, -1])
    feature['obj_clickable'] = tf.reshape(feature_dict['ui_obj_clickable_seq'],
                                          [1, -1])
    def _make_obj_screen_pos():
      # [1, num_obj, 4]: (x0, x1, y0, y1) per object.
      return tf.concat([
          tf.reshape(feature_dict['ui_obj_cord_x_seq'], [1, -1, 2]),
          tf.reshape(feature_dict['ui_obj_cord_y_seq'], [1, -1, 2])
      ], 2)
    # Guard against examples with no coordinate data: fall back to zeros.
    feature['obj_screen_pos'] = tf.cond(
        tf.equal(
            tf.size(feature_dict['ui_obj_cord_x_seq']),
            0), lambda: tf.fill([1, tf.shape(feature['obj_type'])[1], 4], 0.),
        _make_obj_screen_pos)
    feature['obj_dom_pos'] = tf.reshape(feature_dict['ui_obj_dom_location_seq'],
                                        [1, -1, 3])
    # Clip DOM coordinates so downstream embedding lookups stay in range.
    feature['obj_dom_pos'] = tf.minimum(feature['obj_dom_pos'], max_dom_pos - 1)
    if load_dom_dist:
      # The distance matrix is stored flattened; recover its square side.
      num_ui_obj = tf.to_int32(
          tf.sqrt(tf.to_float(tf.size(feature_dict['ui_obj_dom_distance']))))
      feature['obj_dom_dist'] = tf.reshape(feature_dict['ui_obj_dom_distance'],
                                           [1, num_ui_obj, num_ui_obj])
    if load_extra:
      feature['obj_raw_text'] = tf.reshape(feature_dict['ui_obj_str_seq'],
                                           [1, -1])
  else:
    _load_fake_screen(feature, load_extra, load_dom_dist)
  _bound_refs(feature, max_range)
  # 0 identifies RICO_SCA in the 'data_source' feature.
  feature['data_source'] = tf.constant(0, dtype=tf.int32)
  feature['agreement_count'] = tf.constant(100, dtype=tf.int32)
  return feature
def _process_pixel_help(feature_dict, data_source, load_dom_dist=False,
                        load_extra=False):
  """Processes a PixelHelp (testing) feature dictionary.

  Args:
    feature_dict: parsed feature dictionary.
    data_source: DataSource.PIXEL_HELP (used to size the fixed screen dims).
    load_dom_dist: whether to load the dom distance feature.
    load_extra: whether to load extra (debugging) string features.
  Returns:
    A processed feature dictionary.
  """
  # One step per action in the episode.
  step_num = tf.size(feature_dict['verb_id_seq'])
  feature = {
      'task':
          tf.reshape(feature_dict['instruction_word_id_seq'], [-1]),
      'obj_text':
          tf.reshape(feature_dict['ui_obj_word_id_seq'], [
              step_num, MAX_UI_OBJECT_NUM[data_source],
              MAX_TOKEN_NUM[data_source]
          ]),
      'obj_type':
          tf.reshape(feature_dict['ui_obj_type_id_seq'],
                     [step_num, MAX_UI_OBJECT_NUM[data_source]]),
      'obj_clickable':
          tf.reshape(feature_dict['ui_obj_clickable_seq'],
                     [step_num, MAX_UI_OBJECT_NUM[data_source]]),
      # (x0, x1, y0, y1) per object, per step.
      'obj_screen_pos': (
          tf.reshape(tf.concat([
              tf.reshape(feature_dict['ui_obj_cord_x_seq'], [step_num, -1, 2]),
              tf.reshape(feature_dict['ui_obj_cord_y_seq'], [step_num, -1, 2])
          ], axis=2), [step_num, MAX_UI_OBJECT_NUM[data_source], 4])),
      'obj_dom_pos':
          tf.reshape(feature_dict['ui_obj_dom_location_seq'],
                     [step_num, MAX_UI_OBJECT_NUM[data_source], 3]),
      'verbs':
          tf.reshape(feature_dict['verb_id_seq'], [step_num]),
      'objects':
          tf.reshape(feature_dict['ui_target_id_seq'], [step_num]),
      'input_refs':
          tf.reshape(feature_dict['input_str_position_seq'], [step_num, 2]),
      'obj_refs':
          tf.reshape(feature_dict['obj_desc_position_seq'], [step_num, 2]),
      # No verb-reference annotations exist for this source: all zeros.
      'verb_refs':
          tf.zeros([step_num, 2], tf.int32),
      'agreement_count':
          tf.constant(100, dtype=tf.int32),
  }
  if load_dom_dist:
    feature['obj_dom_dist'] = tf.reshape(
        feature_dict['ui_obj_dom_distance'],
        [step_num, MAX_UI_OBJECT_NUM[data_source],
         MAX_UI_OBJECT_NUM[data_source]])
  # NOTE(review): rule id 5 presumably marks real (non-synthetic)
  # instructions, matching _load_fake_screen — confirm against rule vocab.
  feature['rule'] = tf.constant(5, dtype=tf.int32)
  if load_extra:
    feature['task_id'] = tf.reshape(feature_dict['task_id'], [])
    feature['raw_task'] = tf.reshape(feature_dict['instruction_str'], [])
    feature['obj_raw_text'] = tf.reshape(
        feature_dict['ui_obj_str_seq'],
        [step_num, MAX_UI_OBJECT_NUM[data_source]])
  # 2 identifies PIXEL_HELP in the 'data_source' feature.
  feature['data_source'] = tf.constant(2, dtype=tf.int32)
  return feature
f739b352172edd66940a05e6d089bdf9743cb12f | 1,110 | py | Python | jdcloud_sdk/services/waf/models/KeyValCfg.py | Tanc009/jdcloud-sdk-python | 8b045c99bc5b73ca7348e950b6f01e03a27982f5 | [
"Apache-2.0"
] | 14 | 2018-04-19T09:53:56.000Z | 2022-01-27T06:05:48.000Z | jdcloud_sdk/services/waf/models/KeyValCfg.py | Tanc009/jdcloud-sdk-python | 8b045c99bc5b73ca7348e950b6f01e03a27982f5 | [
"Apache-2.0"
] | 15 | 2018-09-11T05:39:54.000Z | 2021-07-02T12:38:02.000Z | jdcloud_sdk/services/waf/models/KeyValCfg.py | Tanc009/jdcloud-sdk-python | 8b045c99bc5b73ca7348e950b6f01e03a27982f5 | [
"Apache-2.0"
] | 33 | 2018-04-20T05:29:16.000Z | 2022-02-17T09:10:05.000Z | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class KeyValCfg(object):
    """Key/value matching rule with its associated action configuration."""

    def __init__(self, key, val, atCfg, id=None, matchOp=None):
        """
        :param id: (Optional) sequence id; must not be empty when updating
        :param matchOp: (Optional) 0-5: exact match 0, prefix match 1,
            contains 2, regex 3, greater-than 4, suffix 5
        :param key: cookie key
        :param val: val
        :param atCfg: action configuration
        """
        # Fields are stored verbatim; no validation is performed here.
        self.id = id
        self.matchOp = matchOp
        self.key = key
        self.val = val
        self.atCfg = atCfg
| 30.833333 | 75 | 0.673874 |
class KeyValCfg(object):
    """Key/value matching rule with its associated action configuration."""

    def __init__(self, key, val, atCfg, id=None, matchOp=None):
        """
        :param id: (Optional) sequence id; must not be empty when updating
        :param matchOp: (Optional) 0-5: exact match 0, prefix match 1,
            contains 2, regex 3, greater-than 4, suffix 5
        :param key: cookie key
        :param val: val
        :param atCfg: action configuration
        """
        # Store every field verbatim; no validation is performed here.
        for attr_name, attr_value in (('id', id), ('matchOp', matchOp),
                                      ('key', key), ('val', val),
                                      ('atCfg', atCfg)):
            setattr(self, attr_name, attr_value)
| true | true |
f739b4d5394ea28e6767e0a78ff090af422b45e3 | 79 | py | Python | refresh.py | abagh0703/RetailTrail | cbca3c052523c52935066c5585e5dd2f1c6b4b1e | [
"MIT"
] | null | null | null | refresh.py | abagh0703/RetailTrail | cbca3c052523c52935066c5585e5dd2f1c6b4b1e | [
"MIT"
] | null | null | null | refresh.py | abagh0703/RetailTrail | cbca3c052523c52935066c5585e5dd2f1c6b4b1e | [
"MIT"
] | null | null | null | #!flask/bin/python
from app.views import update_all_sheets

# Script entry point: importing or running this module refreshes every sheet.
# (Removed the stray trailing semicolon — not idiomatic Python.)
update_all_sheets()
from app.views import update_all_sheets

# Script entry point: importing or running this module refreshes every sheet.
# (Removed the stray trailing semicolon — not idiomatic Python.)
update_all_sheets()
f739b57c6f533d8d403860c1962bebbc5e0b02d3 | 4,993 | py | Python | src/features/build_features.py | cafe-com-analytics/stock_market_index_daily_direction | e05eced04d3f0ae3134315de0163bfdf140c1e4a | [
"MIT"
] | null | null | null | src/features/build_features.py | cafe-com-analytics/stock_market_index_daily_direction | e05eced04d3f0ae3134315de0163bfdf140c1e4a | [
"MIT"
] | 8 | 2021-05-30T21:32:35.000Z | 2021-06-14T23:43:42.000Z | src/features/build_features.py | cafe-com-analytics/stock_market_index_daily_direction | e05eced04d3f0ae3134315de0163bfdf140c1e4a | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import yfinance as yf
def downloading_stocks_data(dct, start_date: str = "2021-01-01", end_date: str = "2021-07-01") -> pd.DataFrame:
"""
Download the stocks daily information from tickers listed as keys of a dictionary, gets only "Close" price from
each day within start_date and end_date.
Args:
dct (dict): format {'ticker': {'name': name, etc}}
start_date (str, optional): [description]. Defaults to "2011-01-01".
end_date (str, optional): [description]. Defaults to "2022-01-01".
Returns:
pd.DataFrame: dataframe of close prices of each ticker.
"""
df = yf.download(list(dct.keys())[0], start=start_date, end=end_date, show_errors=False)[["Close"]]
df.columns = [dct[list(dct.keys())[0]]["name"]]
for market_index in list(dct.keys())[1:]:
df_temp = yf.download(market_index, start=start_date, end=end_date)[["Close"]]
df_temp.columns = [dct[market_index]["name"]]
df = df.merge(df_temp, how='left', left_index=True, right_index=True)
df.dropna(how='all', axis=0, inplace=True)
df.fillna(method='ffill', inplace=True)
df.fillna(method='bfill', inplace=True)
return df
def daily_return(df, lst_columns: list = 'all') -> pd.DataFrame:
    """
    Replace each selected column with its daily log return, in percent.

    The return for day t is 100 * (ln(p_t) - ln(p_{t-1})).  The first row has
    no previous price and becomes NaN; rows where every column is NaN are
    dropped.  NOTE: ``df`` is modified in place and also returned.

    Args:
        df (pd.DataFrame): frame of prices.
        lst_columns (list | str, optional): columns to convert.  'all'
            (default) converts every column; a single column name or any
            iterable of names is also accepted.

    Returns:
        pd.DataFrame: the same frame with selected columns converted.
    """
    if lst_columns == 'all':
        lst_columns = df.columns.tolist()
    elif isinstance(lst_columns, str):
        # A bare column name: wrap it — list('name') would split the string
        # into single characters and raise KeyError below.
        lst_columns = [lst_columns]
    elif not isinstance(lst_columns, list):
        lst_columns = list(lst_columns)
    for column in lst_columns:
        df[column] = (np.log(df[column]) - np.log(df[column].shift(periods=1))) * 100
    df.dropna(axis=0, how='all', inplace=True)
    return df
def return_in_period(df, lst_columns: list = 'all') -> pd.DataFrame:
    """
    Normalise each selected column by its first value, so every series starts
    at 1.0 (cumulative return over the period).  Modifies ``df`` in place and
    returns it.

    Args:
        df (pd.DataFrame): frame of prices.
        lst_columns (list | str, optional): columns to normalise.  'all'
            (default) selects every column; a single column name is accepted.

    Returns:
        pd.DataFrame: the same frame with selected columns normalised.
    """
    if lst_columns == 'all':
        lst_columns = df.columns.tolist()
    elif isinstance(lst_columns, str):
        # Avoid list('name') splitting a column name into characters.
        lst_columns = [lst_columns]
    elif not isinstance(lst_columns, list):
        lst_columns = list(lst_columns)
    for column in lst_columns:
        # .iloc[0] is explicitly positional; the previous [0] relied on
        # deprecated positional fallback for non-integer indexes.
        df[column] = df[column] / df[column].iloc[0]
    return df
def create_shifted_rt(df, rts: list, column_name: str = 'Close') -> pd.DataFrame:
    """
    Add one lagged copy of ``column_name`` per entry in ``rts``.

    Each lag ``k`` produces a new column ``rt-k`` holding ``column_name``
    shifted ``k`` rows down (the first ``k`` rows are NaN).  ``df`` is
    modified in place and returned.

    Args:
        df (pd.DataFrame): input frame.
        rts (list): lags, in rows; one new column per value.
        column_name (str, optional): column to shift. Defaults to 'Close'.

    Returns:
        pd.DataFrame: ``df`` with the extra ``rt-<k>`` columns appended.
    """
    for lag in rts:
        lagged_series = df[column_name].shift(periods=lag)
        df[f"rt-{lag}"] = lagged_series
    return df
def uniform_clustering(df: pd.DataFrame, lst_columns: list = 'all') -> pd.DataFrame:
    """
    Create a 'cluster_<col>' target per selected column by binning values
    (daily returns) into six clusters with the fixed limits described in
    (2011, Zuo and Kita): (-inf,-1.12), [-1.12,-0.42), [-0.42,0), [0,0.44),
    [0.44,1.07), [1.07,inf) -> clusters 1..6.  NaN inputs stay NaN (so the
    cluster column is float).  Modifies ``df`` in place and returns it.

    Args:
        df (pd.DataFrame): frame of returns.
        lst_columns (list | str, optional): columns to cluster; 'all'
            (default) selects every column; a single name is accepted.

    Returns:
        pd.DataFrame: ``df`` with one 'cluster_<col>' column per input column.
    """
    if lst_columns == 'all':
        lst_columns = df.columns.tolist()
    elif isinstance(lst_columns, str):
        # Single column name: avoid list('name') splitting it into characters.
        lst_columns = [lst_columns]
    elif not isinstance(lst_columns, list):
        lst_columns = list(lst_columns)
    for column in lst_columns:
        conditions = [
            df[column] < -1.12,
            (df[column] >= -1.12) & (df[column] < -0.42),
            (df[column] >= -0.42) & (df[column] < 0),
            (df[column] >= 0) & (df[column] < 0.44),
            (df[column] >= 0.44) & (df[column] < 1.07),
            df[column] >= 1.07]
        choices = [1, 2, 3, 4, 5, 6]
        # default=np.nan: NaN returns match no condition and stay NaN.
        df["cluster_" + column] = np.select(conditions, choices, default=np.nan)
    return df
def binary_clustering(df: pd.DataFrame, lst_columns: list = 'all') -> pd.DataFrame:
    """
    Create a binary 'cluster_<col>' target per selected column: 1 when the
    value (daily return) is strictly positive, 0 otherwise.  Modifies ``df``
    in place and returns it.

    Args:
        df (pd.DataFrame): frame of returns.
        lst_columns (list | str, optional): columns to cluster; 'all'
            (default) selects every column; a single name is accepted.

    Returns:
        pd.DataFrame: ``df`` with one integer 'cluster_<col>' column per
            input column.
    """
    if lst_columns == 'all':
        lst_columns = df.columns.tolist()
    elif isinstance(lst_columns, str):
        # Single column name: avoid list('name') splitting it into characters.
        lst_columns = [lst_columns]
    elif not isinstance(lst_columns, list):
        lst_columns = list(lst_columns)
    for column in lst_columns:
        df["cluster_" + column] = np.where(df[column] > 0, 1, 0)
    return df
def boxplot_clustering(df: pd.DataFrame, lst_columns: list = 'all') -> pd.DataFrame:
    """
    Create a 'cluster_<col>' target per selected column by binning values
    into boxplot quartiles: 1 = below Q1, 2 = [Q1, Q2), 3 = [Q2, Q3),
    4 = >= Q3.  Modifies ``df`` in place and returns it.

    NOTE(review): the quartiles are taken from the FIRST column of ``df``
    only (row 0 of ``df.describe().T``) and applied to every selected
    column — presumably intentional for single-column frames; confirm
    before using with multi-column input.

    Args:
        df (pd.DataFrame): frame of returns.
        lst_columns (list | str, optional): columns to cluster; 'all'
            (default) selects every column; a single name is accepted.

    Returns:
        pd.DataFrame: ``df`` with one float 'cluster_<col>' column per
            input column (float because np.select's default is NaN).
    """
    if lst_columns == 'all':
        lst_columns = df.columns.tolist()
    elif isinstance(lst_columns, str):
        # Single column name: avoid list('name') splitting it into characters.
        lst_columns = [lst_columns]
    elif not isinstance(lst_columns, list):
        lst_columns = list(lst_columns)
    df_boxplot = df.describe().T
    # .iloc[0] is explicitly positional; the previous [0] relied on the
    # deprecated positional fallback for a string-indexed Series.
    quartile_1 = df_boxplot["25%"].iloc[0]
    quartile_2 = df_boxplot["50%"].iloc[0]
    quartile_3 = df_boxplot["75%"].iloc[0]
    for column in lst_columns:
        conditions = [
            (df[column] < quartile_1),
            (df[column] >= quartile_1) & (df[column] < quartile_2),
            (df[column] >= quartile_2) & (df[column] < quartile_3),
            (df[column] >= quartile_3)]
        choices = [1, 2, 3, 4]
        df["cluster_" + column] = np.select(conditions, choices, default=np.nan)
    return df
| 31.402516 | 115 | 0.612658 | import numpy as np
import pandas as pd
import yfinance as yf
def downloading_stocks_data(dct, start_date: str = "2021-01-01", end_date: str = "2021-07-01") -> pd.DataFrame:
df = yf.download(list(dct.keys())[0], start=start_date, end=end_date, show_errors=False)[["Close"]]
df.columns = [dct[list(dct.keys())[0]]["name"]]
for market_index in list(dct.keys())[1:]:
df_temp = yf.download(market_index, start=start_date, end=end_date)[["Close"]]
df_temp.columns = [dct[market_index]["name"]]
df = df.merge(df_temp, how='left', left_index=True, right_index=True)
df.dropna(how='all', axis=0, inplace=True)
df.fillna(method='ffill', inplace=True)
df.fillna(method='bfill', inplace=True)
return df
def daily_return(df, lst_columns: list = 'all') -> pd.DataFrame:
if lst_columns == 'all':
lst_columns = df.columns.tolist()
elif isinstance(lst_columns, list):
pass
else:
lst_columns = list(lst_columns)
for column in lst_columns:
df[column] = (np.log(df[column]) - np.log(df[column].shift(periods=1)))*100
df.dropna(axis=0, how='all', inplace=True)
return df
def return_in_period(df, lst_columns: list = 'all') -> pd.DataFrame:
if lst_columns == 'all':
lst_columns = df.columns.tolist()
elif isinstance(lst_columns, list):
pass
else:
lst_columns = list(lst_columns)
for column in lst_columns:
df[column] = df[column]/df[column][0]
return df
def create_shifted_rt(df, rts: list, column_name: str = 'Close') -> pd.DataFrame:
for t in rts:
df[f"rt-{t}"] = df[column_name].shift(periods=t)
return df
def uniform_clustering(df: pd.DataFrame, lst_columns: list = 'all') -> pd.DataFrame:
if lst_columns == 'all':
lst_columns = df.columns.tolist()
elif isinstance(lst_columns, list):
pass
else:
lst_columns = list(lst_columns)
for column in lst_columns:
conditions = [
df[column] < -1.12,
(df[column] >= -1.12) & (df[column] < -0.42),
(df[column] >= -0.42) & (df[column] < 0),
(df[column] >= 0) & (df[column] < 0.44),
(df[column] >= 0.44) & (df[column] < 1.07),
df[column] >= 1.07]
choices = [1, 2, 3, 4, 5, 6]
df["cluster_"+column] = np.select(conditions, choices, default=np.nan)
return df
def binary_clustering(df: pd.DataFrame, lst_columns: list = 'all') -> pd.DataFrame:
if lst_columns == 'all':
lst_columns = df.columns.tolist()
elif isinstance(lst_columns, list):
pass
else:
lst_columns = list(lst_columns)
for column in lst_columns:
df["cluster_"+column] = np.where(df[column] > 0, 1, 0)
return df
def boxplot_clustering(df: pd.DataFrame, lst_columns: list = 'all') -> pd.DataFrame:
if lst_columns == 'all':
lst_columns = df.columns.tolist()
elif isinstance(lst_columns, list):
pass
else:
lst_columns = list(lst_columns)
df_boxplot = df.describe().T
quartile_1 = df_boxplot["25%"][0]
quartile_2 = df_boxplot["50%"][0]
quartile_3 = df_boxplot["75%"][0]
for column in lst_columns:
conditions = [
(df[column] < quartile_1),
(df[column] >= quartile_1) & (df[column] < quartile_2),
(df[column] >= quartile_2) & (df[column] < quartile_3),
(df[column] >= quartile_3)]
choices = [int(1), int(2), int(3), int(4)]
df["cluster_"+column] = np.select(conditions, choices, default=np.nan)
return df
| true | true |
f739b8a64cad0334355a6d8a82943c315faa60c0 | 9,878 | py | Python | tests/components/greeneye_monitor/test_sensor.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | tests/components/greeneye_monitor/test_sensor.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 24,710 | 2016-04-13T08:27:26.000Z | 2020-03-02T12:59:13.000Z | tests/components/greeneye_monitor/test_sensor.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Tests for greeneye_monitor sensors."""
from unittest.mock import AsyncMock
from homeassistant.components.greeneye_monitor.sensor import (
DATA_PULSES,
DATA_WATT_SECONDS,
)
from homeassistant.const import STATE_UNKNOWN
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_registry import (
RegistryEntryDisabler,
async_get as get_entity_registry,
)
from .common import (
MULTI_MONITOR_CONFIG,
SINGLE_MONITOR_CONFIG_POWER_SENSORS,
SINGLE_MONITOR_CONFIG_PULSE_COUNTERS,
SINGLE_MONITOR_CONFIG_TEMPERATURE_SENSORS,
SINGLE_MONITOR_CONFIG_VOLTAGE_SENSORS,
SINGLE_MONITOR_SERIAL_NUMBER,
connect_monitor,
setup_greeneye_monitor_component_with_config,
)
from .conftest import assert_sensor_state
async def test_sensor_does_not_exist_before_monitor_connected(
hass: HomeAssistant, monitors: AsyncMock
) -> None:
"""Test that a sensor does not exist before its monitor is connected."""
# The sensor base class handles connecting the monitor, so we test this with a single voltage sensor for ease
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_VOLTAGE_SENSORS
)
entity_registry = get_entity_registry(hass)
assert entity_registry.async_get("sensor.voltage_1") is None
async def test_sensors_created_when_monitor_connected(
hass: HomeAssistant, monitors: AsyncMock
) -> None:
"""Test that sensors get created when the monitor first connects."""
# The sensor base class handles updating the state on connection, so we test this with a single voltage sensor for ease
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_VOLTAGE_SENSORS
)
assert len(monitors.listeners) == 1
await connect_monitor(hass, monitors, SINGLE_MONITOR_SERIAL_NUMBER)
assert len(monitors.listeners) == 0 # Make sure we cleaned up the listener
assert_sensor_state(hass, "sensor.voltage_1", "120.0")
async def test_sensors_created_during_setup_if_monitor_already_connected(
hass: HomeAssistant, monitors: AsyncMock
) -> None:
"""Test that sensors get created during setup if the monitor happens to connect really quickly."""
# The sensor base class handles updating the state on connection, so we test this with a single voltage sensor for ease
await connect_monitor(hass, monitors, SINGLE_MONITOR_SERIAL_NUMBER)
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_VOLTAGE_SENSORS
)
assert len(monitors.listeners) == 0 # Make sure we cleaned up the listener
assert_sensor_state(hass, "sensor.voltage_1", "120.0")
async def test_disable_sensor_after_monitor_connected(
hass: HomeAssistant, monitors: AsyncMock
) -> None:
"""Test that a sensor disabled after its monitor connected stops listening for sensor changes."""
# The sensor base class handles connecting the monitor, so we test this with a single voltage sensor for ease
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_VOLTAGE_SENSORS
)
monitor = await connect_monitor(hass, monitors, SINGLE_MONITOR_SERIAL_NUMBER)
assert len(monitor.voltage_sensor.listeners) == 1
await disable_entity(hass, "sensor.voltage_1")
assert len(monitor.voltage_sensor.listeners) == 0
async def test_updates_state_when_sensor_pushes(
    hass: HomeAssistant, monitors: AsyncMock
) -> None:
    """Test that a sensor entity updates its state when the underlying sensor pushes an update."""
    # The sensor base class handles triggering state updates, so we test this with a single voltage sensor for ease
    await setup_greeneye_monitor_component_with_config(
        hass, SINGLE_MONITOR_CONFIG_VOLTAGE_SENSORS
    )
    monitor = await connect_monitor(hass, monitors, SINGLE_MONITOR_SERIAL_NUMBER)
    # Initial value comes from the mock monitor fixture at connect time.
    assert_sensor_state(hass, "sensor.voltage_1", "120.0")
    # Simulate the monitor pushing a new reading: mutate the underlying
    # sensor and fire its listeners; the entity should refresh in HA.
    monitor.voltage_sensor.voltage = 119.8
    monitor.voltage_sensor.notify_all_listeners()
    assert_sensor_state(hass, "sensor.voltage_1", "119.8")
async def test_power_sensor_initially_unknown(
hass: HomeAssistant, monitors: AsyncMock
) -> None:
"""Test that the power sensor can handle its initial state being unknown (since the GEM API needs at least two packets to arrive before it can compute watts)."""
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_POWER_SENSORS
)
await connect_monitor(hass, monitors, SINGLE_MONITOR_SERIAL_NUMBER)
assert_sensor_state(
hass, "sensor.channel_1", STATE_UNKNOWN, {DATA_WATT_SECONDS: 1000}
)
# This sensor was configured with net metering on, so we should be taking the
# polarized value
assert_sensor_state(
hass, "sensor.channel_two", STATE_UNKNOWN, {DATA_WATT_SECONDS: -400}
)
async def test_power_sensor(hass: HomeAssistant, monitors: AsyncMock) -> None:
"""Test that a power sensor reports its values correctly, including handling net metering."""
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_POWER_SENSORS
)
monitor = await connect_monitor(hass, monitors, SINGLE_MONITOR_SERIAL_NUMBER)
monitor.channels[0].watts = 120.0
monitor.channels[1].watts = 120.0
monitor.channels[0].notify_all_listeners()
monitor.channels[1].notify_all_listeners()
assert_sensor_state(hass, "sensor.channel_1", "120.0", {DATA_WATT_SECONDS: 1000})
# This sensor was configured with net metering on, so we should be taking the
# polarized value
assert_sensor_state(hass, "sensor.channel_two", "120.0", {DATA_WATT_SECONDS: -400})
async def test_pulse_counter_initially_unknown(
hass: HomeAssistant, monitors: AsyncMock
) -> None:
"""Test that the pulse counter sensor can handle its initial state being unknown (since the GEM API needs at least two packets to arrive before it can compute pulses per time)."""
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_PULSE_COUNTERS
)
monitor = await connect_monitor(hass, monitors, SINGLE_MONITOR_SERIAL_NUMBER)
monitor.pulse_counters[0].pulses_per_second = None
monitor.pulse_counters[1].pulses_per_second = None
monitor.pulse_counters[2].pulses_per_second = None
monitor.pulse_counters[0].notify_all_listeners()
monitor.pulse_counters[1].notify_all_listeners()
monitor.pulse_counters[2].notify_all_listeners()
assert_sensor_state(hass, "sensor.pulse_a", STATE_UNKNOWN, {DATA_PULSES: 1000})
# This counter was configured with each pulse meaning 0.5 gallons and
# wanting to show gallons per minute, so 10 pulses per second -> 300 gal/min
assert_sensor_state(hass, "sensor.pulse_2", STATE_UNKNOWN, {DATA_PULSES: 1000})
# This counter was configured with each pulse meaning 0.5 gallons and
# wanting to show gallons per hour, so 10 pulses per second -> 18000 gal/hr
assert_sensor_state(hass, "sensor.pulse_3", STATE_UNKNOWN, {DATA_PULSES: 1000})
async def test_pulse_counter(hass: HomeAssistant, monitors: AsyncMock) -> None:
"""Test that a pulse counter sensor reports its values properly, including calculating different units."""
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_PULSE_COUNTERS
)
await connect_monitor(hass, monitors, SINGLE_MONITOR_SERIAL_NUMBER)
assert_sensor_state(hass, "sensor.pulse_a", "10.0", {DATA_PULSES: 1000})
# This counter was configured with each pulse meaning 0.5 gallons and
# wanting to show gallons per minute, so 10 pulses per second -> 300 gal/min
assert_sensor_state(hass, "sensor.pulse_2", "300.0", {DATA_PULSES: 1000})
# This counter was configured with each pulse meaning 0.5 gallons and
# wanting to show gallons per hour, so 10 pulses per second -> 18000 gal/hr
assert_sensor_state(hass, "sensor.pulse_3", "18000.0", {DATA_PULSES: 1000})
async def test_temperature_sensor(hass: HomeAssistant, monitors: AsyncMock) -> None:
"""Test that a temperature sensor reports its values properly, including proper handling of when its native unit is different from that configured in hass."""
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_TEMPERATURE_SENSORS
)
await connect_monitor(hass, monitors, SINGLE_MONITOR_SERIAL_NUMBER)
# The config says that the sensor is reporting in Fahrenheit; if we set that up
# properly, HA will have converted that to Celsius by default.
assert_sensor_state(hass, "sensor.temp_a", "0.0")
async def test_voltage_sensor(hass: HomeAssistant, monitors: AsyncMock) -> None:
"""Test that a voltage sensor reports its values properly."""
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_VOLTAGE_SENSORS
)
await connect_monitor(hass, monitors, SINGLE_MONITOR_SERIAL_NUMBER)
assert_sensor_state(hass, "sensor.voltage_1", "120.0")
async def test_multi_monitor_sensors(hass: HomeAssistant, monitors: AsyncMock) -> None:
    """Sensors from several registered monitors must all report correctly."""
    await setup_greeneye_monitor_component_with_config(hass, MULTI_MONITOR_CONFIG)
    for serial_number in (1, 2, 3):
        await connect_monitor(hass, monitors, serial_number)
    # Expected values come from the per-monitor mock temperature sensors.
    assert_sensor_state(hass, "sensor.unit_1_temp_1", "32.0")
    assert_sensor_state(hass, "sensor.unit_2_temp_1", "0.0")
    assert_sensor_state(hass, "sensor.unit_3_temp_1", "32.0")
async def disable_entity(hass: HomeAssistant, entity_id: str) -> None:
    """Disable the given entity in the registry and wait for HA to settle."""
    registry = get_entity_registry(hass)
    registry.async_update_entity(entity_id, disabled_by=RegistryEntryDisabler.USER)
    await hass.async_block_till_done()
| 46.815166 | 183 | 0.766147 | from unittest.mock import AsyncMock
from homeassistant.components.greeneye_monitor.sensor import (
DATA_PULSES,
DATA_WATT_SECONDS,
)
from homeassistant.const import STATE_UNKNOWN
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_registry import (
RegistryEntryDisabler,
async_get as get_entity_registry,
)
from .common import (
MULTI_MONITOR_CONFIG,
SINGLE_MONITOR_CONFIG_POWER_SENSORS,
SINGLE_MONITOR_CONFIG_PULSE_COUNTERS,
SINGLE_MONITOR_CONFIG_TEMPERATURE_SENSORS,
SINGLE_MONITOR_CONFIG_VOLTAGE_SENSORS,
SINGLE_MONITOR_SERIAL_NUMBER,
connect_monitor,
setup_greeneye_monitor_component_with_config,
)
from .conftest import assert_sensor_state
async def test_sensor_does_not_exist_before_monitor_connected(
hass: HomeAssistant, monitors: AsyncMock
) -> None:
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_VOLTAGE_SENSORS
)
entity_registry = get_entity_registry(hass)
assert entity_registry.async_get("sensor.voltage_1") is None
async def test_sensors_created_when_monitor_connected(
hass: HomeAssistant, monitors: AsyncMock
) -> None:
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_VOLTAGE_SENSORS
)
assert len(monitors.listeners) == 1
await connect_monitor(hass, monitors, SINGLE_MONITOR_SERIAL_NUMBER)
assert len(monitors.listeners) == 0
assert_sensor_state(hass, "sensor.voltage_1", "120.0")
async def test_sensors_created_during_setup_if_monitor_already_connected(
hass: HomeAssistant, monitors: AsyncMock
) -> None:
await connect_monitor(hass, monitors, SINGLE_MONITOR_SERIAL_NUMBER)
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_VOLTAGE_SENSORS
)
assert len(monitors.listeners) == 0
assert_sensor_state(hass, "sensor.voltage_1", "120.0")
async def test_disable_sensor_after_monitor_connected(
hass: HomeAssistant, monitors: AsyncMock
) -> None:
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_VOLTAGE_SENSORS
)
monitor = await connect_monitor(hass, monitors, SINGLE_MONITOR_SERIAL_NUMBER)
assert len(monitor.voltage_sensor.listeners) == 1
await disable_entity(hass, "sensor.voltage_1")
assert len(monitor.voltage_sensor.listeners) == 0
async def test_updates_state_when_sensor_pushes(
hass: HomeAssistant, monitors: AsyncMock
) -> None:
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_VOLTAGE_SENSORS
)
monitor = await connect_monitor(hass, monitors, SINGLE_MONITOR_SERIAL_NUMBER)
assert_sensor_state(hass, "sensor.voltage_1", "120.0")
monitor.voltage_sensor.voltage = 119.8
monitor.voltage_sensor.notify_all_listeners()
assert_sensor_state(hass, "sensor.voltage_1", "119.8")
async def test_power_sensor_initially_unknown(
hass: HomeAssistant, monitors: AsyncMock
) -> None:
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_POWER_SENSORS
)
await connect_monitor(hass, monitors, SINGLE_MONITOR_SERIAL_NUMBER)
assert_sensor_state(
hass, "sensor.channel_1", STATE_UNKNOWN, {DATA_WATT_SECONDS: 1000}
)
assert_sensor_state(
hass, "sensor.channel_two", STATE_UNKNOWN, {DATA_WATT_SECONDS: -400}
)
async def test_power_sensor(hass: HomeAssistant, monitors: AsyncMock) -> None:
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_POWER_SENSORS
)
monitor = await connect_monitor(hass, monitors, SINGLE_MONITOR_SERIAL_NUMBER)
monitor.channels[0].watts = 120.0
monitor.channels[1].watts = 120.0
monitor.channels[0].notify_all_listeners()
monitor.channels[1].notify_all_listeners()
assert_sensor_state(hass, "sensor.channel_1", "120.0", {DATA_WATT_SECONDS: 1000})
assert_sensor_state(hass, "sensor.channel_two", "120.0", {DATA_WATT_SECONDS: -400})
async def test_pulse_counter_initially_unknown(
hass: HomeAssistant, monitors: AsyncMock
) -> None:
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_PULSE_COUNTERS
)
monitor = await connect_monitor(hass, monitors, SINGLE_MONITOR_SERIAL_NUMBER)
monitor.pulse_counters[0].pulses_per_second = None
monitor.pulse_counters[1].pulses_per_second = None
monitor.pulse_counters[2].pulses_per_second = None
monitor.pulse_counters[0].notify_all_listeners()
monitor.pulse_counters[1].notify_all_listeners()
monitor.pulse_counters[2].notify_all_listeners()
assert_sensor_state(hass, "sensor.pulse_a", STATE_UNKNOWN, {DATA_PULSES: 1000})
assert_sensor_state(hass, "sensor.pulse_2", STATE_UNKNOWN, {DATA_PULSES: 1000})
assert_sensor_state(hass, "sensor.pulse_3", STATE_UNKNOWN, {DATA_PULSES: 1000})
async def test_pulse_counter(hass: HomeAssistant, monitors: AsyncMock) -> None:
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_PULSE_COUNTERS
)
await connect_monitor(hass, monitors, SINGLE_MONITOR_SERIAL_NUMBER)
assert_sensor_state(hass, "sensor.pulse_a", "10.0", {DATA_PULSES: 1000})
assert_sensor_state(hass, "sensor.pulse_2", "300.0", {DATA_PULSES: 1000})
assert_sensor_state(hass, "sensor.pulse_3", "18000.0", {DATA_PULSES: 1000})
async def test_temperature_sensor(hass: HomeAssistant, monitors: AsyncMock) -> None:
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_TEMPERATURE_SENSORS
)
await connect_monitor(hass, monitors, SINGLE_MONITOR_SERIAL_NUMBER)
assert_sensor_state(hass, "sensor.temp_a", "0.0")
async def test_voltage_sensor(hass: HomeAssistant, monitors: AsyncMock) -> None:
await setup_greeneye_monitor_component_with_config(
hass, SINGLE_MONITOR_CONFIG_VOLTAGE_SENSORS
)
await connect_monitor(hass, monitors, SINGLE_MONITOR_SERIAL_NUMBER)
assert_sensor_state(hass, "sensor.voltage_1", "120.0")
async def test_multi_monitor_sensors(hass: HomeAssistant, monitors: AsyncMock) -> None:
await setup_greeneye_monitor_component_with_config(hass, MULTI_MONITOR_CONFIG)
await connect_monitor(hass, monitors, 1)
await connect_monitor(hass, monitors, 2)
await connect_monitor(hass, monitors, 3)
assert_sensor_state(hass, "sensor.unit_1_temp_1", "32.0")
assert_sensor_state(hass, "sensor.unit_2_temp_1", "0.0")
assert_sensor_state(hass, "sensor.unit_3_temp_1", "32.0")
async def disable_entity(hass: HomeAssistant, entity_id: str) -> None:
entity_registry = get_entity_registry(hass)
entity_registry.async_update_entity(
entity_id, disabled_by=RegistryEntryDisabler.USER
)
await hass.async_block_till_done()
| true | true |
f739b8cbc7ce76e5b962e754b6abd04e7238cdb2 | 4,021 | py | Python | bayesian_irl/src/utils/prob_dists.py | clear-nus/BOIRL | cc872111fda3c7b8118e1a864831013c30f63948 | [
"MIT"
] | 1 | 2021-02-26T10:09:15.000Z | 2021-02-26T10:09:15.000Z | bayesian_irl/src/utils/prob_dists.py | clear-nus/BOIRL | cc872111fda3c7b8118e1a864831013c30f63948 | [
"MIT"
] | null | null | null | bayesian_irl/src/utils/prob_dists.py | clear-nus/BOIRL | cc872111fda3c7b8118e1a864831013c30f63948 | [
"MIT"
] | null | null | null | import scipy.stats
import numpy as np
from scipy.stats import multivariate_normal as MVG
class UniformDist:
    """Uniform density on [xmin, xmax]; xmin defaults to -xmax."""

    def __init__(self, xmax=1., xmin=None):
        if xmin is None:
            xmin = - xmax
        self.xmax = xmax
        self.xmin = xmin
        # Constant density of a uniform distribution over the interval.
        self.prob = 1 / (self.xmax - self.xmin)

    def __call__(self, *args, **kwargs):
        # Density does not depend on the evaluation point.
        return self.prob

    def __str__(self):
        return 'UniformDist(max={}, min={})'.format(self.xmax, self.xmin)
class MultiuniformDist:
    """Product of two independent uniform densities on [xmin[i], xmax[i]]."""

    def __init__(self, xmax=[2., 10.], xmin=[-2., -10.]):
        """
        :param xmax: upper bounds per dimension (length-2 sequence).
        :param xmin: lower bounds per dimension; None mirrors xmax element-wise.
        """
        self.xmax = xmax
        # Element-wise negation: the previous `- xmax` raised TypeError for a
        # plain list when xmin was None.
        self.xmin = [-x for x in xmax] if xmin is None else xmin
        self.prob = (1 / (self.xmax[0] - self.xmin[0])) * (1 / (self.xmax[1] - self.xmin[1]))

    def __call__(self, *args, **kwargs):
        # Constant density inside the box.
        return self.prob

    def __str__(self):
        # Previously reported 'UniformDist', which misidentified the class.
        return 'MultiuniformDist(max={}, min={})'.format(self.xmax, self.xmin)
class MultiuniformborlangeDist:
    """Product of two uniform densities for the Borlange reward weights."""

    def __init__(self, xmax=[0., 0.], xmin=[-2.5, -2.5]):
        """
        :param xmax: upper bounds per dimension (length-2 sequence).
        :param xmin: lower bounds per dimension; None mirrors xmax element-wise.
        """
        self.xmax = xmax
        # Element-wise negation: `- xmax` is invalid for a plain list.
        self.xmin = [-x for x in xmax] if xmin is None else xmin
        self.prob = (1 / (self.xmax[0] - self.xmin[0])) * (1 / (self.xmax[1] - self.xmin[1]))

    def __call__(self, *args, **kwargs):
        # Constant density inside the box.
        return self.prob

    def __str__(self):
        # Previously reported 'UniformDist', which misidentified the class.
        return 'MultiuniformborlangeDist(max={}, min={})'.format(self.xmax, self.xmin)
class DistBase:
    """Thin wrapper around a scipy.stats distribution with fixed parameters."""

    def __init__(self, dist, params):
        self.dist = dist
        self.params = params

    def __call__(self, x):
        """Return P(x): the product of pdf values over the entries of x."""
        log_density = self.dist.logpdf(x, **self.params)
        return np.exp(np.sum(log_density))

    def sample(self, size=10):
        """Draw ``size`` random variates from the wrapped distribution."""
        return self.dist.rvs(size=size, **self.params)

    def __str__(self):
        arg_text = ', '.join('{}={}'.format(key, value)
                             for key, value in self.params.items())
        return '{}({})'.format(self.__class__.__name__, arg_text)
class GaussianDist(DistBase):
    """Univariate normal prior with fixed mean and standard deviation."""

    def __init__(self, loc=0, scale=0.1):
        """
        :param loc: mean of the gaussian distribution
        :param scale: standard deviation (variance == scale ** 2)
        """
        super().__init__(dist=scipy.stats.norm,
                         params=dict(loc=loc, scale=scale))
class MultigaussDist(DistBase):
    """Multivariate normal prior backed by a frozen scipy distribution."""

    def __init__(self, mean=np.array([1.25, 5.0]), cov=np.array([[1, 0], [0, 1]])):
        """
        :param mean: mean vector of the gaussian
        :param cov: covariance matrix
        """
        # Frozen multivariate normal.  DistBase.__init__ is intentionally not
        # called, so self.dist/self.params do not exist on this subclass.
        self.rvs = MVG(mean=mean, cov=cov)

    def __call__(self, x):
        return np.exp(np.sum(self.rvs.logpdf(x)))

    def sample(self, size=10):
        # Override DistBase.sample, which would raise AttributeError here
        # because self.dist/self.params are never set by this subclass.
        return self.rvs.rvs(size=size)
class MultigaussBorlangeDist(DistBase):
    """Multivariate normal prior for the Borlange network reward weights."""

    def __init__(self, dist=None, mean=np.array([-2, -1.0, -1]), cov=np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])):
        """
        :param dist: unused; kept (now with a default) so existing callers
            that pass it positionally keep working while new callers may
            omit it.
        :param mean: mean vector of the gaussian
        :param cov: covariance matrix
        """
        # Frozen multivariate normal; DistBase.__init__ is intentionally not
        # called, so self.dist/self.params do not exist on this subclass.
        self.rvs = MVG(mean=mean, cov=cov)

    def __call__(self, x):
        return np.exp(np.sum(self.rvs.logpdf(x)))

    def sample(self, size=10):
        # Override DistBase.sample, which relies on attributes this subclass
        # never sets.
        return self.rvs.rvs(size=size)
class BetaDist(DistBase):
    """Beta prior; the a=b=0.5 default is the arcsine distribution."""

    def __init__(self, a=0.5, b=0.5, loc=0, scale=1):
        super().__init__(dist=scipy.stats.beta,
                         params=dict(a=a, b=b, loc=loc, scale=scale))
class GammaDist(DistBase):
    """Gamma prior with fixed shape, location and scale."""

    def __init__(self, a=2, loc=0, scale=1):
        super().__init__(dist=scipy.stats.gamma,
                         params=dict(a=a, loc=loc, scale=scale))
if __name__ == '__main__':
    # Smoke test: sample each prior with default hyper-parameters and save a
    # histogram per distribution.
    import matplotlib.pyplot as plt
    import os
    dists = (GaussianDist, BetaDist, GammaDist)
    for dist in dists:
        distribution = dist()
        samples = distribution.sample(size=100)
        plt.hist(samples)
        # __str__ shows the class name and its parameters in the title.
        plt.title(distribution)
        # Saves to <repo-root>/results/<DistName>.png, where the repo root is
        # assumed to be three path components above this file — TODO confirm.
        # NOTE(review): '/'-splitting the path is POSIX-only.
        path = '/' + os.path.join(*os.path.abspath(__file__).split('/')[:-3], 'results',
                                  '{}.png'.format(dist.__name__))
        plt.savefig(path)
        # Clear the axes so the next distribution's histogram starts fresh.
        plt.cla()
| 30.930769 | 109 | 0.570754 | import scipy.stats
import numpy as np
from scipy.stats import multivariate_normal as MVG
class UniformDist:
def __init__(self, xmax=1., xmin=None):
self.xmax = xmax
self.xmin = - xmax if xmin is None else xmin
self.prob = 1 / (self.xmax - self.xmin)
def __call__(self, *args, **kwargs):
return self.prob
def __str__(self):
return 'UniformDist(max={}, min={})'.format(self.xmax, self.xmin)
class MultiuniformDist:
def __init__(self, xmax=[2.,10.], xmin=[-2.,-10.]):
self.xmax = xmax
self.xmin = - xmax if xmin is None else xmin
self.prob = (1 / (self.xmax[0] - self.xmin[0]))*(1 / (self.xmax[1] - self.xmin[1]))
def __call__(self, *args, **kwargs):
return self.prob
def __str__(self):
return 'UniformDist(max={}, min={})'.format(self.xmax, self.xmin)
class MultiuniformborlangeDist:
def __init__(self, xmax=[0., 0.], xmin=[-2.5,-2.5]):
self.xmax = xmax
self.xmin = - xmax if xmin is None else xmin
self.prob = (1 / (self.xmax[0] - self.xmin[0]))*(1 / (self.xmax[1] - self.xmin[1]))
def __call__(self, *args, **kwargs):
return self.prob
def __str__(self):
return 'UniformDist(max={}, min={})'.format(self.xmax, self.xmin)
class DistBase:
def __init__(self, dist, params):
self.dist = dist
self.params = params
def __call__(self, x):
return np.exp(np.sum(self.dist.logpdf(x, **self.params)))
def sample(self, size=10):
return self.dist.rvs(size=size, **self.params)
def __str__(self):
return self.__class__.__name__ + '(' + ', '.join(['{}={}'.format(key, value)
for key, value in self.params.items()]) + ')'
class GaussianDist(DistBase):
def __init__(self, loc=0, scale=0.1):
params = dict(loc=loc, scale=scale)
dist = scipy.stats.norm
super().__init__(dist=dist, params=params)
class MultigaussDist(DistBase):
def __init__(self, mean=np.array([1.25, 5.0]), cov=np.array([[1, 0], [0, 1]])):
self.rvs = MVG(mean=mean,cov=cov)
def __call__(self, x):
return np.exp(np.sum(self.rvs.logpdf(x)))
class MultigaussBorlangeDist(DistBase):
def __init__(self, dist, mean=np.array([-2, -1.0, -1]), cov=np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])):
self.rvs = MVG(mean=mean,cov=cov)
def __call__(self, x):
return np.exp(np.sum(self.rvs.logpdf(x)))
class BetaDist(DistBase):
def __init__(self, a=0.5, b=0.5, loc=0, scale=1):
params = dict(a=a, b=b, loc=loc, scale=scale)
dist = scipy.stats.beta
super().__init__(dist=dist, params=params)
class GammaDist(DistBase):
def __init__(self, a=2, loc=0, scale=1):
params = dict(a=a, loc=loc, scale=scale)
dist = scipy.stats.gamma
super().__init__(dist=dist, params=params)
if __name__ == '__main__':
import matplotlib.pyplot as plt
import os
dists = (GaussianDist, BetaDist, GammaDist)
for dist in dists:
distribution = dist()
samples = distribution.sample(size=100)
plt.hist(samples)
plt.title(distribution)
path = '/' + os.path.join(*os.path.abspath(__file__).split('/')[:-3], 'results',
'{}.png'.format(dist.__name__))
plt.savefig(path)
plt.cla()
| true | true |
f739b9330172f97b95754ddd39d54eca41d67a5d | 829 | gyp | Python | ui/gfx/compositor/compositor.gyp | meego-tablet-ux/meego-app-browser | 0f4ef17bd4b399c9c990a2f6ca939099495c2b9c | [
"BSD-3-Clause"
] | 1 | 2015-10-12T09:14:22.000Z | 2015-10-12T09:14:22.000Z | ui/gfx/compositor/compositor.gyp | meego-tablet-ux/meego-app-browser | 0f4ef17bd4b399c9c990a2f6ca939099495c2b9c | [
"BSD-3-Clause"
] | null | null | null | ui/gfx/compositor/compositor.gyp | meego-tablet-ux/meego-app-browser | 0f4ef17bd4b399c9c990a2f6ca939099495c2b9c | [
"BSD-3-Clause"
] | 1 | 2020-11-04T07:22:28.000Z | 2020-11-04T07:22:28.000Z | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
  'targets': [
    {
      'target_name': 'compositor',
      'type': '<(library)',
      'msvs_guid': '21CEE0E3-6F4E-4F01-B8C9-F7751CC21AA9',
      'dependencies': [
        '<(DEPTH)/base/base.gyp:base',
        '<(DEPTH)/ui/gfx/gl/gl.gyp:gl',
        '<(DEPTH)/ui/ui.gyp:ui_gfx',
      ],
      # Both implementations are listed; the conditions below remove the one
      # that does not apply to the target platform.
      'sources': [
        'compositor.cc',
        'compositor_gl.cc',
        'compositor.h',
      ],
      'conditions': [
        # On Linux/BSD build the GL-backed compositor (compositor_gl.cc);
        # on every other platform keep the plain compositor.cc instead.
        ['OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
          'sources!': [
            'compositor.cc',
          ],
        }, {
          'sources!': [
            'compositor_gl.cc',
          ]
        }],
      ],
    },
  ],
} | 24.382353 | 72 | 0.482509 |
{
'targets': [
{
'target_name': 'compositor',
'type': '<(library)',
'msvs_guid': '21CEE0E3-6F4E-4F01-B8C9-F7751CC21AA9',
'dependencies': [
'<(DEPTH)/base/base.gyp:base',
'<(DEPTH)/ui/gfx/gl/gl.gyp:gl',
'<(DEPTH)/ui/ui.gyp:ui_gfx',
],
'sources': [
'compositor.cc',
'compositor_gl.cc',
'compositor.h',
],
'conditions': [
['OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
'sources!': [
'compositor.cc',
],
}, {
'sources!': [
'compositor_gl.cc',
]
}],
],
},
],
} | true | true |
f739b9472eb8f51df7defc5cbc20455e81259810 | 2,075 | py | Python | libs/XSSImageCheck.py | Fa1c0n35/RootTheBoxs | 4f2a9886c8eedca3039604b93929c8c09866115e | [
"Apache-2.0"
] | 1 | 2019-06-29T08:40:54.000Z | 2019-06-29T08:40:54.000Z | libs/XSSImageCheck.py | Fa1c0n35/RootTheBoxs | 4f2a9886c8eedca3039604b93929c8c09866115e | [
"Apache-2.0"
] | null | null | null | libs/XSSImageCheck.py | Fa1c0n35/RootTheBoxs | 4f2a9886c8eedca3039604b93929c8c09866115e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@author: moloch
Copyright 2014
--------------------------------------------
Check for ticksy .gif and .bmp files
http://jklmnn.de/imagejs/
"""
import os
from string import printable
from tornado.options import options
from random import randint
MAX_AVATAR_SIZE = 1024 * 1024
MIN_AVATAR_SIZE = 64
IMG_FORMATS = ["png", "jpeg", "jpg", "gif", "bmp"]
def is_xss_image(data):
    """Return True when the first 16 characters are all printable.

    Real image files begin with a binary magic number, so an all-printable
    header suggests script content disguised as an image
    (see http://jklmnn.de/imagejs/).  Empty input is vacuously True.
    """
    header = data[:16]
    return all(char in printable for char in header)
def get_new_avatar(dir, forceteam=False):
    """Pick an avatar file from the given category directory.

    Returns the first avatar in ``dir`` ('team', 'user', anything else ->
    boxes) that is not already assigned in the database; if all are taken,
    returns a random one; if the directory has no images, returns the
    category's default avatar.
    """
    avatar = default_avatar(dir)
    avatars = filter_avatars(dir)
    if len(avatars) == 0:
        return avatar
    # Imports are deferred — presumably to avoid a circular import between
    # libs and models; confirm before hoisting them to module level.
    if dir == "team" or forceteam:
        from models.Team import Team
        cmplist = Team.all()
    elif dir == "user":
        from models.User import User
        cmplist = User.all()
    else:
        from models.Box import Box
        cmplist = Box.all()
    # Avatars already in use, as a set for O(1) membership tests below.
    used = {item._avatar for item in cmplist if item._avatar}
    for image in avatars:
        if image not in used:
            return image
    # Every avatar is taken: hand out a random one.
    return avatars[randint(0, len(avatars) - 1)]
def default_avatar(dir):
    """Return the bundled placeholder image name for an avatar category.

    Any category other than "team" or "user" falls back to the box default.
    """
    defaults = {
        "team": "default_team.jpg",
        "user": "default_user.jpg",
    }
    return defaults.get(dir, "default_box.jpg")
def filter_avatars(dir):
    """List avatar image files under the given category directory.

    Returns paths relative to the avatar root ("<dir>/<filename>") for every
    file whose lowercased name ends with one of IMG_FORMATS.  Note the match
    is on the bare suffix, not ".<ext>" — "foo_png" would also match
    (preserved quirk of the original implementation).
    """
    allowed = tuple(IMG_FORMATS)
    return [
        dir + "/" + name
        for name in os.listdir(options.avatar_dir + "/" + dir)
        if name.lower().endswith(allowed)
    ]
def existing_avatars(dir):
    """Collect avatar paths already assigned in the database.

    For the "team" category only teams that actually have members count;
    every other category scans user records.
    """
    if dir == "team":
        from models.Team import Team
        return [
            team.avatar
            for team in Team.all()
            if team.avatar is not None and len(team.members) > 0
        ]
    from models.User import User
    return [user.avatar for user in User.all() if user.avatar is not None]
| 22.802198 | 65 | 0.589398 |
import os
from string import printable
from tornado.options import options
from random import randint
MAX_AVATAR_SIZE = 1024 * 1024
MIN_AVATAR_SIZE = 64
IMG_FORMATS = ["png", "jpeg", "jpg", "gif", "bmp"]
def is_xss_image(data):
return all([char in printable for char in data[:16]])
def get_new_avatar(dir, forceteam=False):
avatar = default_avatar(dir)
avatars = filter_avatars(dir)
if len(avatars) == 0:
return avatar
if dir == "team" or forceteam:
from models.Team import Team
cmplist = Team.all()
elif dir == "user":
from models.User import User
cmplist = User.all()
else:
from models.Box import Box
cmplist = Box.all()
dblist = []
for item in cmplist:
if item._avatar:
dblist.append(item._avatar)
for image in avatars:
if not image in dblist:
return image
return avatars[randint(0, len(avatars) - 1)]
def default_avatar(dir):
if dir == "team":
avatar = "default_team.jpg"
elif dir == "user":
avatar = "default_user.jpg"
else:
avatar = "default_box.jpg"
return avatar
def filter_avatars(dir):
avatars = os.listdir(options.avatar_dir + "/" + dir)
avatarlist = []
for avatar in avatars:
if avatar.lower().endswith(tuple(IMG_FORMATS)):
avatarlist.append(dir + "/" + avatar)
return avatarlist
def existing_avatars(dir):
avatars = []
if dir == "team":
from models.Team import Team
teams = Team.all()
for team in teams:
if team.avatar is not None and len(team.members) > 0:
avatars.append(team.avatar)
else:
from models.User import User
users = User.all()
for user in users:
if user.avatar is not None:
avatars.append(user.avatar)
return avatars
| true | true |
f739b9c26677c883ffe9a5df52de5da65fa944f6 | 3,105 | py | Python | src/data_prep_helpers.py | edesz/electricity-consumption-forecast | 9bc49523d9c2ed6d827ce690916980cf7e818fed | [
"MIT"
] | null | null | null | src/data_prep_helpers.py | edesz/electricity-consumption-forecast | 9bc49523d9c2ed6d827ce690916980cf7e818fed | [
"MIT"
] | 5 | 2021-08-23T19:18:49.000Z | 2022-02-12T14:10:57.000Z | src/data_prep_helpers.py | edesz/electricity-consumption-forecast | 9bc49523d9c2ed6d827ce690916980cf7e818fed | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pandas as pd
def add_corona_dates(df, index_name, strategy=("during_corona", "no_corona")):
    """Append boolean columns marking each row's position relative to the
    first COVID-19 lockdown window of its country.

    Lockdown start dates are hard-coded per country (10 European countries);
    the window end is 2020-04-12 23:00 for all of them.  The window is merged
    onto ``df`` via its ``country`` column.

    Parameters
    ----------
    df : pd.DataFrame
        Must contain a ``country`` column and a datetime column named
        ``index_name``.
    index_name : str
        Name of the datetime column compared against the corona window.
    strategy : iterable of str
        Division of datetimes based on stages of corona; acceptable
        strategies are one of the following (order in the iterable does
        not matter):
        - ['during_corona', 'no_corona']
        - ['pre_corona', 'during_corona', 'post_corona']
        The default is now an immutable tuple (the previous list default
        was a mutable-default-argument hazard).

    Returns
    -------
    pd.DataFrame
        ``df`` with ``corona_start``/``corona_end`` plus the boolean
        strategy columns; ``during_corona`` is always added.

    Raises
    ------
    ValueError
        If ``strategy`` matches neither supported division (previously a
        bare ``Exception``; ``ValueError`` is a subclass, so existing
        ``except Exception`` callers still work).

    SOURCE
    ------
    https://github.com/facebook/prophet/issues/1416#issuecomment-618553502
    """
    # Per-country lockdown start; the end date is shared by all countries.
    corona_starts = {
        "BE": "2020-03-07", "CH": "2020-03-07", "CZ": "2020-03-14",
        "DE": "2020-03-14", "ES": "2020-03-14", "FR": "2020-03-07",
        "HR": "2020-03-21", "IT": "2020-03-14", "NL": "2020-03-14",
        "PL": "2020-03-14",
    }
    df_corona = pd.DataFrame(
        {
            "country": list(corona_starts),
            "corona_start": pd.to_datetime(list(corona_starts.values())),
            "corona_end": pd.to_datetime("2020-04-12 23:00:00"),
        }
    )
    df = df.merge(df_corona, on="country", how="left")
    # Add corona periods based on the specified strategy.
    strategies_dict = {
        "dn": ["during_corona", "no_corona"],
        "pdp": ["pre_corona", "during_corona", "post_corona"],
    }
    if set(strategy) == set(strategies_dict["dn"]):
        df["no_corona"] = (df[index_name] < df["corona_start"]) | (
            df[index_name] > df["corona_end"]
        )
    elif set(strategy) == set(strategies_dict["pdp"]):
        df["pre_corona"] = df[index_name] < df["corona_start"]
        df["post_corona"] = df[index_name] > df["corona_end"]
    else:
        strategies = ", ".join(
            "['" + "', '".join(map(str, v)) + "']"
            for v in strategies_dict.values()
        )
        raise ValueError(
            f"Unsupported corona strategy. Expected one of: {strategies}"
        )
    # The in-window flag is meaningful under either strategy.
    df["during_corona"] = (df[index_name] >= df["corona_start"]) & (
        df[index_name] <= df["corona_end"]
    )
    return df
| 32.010309 | 78 | 0.515942 |
import pandas as pd
def add_corona_dates(df, index_name, strategy=["during_corona", "no_corona"]):
d_corona = {
"BE": [
pd.to_datetime("2020-03-07 00:00:00"),
pd.to_datetime("2020-04-12 23:00:00"),
],
"CH": [
pd.to_datetime("2020-03-07 00:00:00"),
pd.to_datetime("2020-04-12 23:00:00"),
],
"CZ": [
pd.to_datetime("2020-03-14 00:00:00"),
pd.to_datetime("2020-04-12 23:00:00"),
],
"DE": [
pd.to_datetime("2020-03-14 00:00:00"),
pd.to_datetime("2020-04-12 23:00:00"),
],
"ES": [
pd.to_datetime("2020-03-14 00:00:00"),
pd.to_datetime("2020-04-12 23:00:00"),
],
"FR": [
pd.to_datetime("2020-03-07 00:00:00"),
pd.to_datetime("2020-04-12 23:00:00"),
],
"HR": [
pd.to_datetime("2020-03-21 00:00:00"),
pd.to_datetime("2020-04-12 23:00:00"),
],
"IT": [
pd.to_datetime("2020-03-14 00:00:00"),
pd.to_datetime("2020-04-12 23:00:00"),
],
"NL": [
pd.to_datetime("2020-03-14 00:00:00"),
pd.to_datetime("2020-04-12 23:00:00"),
],
"PL": [
pd.to_datetime("2020-03-14 00:00:00"),
pd.to_datetime("2020-04-12 23:00:00"),
],
}
df_corona = (
pd.DataFrame.from_dict(d_corona, orient="index")
.reset_index()
.rename(
columns={0: "corona_start", 1: "corona_end", "index": "country"}
)
)
df = df.merge(df_corona, on="country", how="left")
strategies_dict = {
"dn": ["during_corona", "no_corona"],
"pdp": ["pre_corona", "during_corona", "post_corona"],
}
if set(strategy) == set(strategies_dict["dn"]):
df["no_corona"] = (df[index_name] < df["corona_start"]) | (
df[index_name] > df["corona_end"]
)
elif set(strategy) == set(strategies_dict["pdp"]):
df["pre_corona"] = df[index_name] < df["corona_start"]
df["post_corona"] = df[index_name] > df["corona_end"]
else:
strategies = ""
for _, v in strategies_dict.items():
strategies += "['" + "', '".join(map(str, v)) + "'], "
strategies = strategies.rstrip(", ")
raise Exception(
f"Unsupported corona strategy. Expected one of: {strategies}"
)
df["during_corona"] = (df[index_name] >= df["corona_start"]) & (
df[index_name] <= df["corona_end"]
)
return df
| true | true |
f739b9d2e83bc8735cf1080a47202ed8db294e20 | 563 | py | Python | Mundo_1/Ex004.py | alinenog/Mundo_Python-1-2-3 | 2cfda105eec436ae32394e01c011342515fceff5 | [
"MIT"
] | null | null | null | Mundo_1/Ex004.py | alinenog/Mundo_Python-1-2-3 | 2cfda105eec436ae32394e01c011342515fceff5 | [
"MIT"
] | null | null | null | Mundo_1/Ex004.py | alinenog/Mundo_Python-1-2-3 | 2cfda105eec436ae32394e01c011342515fceff5 | [
"MIT"
] | null | null | null | # Exercício Python 4:
#Faça um programa que leia algo pelo teclado e mostre na tela o seu tipo primitivo e todas as informações #possíveis sobre ele..'''
# Read anything from the keyboard and report string-classification checks.
# (Prompts/labels are in Portuguese and are runtime output — left untouched.)
e=input('Digite algo:')
# Is it numeric? / decimal? / alphabetic?
print('Entrada é um número: ',e.isnumeric())
print('Entrada é decimal:',e.isdecimal())
print('Entrada é alfabetica:',e.isalpha())
# Case and capitalization checks.
print('Esta em maisculas ?', e.isupper())
print('Esta em minusculas ?',e.islower())
print('Esta capitalizada',(e.istitle()))
# NOTE(review): isalpha() is checked twice (also above) — likely one of
# these was meant to be a different predicate.
print('É alfabético ?',e.isalpha())
print('E alfanumerico ? ',e.isalnum())
print('Só tem espaços ? ',e.isspace())
| 40.214286 | 133 | 0.687389 |
print('Entrada é um número: ',e.isnumeric())
print('Entrada é decimal:',e.isdecimal())
print('Entrada é alfabetica:',e.isalpha())
print('Esta em maisculas ?', e.isupper())
print('Esta em minusculas ?',e.islower())
print('Esta capitalizada',(e.istitle()))
print('É alfabético ?',e.isalpha())
print('E alfanumerico ? ',e.isalnum())
print('Só tem espaços ? ',e.isspace())
| true | true |
f739bb01fb98ab63d34326e29de7f3b579ae56e8 | 420 | py | Python | backend/covid-backend/config.py | alesanmed-educational-projects/core-data-covid-project | ab43beb98dba4153320796b54a078bb6075b0fd5 | [
"Unlicense"
] | null | null | null | backend/covid-backend/config.py | alesanmed-educational-projects/core-data-covid-project | ab43beb98dba4153320796b54a078bb6075b0fd5 | [
"Unlicense"
] | 18 | 2021-07-24T20:17:40.000Z | 2021-08-19T09:55:01.000Z | backend/covid-backend/config.py | alesanmed-educational-projects/core-data-covid-project | ab43beb98dba4153320796b54a078bb6075b0fd5 | [
"Unlicense"
] | null | null | null | import os
class Config(object):
    """Base application settings, populated from environment variables."""

    DEBUG = True
    DEVELOPMENT = True
    # PostgreSQL connection settings; empty string when the variable is unset.
    PG_USER = os.getenv("PG_USER", "")
    PG_PASS = os.getenv("PG_PASS", "")
    PG_HOST = os.getenv("PG_HOST", "")
    PG_PORT = os.getenv("PG_PORT", "")
    PG_DB = os.getenv("PG_DB", "")
    # API key for outbound mail via SendGrid.
    SENDGRID_KEY = os.getenv("SENDGRID_KEY", "")
class ProductionConfig(Config):
    """Production settings: inherit connection values, disable debug modes."""
    DEBUG = False
    DEVELOPMENT = False
| 23.333333 | 53 | 0.628571 | import os
class Config(object):
DEBUG = True
DEVELOPMENT = True
PG_USER = os.environ.get("PG_USER", "")
PG_PASS = os.environ.get("PG_PASS", "")
PG_HOST = os.environ.get("PG_HOST", "")
PG_PORT = os.environ.get("PG_PORT", "")
PG_DB = os.environ.get("PG_DB", "")
SENDGRID_KEY = os.environ.get("SENDGRID_KEY", "")
class ProductionConfig(Config):
DEBUG = False
DEVELOPMENT = False
| true | true |
f739bbf109547b406feb03f593af55b0d99a80a5 | 11,180 | py | Python | src/openne/Z_0709.py | barcawy/OpenNE | 88018ed9bf34d09020464a430e09afb704b1f322 | [
"MIT"
] | null | null | null | src/openne/Z_0709.py | barcawy/OpenNE | 88018ed9bf34d09020464a430e09afb704b1f322 | [
"MIT"
] | null | null | null | src/openne/Z_0709.py | barcawy/OpenNE | 88018ed9bf34d09020464a430e09afb704b1f322 | [
"MIT"
] | null | null | null | from __future__ import print_function
import time
import math
import random
import numpy as np
import pickle as pkl
import networkx as nx
from gensim.models import Word2Vec
from fastdtw import fastdtw
from collections import Counter
from collections import defaultdict
import os
class Z(object):
    """Node-embedding model: biased random walks fed into gensim Word2Vec.

    The walk bias mixes graph neighbors with "degree-similar" vertices whose
    selection (create_degree / get_vertices / verifyDegrees) appears adapted
    from struc2vec — TODO confirm.  NOTE(review): the neighbor/weight tables
    are loaded from hard-coded pickle paths under
    'E:/Project/OpenNE/matrix_pkl/', so as written this only runs on the
    original author's machine with `preprocess = False`.
    """
    def __init__(self, graph, path_length, num_paths, dim, prefix, hop, **kwargs):
        # Remaining kwargs are forwarded verbatim to gensim's Word2Vec.
        kwargs["workers"] = kwargs.get("workers", 4)
        # kwargs["hs"] = 1  # 1 = hierarchical softmax, 0 = negative sampling (disabled)
        self.graph = graph
        # preprocess=True recomputes the PPR/degree tables and caches them to
        # disk; False loads previously cached tables (hard-coded flag).
        preprocess = False
        if preprocess:
            self.ppr_matrix = self.constructSubGraph(hop)
            self.degrees, self.degree_permuted = self.create_degree()
            self.degree_neighbors, self.norm_weight = self.create_ppr_sample_table()
            self.dump_to_disk(self.degree_neighbors,'E:/Project/OpenNE/matrix_pkl/' + prefix + '_'+ str(hop) + '_neighbors')
            self.dump_to_disk(self.norm_weight,'E:/Project/OpenNE/matrix_pkl/' + prefix + '_'+ str(hop) + '_weight')
        else:
            self.degree_neighbors = self.load_pkl('E:/Project/OpenNE/matrix_pkl/' + prefix + '_'+ str(hop) + '_neighbors')
            self.norm_weight = self.load_pkl('E:/Project/OpenNE/matrix_pkl/' + prefix + '_'+ str(hop) + '_weight')
        # Generate the walk corpus, then train Word2Vec on it.
        sentences = self.simulate_walks(
            num_walks=num_paths, walk_length=path_length)
        kwargs["sentences"] = sentences
        kwargs["min_count"] = kwargs.get("min_count", 0)
        kwargs["size"] = kwargs.get("size", dim)
        kwargs["sg"] = 1  # 1 = skip-gram; 0 = CBOW
        self.size = kwargs["size"]
        print("Learning representation...")
        word2vec = Word2Vec(**kwargs)
        # Keep only the per-node vectors; drop the (large) model object.
        self.vectors = {}
        for word in graph.G.nodes():
            self.vectors[word] = word2vec.wv[word]
        del word2vec

    def dump_to_disk(self, f, file_name):
        """Pickle object *f* to '<file_name>.pkl'."""
        with open(file_name + '.pkl', 'wb') as handle:
            pkl.dump(f, handle, protocol=pkl.HIGHEST_PROTOCOL)

    def load_pkl(self, file_name):
        """Unpickle and return the object stored at '<file_name>.pkl'."""
        with open(file_name + '.pkl', 'rb') as handle:
            val = pkl.load(handle)
        return val

    def neighbors(self, fringe):
        """Return the union of 1-hop neighbor sets of all nodes in *fringe*."""
        # find all 1-hop neighbors of nodes in fringe from A
        graph = self.graph.G
        res = set()
        for node in fringe:
            nei = graph.neighbors(node)
            nei = set(nei)
            res = res.union(nei)
        return res

    def constructSubGraph(self, hop):
        """Build a visit-count table per node from a walk on its *hop*-ego subgraph.

        Returns {node: Counter} where the Counter holds visit frequencies of a
        500-step random walk restricted to the node's hop-neighborhood.
        """
        graph = self.graph.G
        edge_set = set(graph.edges())
        nodes = list(graph.nodes())
        # subgraph_map = defaultdict(nx.Graph)
        ppr_matrix = {}
        for node in nodes:
            subgraph_map = nx.Graph()
            subgraph_map.add_node(node)
            # NOTE(review): set(node) builds a set of the node's *characters*
            # when node is a string — works for single-char ids only; for
            # multi-char ids the seed fringe is wrong. TODO confirm intent.
            fringe = set(node)
            visited = set(node)
            for dist in range(0, hop):
                # Expand the frontier one hop at a time, BFS style.
                fringe = self.neighbors(fringe)
                fringe = fringe - visited
                visited = visited.union(fringe)
            visited = list(visited)
            # Induce the subgraph: keep edges whose endpoints are both visited.
            for pos_u, u in enumerate(visited):
                for v in visited[pos_u+1:]:
                    if (u, v) in edge_set or (v, u) in edge_set:
                        subgraph_map.add_edge(u, v)
            ppr_matrix[node] = Counter()
            walk = self.subgraph_walk(subgraph_map, walk_length=500, start_node=node)
            ppr_matrix[node].update(walk)
        return ppr_matrix

    def subgraph_walk(self, subGraph, walk_length, start_node):
        '''
        Simulate a uniform random walk of up to walk_length steps on
        subGraph, starting from start_node.
        '''
        G = subGraph
        walk = [start_node]
        while len(walk) < walk_length:
            cur = walk[-1]
            cur_nbrs = list(G.neighbors(cur))
            if len(cur_nbrs) > 0:
                walk.append(random.choice(cur_nbrs))
            else:
                # isolated node: nowhere to go, stop the walk early
                break
        return walk

    def deepwalk_walk(self, walk_length, start_node, alpha = 0.5):
        '''
        Simulate a biased random walk starting from start_node: with
        probability alpha jump to a degree-similar vertex (weighted by
        norm_weight), otherwise step to a graph neighbor.
        '''
        G = self.graph.G
        walk = [start_node]
        while len(walk) < walk_length:
            cur = walk[-1]
            # NOTE(review): alpha is unconditionally overwritten to 1, so the
            # walk ALWAYS samples from the degree-similarity table and the
            # neighbor branch below is dead code as written.
            alpha = 1  # alpha/G.degree(cur)
            if np.random.rand() < alpha:
                walk.append(np.random.choice(self.degree_neighbors[cur], p=self.norm_weight[cur]))
            else:
                cur_nbrs = list(G.neighbors(cur))
                if len(cur_nbrs) > 0:
                    # node2vec-style step (disabled: n2v = 0)
                    n2v = 0
                    if n2v:
                        nbr = random.choice(cur_nbrs)
                        if set(cur_nbrs) & set(G.neighbors(nbr)):
                            walk.append(random.choice(cur_nbrs))
                        else:
                            walk.append(nbr)
                    else:
                        # plain deepwalk: uniform neighbor step
                        walk.append(random.choice(cur_nbrs))
                else:
                    break
        return walk

    def simulate_walks(self, num_walks, walk_length):
        '''
        Repeatedly simulate random walks from each node; returns the full
        walk corpus (num_walks passes over a shuffled node order).
        '''
        G = self.graph.G
        walks = []
        nodes = list(G.nodes())
        print('Simulate walk iteration:')
        for walk_iter in range(num_walks):
            # pool = multiprocessing.Pool(processes = 4)
            print(str(walk_iter + 1), '/', str(num_walks))
            random.shuffle(nodes)
            for node in nodes:
                # walks.append(pool.apply_async(deepwalk_walk_wrapper, (self, walk_length, node, )))
                walks.append(self.deepwalk_walk(
                    walk_length=walk_length, start_node=node))
            # pool.close()
            # pool.join()
        # print(len(walks))
        return walks

    def create_degree(self):
        """Index nodes by degree and link each degree to its sorted neighbors.

        Returns (degrees, degree_permuted):
        - degrees: {degree: {'vertices': [...], 'before': d-, 'after': d+}}
          where before/after are the adjacent degrees in sorted order;
        - degree_permuted: {node: degree}.
        """
        G = self.graph.G
        print("- Creating degree vectors...")
        degrees = {}
        degrees_sorted = set()
        degree_permuted = {}
        for v in G.nodes():
            degree = G.degree(v)
            degrees_sorted.add(degree)
            degree_permuted[v] = degree
            if (degree not in degrees):
                degrees[degree] = {}
                degrees[degree]['vertices'] = []
            degrees[degree]['vertices'].append(v)
        degrees_sorted = np.array(list(degrees_sorted), dtype='int')
        # degree_permuted = degrees_sorted
        degrees_sorted = np.sort(degrees_sorted)
        l = len(degrees_sorted)
        # Chain each distinct degree to its predecessor/successor.
        for index, degree in enumerate(degrees_sorted):
            if (index > 0):
                degrees[degree]['before'] = degrees_sorted[index - 1]
            if (index < (l - 1)):
                degrees[degree]['after'] = degrees_sorted[index + 1]
        print("- Degree vectors created.")
        return degrees, degree_permuted

    def create_ppr_sample_table(self):
        """For every node, pick degree-similar candidates and their sampling weights."""
        print("- Creating PPR sample table ...")
        nodes = list(self.graph.G.nodes())
        degree_neighbors = {}
        norm_weight = {}
        nodes_num = len(nodes)
        k = 0
        for node in nodes:
            print(str(k + 1), '/', str(nodes_num))
            k += 1
            degree_neighbors[node] = self.get_vertices(node)
            norm_weight[node] = self.ppr_sample(node, degree_neighbors[node])
        print("- PPR sample table created.")
        return degree_neighbors, norm_weight

    def cost(self, a, b):
        """Distance between two scalars: max/min ratio minus 1 (0 when equal).

        ep keeps the ratio finite when either value is 0.
        """
        ep = 0.001
        m = max(a, b) + ep
        mi = min(a, b) + ep
        return ((m / mi) - 1)

    def ppr_sample(self, node, neighbors):
        """Weight each candidate in *neighbors* by DTW similarity of visit-count profiles.

        Similarity is exp(-dtw_distance) between the nodes' sorted Counter
        value sequences; weights are normalized to sum to 1.
        """
        node_ppr_v = [i[1] for i in self.ppr_matrix[node].most_common()]#[1:]
        if len(node_ppr_v) == 0:
            node_ppr_v = [1]
        sim_list = []
        nodes_num = len(self.graph.G.nodes())  # NOTE(review): unused
        for _neighbor in neighbors:
            neighbor_ppr_v = [i[1] for i in self.ppr_matrix[_neighbor].most_common()]#[1:]
            if len(neighbor_ppr_v) == 0:
                neighbor_ppr_v = [1]
            dits_dtw, _ = fastdtw(node_ppr_v, neighbor_ppr_v, radius=1, dist=self.cost)
            sim_list.append(np.exp(-1.0 * dits_dtw))
        norm_weight = [float(i) / sum(sim_list) for i in sim_list]
        # sampled_neighbor = np.random.choice(neighbors, p=norm_weight)
        return norm_weight

    def verifyDegrees(self, degree_v_root, degree_a, degree_b):
        """Return whichever of degree_a/degree_b is closer to the root degree.

        -1 marks an exhausted direction; the other side is taken.
        """
        if (degree_b == -1):
            degree_now = degree_a
        elif (degree_a == -1):
            degree_now = degree_b
        elif (abs(degree_b - degree_v_root) < abs(degree_a - degree_v_root)):
            degree_now = degree_b
        else:
            degree_now = degree_a
        return degree_now

    def get_vertices(self, v):
        """Collect ~2*log2(N) vertices with degree closest to v's degree.

        Starts with same-degree vertices, then expands outward through the
        before/after degree chain built by create_degree; StopIteration is
        (ab)used as an early-exit once enough vertices are gathered.
        """
        num_seleted = 2 * math.log(len(self.graph.G.nodes()), 2)
        vertices = []
        degree_v = self.graph.G.degree(v)
        try:
            c_v = 0
            # Same-degree vertices first (excluding v itself).
            for v2 in self.degrees[degree_v]['vertices']:
                if (v != v2):
                    vertices.append(v2)
                    c_v += 1
                    if (c_v > num_seleted):
                        raise StopIteration
            if ('before' not in self.degrees[degree_v]):
                degree_b = -1
            else:
                degree_b = self.degrees[degree_v]['before']
            if ('after' not in self.degrees[degree_v]):
                degree_a = -1
            else:
                degree_a = self.degrees[degree_v]['after']
            if (degree_b == -1 and degree_a == -1):
                raise StopIteration
            degree_now = self.verifyDegrees(degree_v, degree_a, degree_b)
            # Walk outward through increasingly distant degrees.
            while True:
                for v2 in self.degrees[degree_now]['vertices']:
                    if (v != v2):
                        vertices.append(v2)
                        c_v += 1
                        if (c_v > num_seleted):
                            raise StopIteration
                if (degree_now == degree_b):
                    if ('before' not in self.degrees[degree_b]):
                        degree_b = -1
                    else:
                        degree_b = self.degrees[degree_b]['before']
                else:
                    if ('after' not in self.degrees[degree_a]):
                        degree_a = -1
                    else:
                        degree_a = self.degrees[degree_a]['after']
                if (degree_b == -1 and degree_a == -1):
                    raise StopIteration
                degree_now = self.verifyDegrees(degree_v, degree_a, degree_b)
        except StopIteration:
            return list(vertices)
        return list(vertices)

    def save_embeddings(self, filename):
        """Write embeddings in word2vec text format: header line, then 'node v1 v2 ...'."""
        fout = open(filename, 'w')
        node_num = len(self.vectors.keys())
        fout.write("{} {}\n".format(node_num, self.size))
        for node, vec in self.vectors.items():
            fout.write("{} {}\n".format(node,
                                        ' '.join([str(x) for x in vec])))
        fout.close()

    def save_results(self, filename, method, ratio, result):
        """Append-style helper: write a single 'method ratio result' line to *filename*."""
        fout = open(filename, 'w')
        node_num = len(self.vectors)  # NOTE(review): unused
        fout.write("{} {} {} \n".format(method, ratio, result))
        fout.close()
fout.close()
| 36.298701 | 124 | 0.530948 | from __future__ import print_function
import time
import math
import random
import numpy as np
import pickle as pkl
import networkx as nx
from gensim.models import Word2Vec
from fastdtw import fastdtw
from collections import Counter
from collections import defaultdict
import os
class Z(object):
def __init__(self, graph, path_length, num_paths, dim, prefix, hop, **kwargs):
kwargs["workers"] = kwargs.get("workers", 4)
h = graph
preprocess = False
if preprocess:
self.ppr_matrix = self.constructSubGraph(hop)
self.degrees, self.degree_permuted = self.create_degree()
self.degree_neighbors, self.norm_weight = self.create_ppr_sample_table()
self.dump_to_disk(self.degree_neighbors,'E:/Project/OpenNE/matrix_pkl/' + prefix + '_'+ str(hop) + '_neighbors')
self.dump_to_disk(self.norm_weight,'E:/Project/OpenNE/matrix_pkl/' + prefix + '_'+ str(hop) + '_weight')
else:
self.degree_neighbors = self.load_pkl('E:/Project/OpenNE/matrix_pkl/' + prefix + '_'+ str(hop) + '_neighbors')
self.norm_weight = self.load_pkl('E:/Project/OpenNE/matrix_pkl/' + prefix + '_'+ str(hop) + '_weight')
sentences = self.simulate_walks(
num_walks=num_paths, walk_length=path_length)
kwargs["sentences"] = sentences
kwargs["min_count"] = kwargs.get("min_count", 0)
kwargs["size"] = kwargs.get("size", dim)
kwargs["sg"] = 1
self.size = kwargs["size"]
print("Learning representation...")
word2vec = Word2Vec(**kwargs)
self.vectors = {}
for word in graph.G.nodes():
self.vectors[word] = word2vec.wv[word]
del word2vec
def dump_to_disk(self, f, file_name):
with open(file_name + '.pkl', 'wb') as handle:
pkl.dump(f, handle, protocol=pkl.HIGHEST_PROTOCOL)
def load_pkl(self, file_name):
with open(file_name + '.pkl', 'rb') as handle:
val = pkl.load(handle)
return val
def neighbors(self, fringe):
graph = self.graph.G
res = set()
for node in fringe:
nei = graph.neighbors(node)
nei = set(nei)
res = res.union(nei)
return res
def constructSubGraph(self, hop):
graph = self.graph.G
edge_set = set(graph.edges())
nodes = list(graph.nodes())
ppr_matrix = {}
for node in nodes:
subgraph_map = nx.Graph()
subgraph_map.add_node(node)
fringe = set(node)
visited = set(node)
for dist in range(0, hop):
fringe = self.neighbors(fringe)
fringe = fringe - visited
visited = visited.union(fringe)
visited = list(visited)
for pos_u, u in enumerate(visited):
for v in visited[pos_u+1:]:
if (u, v) in edge_set or (v, u) in edge_set:
subgraph_map.add_edge(u, v)
ppr_matrix[node] = Counter()
walk = self.subgraph_walk(subgraph_map, walk_length=500, start_node=node)
ppr_matrix[node].update(walk)
return ppr_matrix
def subgraph_walk(self, subGraph, walk_length, start_node):
G = subGraph
walk = [start_node]
while len(walk) < walk_length:
cur = walk[-1]
cur_nbrs = list(G.neighbors(cur))
if len(cur_nbrs) > 0:
walk.append(random.choice(cur_nbrs))
else:
break
return walk
def deepwalk_walk(self, walk_length, start_node, alpha = 0.5):
G = self.graph.G
walk = [start_node]
while len(walk) < walk_length:
cur = walk[-1]
alpha = 1
if np.random.rand() < alpha:
walk.append(np.random.choice(self.degree_neighbors[cur], p=self.norm_weight[cur]))
else:
cur_nbrs = list(G.neighbors(cur))
if len(cur_nbrs) > 0:
n2v = 0
if n2v:
nbr = random.choice(cur_nbrs)
if set(cur_nbrs) & set(G.neighbors(nbr)):
walk.append(random.choice(cur_nbrs))
else:
walk.append(nbr)
else:
walk.append(random.choice(cur_nbrs))
else:
break
return walk
def simulate_walks(self, num_walks, walk_length):
G = self.graph.G
walks = []
nodes = list(G.nodes())
print('Simulate walk iteration:')
for walk_iter in range(num_walks):
print(str(walk_iter + 1), '/', str(num_walks))
random.shuffle(nodes)
for node in nodes:
walks.append(self.deepwalk_walk(
walk_length=walk_length, start_node=node))
return walks
def create_degree(self):
G = self.graph.G
print("- Creating degree vectors...")
degrees = {}
degrees_sorted = set()
degree_permuted = {}
for v in G.nodes():
degree = G.degree(v)
degrees_sorted.add(degree)
degree_permuted[v] = degree
if (degree not in degrees):
degrees[degree] = {}
degrees[degree]['vertices'] = []
degrees[degree]['vertices'].append(v)
degrees_sorted = np.array(list(degrees_sorted), dtype='int')
degrees_sorted = np.sort(degrees_sorted)
l = len(degrees_sorted)
for index, degree in enumerate(degrees_sorted):
if (index > 0):
degrees[degree]['before'] = degrees_sorted[index - 1]
if (index < (l - 1)):
degrees[degree]['after'] = degrees_sorted[index + 1]
print("- Degree vectors created.")
return degrees, degree_permuted
def create_ppr_sample_table(self):
print("- Creating PPR sample table ...")
nodes = list(self.graph.G.nodes())
degree_neighbors = {}
norm_weight = {}
nodes_num = len(nodes)
k = 0
for node in nodes:
print(str(k + 1), '/', str(nodes_num))
k += 1
degree_neighbors[node] = self.get_vertices(node)
norm_weight[node] = self.ppr_sample(node, degree_neighbors[node])
print("- PPR sample table created.")
return degree_neighbors, norm_weight
def cost(self, a, b):
ep = 0.001
m = max(a, b) + ep
mi = min(a, b) + ep
return ((m / mi) - 1)
def ppr_sample(self, node, neighbors):
node_ppr_v = [i[1] for i in self.ppr_matrix[node].most_common()]
if len(node_ppr_v) == 0:
node_ppr_v = [1]
sim_list = []
nodes_num = len(self.graph.G.nodes())
for _neighbor in neighbors:
neighbor_ppr_v = [i[1] for i in self.ppr_matrix[_neighbor].most_common()]
if len(neighbor_ppr_v) == 0:
neighbor_ppr_v = [1]
dits_dtw, _ = fastdtw(node_ppr_v, neighbor_ppr_v, radius=1, dist=self.cost)
sim_list.append(np.exp(-1.0 * dits_dtw))
norm_weight = [float(i) / sum(sim_list) for i in sim_list]
return norm_weight
def verifyDegrees(self, degree_v_root, degree_a, degree_b):
if (degree_b == -1):
degree_now = degree_a
elif (degree_a == -1):
degree_now = degree_b
elif (abs(degree_b - degree_v_root) < abs(degree_a - degree_v_root)):
degree_now = degree_b
else:
degree_now = degree_a
return degree_now
def get_vertices(self, v):
num_seleted = 2 * math.log(len(self.graph.G.nodes()), 2)
vertices = []
degree_v = self.graph.G.degree(v)
try:
c_v = 0
for v2 in self.degrees[degree_v]['vertices']:
if (v != v2):
vertices.append(v2)
c_v += 1
if (c_v > num_seleted):
raise StopIteration
if ('before' not in self.degrees[degree_v]):
degree_b = -1
else:
degree_b = self.degrees[degree_v]['before']
if ('after' not in self.degrees[degree_v]):
degree_a = -1
else:
degree_a = self.degrees[degree_v]['after']
if (degree_b == -1 and degree_a == -1):
raise StopIteration
degree_now = self.verifyDegrees(degree_v, degree_a, degree_b)
while True:
for v2 in self.degrees[degree_now]['vertices']:
if (v != v2):
vertices.append(v2)
c_v += 1
if (c_v > num_seleted):
raise StopIteration
if (degree_now == degree_b):
if ('before' not in self.degrees[degree_b]):
degree_b = -1
else:
degree_b = self.degrees[degree_b]['before']
else:
if ('after' not in self.degrees[degree_a]):
degree_a = -1
else:
degree_a = self.degrees[degree_a]['after']
if (degree_b == -1 and degree_a == -1):
raise StopIteration
degree_now = self.verifyDegrees(degree_v, degree_a, degree_b)
except StopIteration:
return list(vertices)
return list(vertices)
def save_embeddings(self, filename):
fout = open(filename, 'w')
node_num = len(self.vectors.keys())
fout.write("{} {}\n".format(node_num, self.size))
for node, vec in self.vectors.items():
fout.write("{} {}\n".format(node,
' '.join([str(x) for x in vec])))
fout.close()
def save_results(self, filename, method, ratio, result):
fout = open(filename, 'w')
node_num = len(self.vectors)
fout.write("{} {} {} \n".format(method, ratio, result))
fout.close()
| true | true |
f739bc02705cb1b50f7d45185a730e81f1ecfdc1 | 3,122 | py | Python | crafters/image/ImageNormalizer/__init__.py | strawberrypie/jina-hub | 8b2356d58687694d817881c840745214f12e94c4 | [
"Apache-2.0"
] | 1 | 2021-03-19T15:37:02.000Z | 2021-03-19T15:37:02.000Z | crafters/image/ImageNormalizer/__init__.py | strawberrypie/jina-hub | 8b2356d58687694d817881c840745214f12e94c4 | [
"Apache-2.0"
] | 5,414 | 2021-04-06T09:04:26.000Z | 2022-03-30T13:12:40.000Z | crafters/image/ImageNormalizer/__init__.py | strawberrypie/jina-hub | 8b2356d58687694d817881c840745214f12e94c4 | [
"Apache-2.0"
] | null | null | null | __copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Tuple, Dict, Union, Iterable
import numpy as np
from jina.executors.decorators import single
from jina.executors.crafters import BaseCrafter
from .helper import _load_image, _move_channel_axis, _crop_image, _resize_short
class ImageNormalizer(BaseCrafter):
    """
    Normalize the image.

    :class:`ImageNormalizer` works on doc-level,
    it receives values of file names on the
    doc-level and returns image matrix on the chunk-level

    :param target_size: Desired output size. If size is a sequence
        like (h, w), the output size will be matched to this.
        If size is an int, the smaller edge of the image will be matched
        to this number maintaining the aspect ratio.
    :param img_mean: The mean of the images in `RGB` channels.
        Set to `[0.485, 0.456, 0.406]` for the models trained
        on `imagenet` with pytorch backbone.
    :param img_std: the std of the images in `RGB` channels.
        Set to `[0.229, 0.224, 0.225]` for the models trained
        on `imagenet` with pytorch backbone.
    :param resize_dim: the size of images' height and width to be resized to.
        The images are resized before cropping to the output size
    :param channel_axis: the axis id of the color channel,
        ``-1`` indicates the color channel info at the last axis
    """

    def __init__(self,
                 target_size: Union[Iterable[int], int] = 224,
                 img_mean: Tuple[float, float, float] = (0, 0, 0),
                 img_std: Tuple[float, float, float] = (1, 1, 1),
                 resize_dim: int = 256,
                 channel_axis: int = -1,
                 *args,
                 **kwargs):
        """Set Constructor."""
        super().__init__(*args, **kwargs)
        if isinstance(target_size, int):
            self.target_size = target_size
        elif isinstance(target_size, Iterable):
            self.target_size = tuple(target_size)
        else:
            raise ValueError(f'target_size {target_size} should be an integer or tuple/list of 2 integers')
        self.resize_dim = resize_dim
        # Reshape mean/std to (1, 1, 3) so they broadcast over H x W x C images.
        self.img_mean = np.array(img_mean).reshape((1, 1, 3))
        self.img_std = np.array(img_std).reshape((1, 1, 3))
        self.channel_axis = channel_axis

    @single
    def craft(self, blob: 'np.ndarray', *args, **kwargs) -> Dict:
        """
        Normalize the image.

        :param blob: the ndarray of the image with the color channel at the last axis
        :return: a chunk dict with the normalized image
        """
        # Load with channels-last, normalize, then restore the caller's channel axis.
        raw_img = _load_image(blob, self.channel_axis)
        _img = self._normalize(raw_img)
        img = _move_channel_axis(_img, -1, self.channel_axis)
        return dict(offset=0, blob=img)

    def _normalize(self, img) -> 'np.ndarray':
        # Resize the short edge, center-crop to target size, then scale to
        # [0, 1] and standardize with the configured per-channel mean/std.
        img = _resize_short(img, target_size=self.resize_dim)
        img, _, _ = _crop_image(img, target_size=self.target_size, how='center')
        img = np.array(img).astype('float32')/255
        img -= self.img_mean
        img /= self.img_std
        return img
| 39.518987 | 107 | 0.63517 | __copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Tuple, Dict, Union, Iterable
import numpy as np
from jina.executors.decorators import single
from jina.executors.crafters import BaseCrafter
from .helper import _load_image, _move_channel_axis, _crop_image, _resize_short
class ImageNormalizer(BaseCrafter):
def __init__(self,
target_size: Union[Iterable[int], int] = 224,
img_mean: Tuple[float] = (0, 0, 0),
img_std: Tuple[float] = (1, 1, 1),
resize_dim: int = 256,
channel_axis: int = -1,
*args,
**kwargs):
super().__init__(*args, **kwargs)
if isinstance(target_size, int):
self.target_size = target_size
elif isinstance(target_size, Iterable):
self.target_size = tuple(target_size)
else:
raise ValueError(f'target_size {target_size} should be an integer or tuple/list of 2 integers')
self.resize_dim = resize_dim
self.img_mean = np.array(img_mean).reshape((1, 1, 3))
self.img_std = np.array(img_std).reshape((1, 1, 3))
self.channel_axis = channel_axis
@single
def craft(self, blob: 'np.ndarray', *args, **kwargs) -> Dict:
raw_img = _load_image(blob, self.channel_axis)
_img = self._normalize(raw_img)
img = _move_channel_axis(_img, -1, self.channel_axis)
return dict(offset=0, blob=img)
def _normalize(self, img):
img = _resize_short(img, target_size=self.resize_dim)
img, _, _ = _crop_image(img, target_size=self.target_size, how='center')
img = np.array(img).astype('float32')/255
img -= self.img_mean
img /= self.img_std
return img
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.