id
stringlengths
1
265
text
stringlengths
6
5.19M
dataset_id
stringclasses
7 values
/CoSA-0.3.0.tar.gz/CoSA-0.3.0/cosa/analyzers/dispatcher.py
import os import copy import pickle from pysmt.shortcuts import Symbol from cosa.utils.logger import Logger from cosa.analyzers.mcsolver import MCConfig from cosa.analyzers.bmc_safety import BMCSafety from cosa.analyzers.bmc_parametric import BMCParametric from cosa.analyzers.bmc_ltl import BMCLTL from cosa.problem import VerificationType, Problem, VerificationStatus, Trace from cosa.encoders.miter import Miter from cosa.encoders.formulae import StringParser from cosa.representation import HTS, TS from cosa.encoders.btor2 import BTOR2Parser from cosa.encoders.ltl import LTLParser from cosa.encoders.factory import ModelParsersFactory, ClockBehaviorsFactory, GeneratorsFactory from cosa.modifiers.factory import ModelModifiersFactory from cosa.modifiers.coi import ConeOfInfluence from cosa.encoders.template import EncoderConfig, ModelInformation from cosa.encoders.parametric_behavior import ParametricBehavior from cosa.printers.trace import TextTracePrinter, VCDTracePrinter from cosa.modifiers.model_extension import ModelExtension from cosa.encoders.symbolic_transition_system import SymbolicSimpleTSParser from cosa.printers.factory import HTSPrintersFactory from cosa.printers.hts import STSHTSPrinter FLAG_SR = "[" FLAG_ST = "]" FLAG_SP = "+" MODEL_SP = ";" FILE_SP = "," COSACACHEDIR = ".CoSA/cache" class ProblemSolver(object): parser = None sparser = None lparser = None model_info = None coi = None def __init__(self): self.sparser = None self.lparser = None self.coi = None self.model_info = ModelInformation() GeneratorsFactory.init_generators() ClockBehaviorsFactory.init_clockbehaviors() def __process_trace(self, hts, trace, config, problem): prevass = [] full_trace = problem.full_trace or config.full_trace trace_vars_change = problem.trace_vars_change or config.trace_vars_change trace_all_vars = problem.trace_all_vars or config.trace_all_vars trace_values_base = problem.trace_values_base or config.trace_values_base diff_only = not trace_vars_change all_vars = 
trace_all_vars txttrace_synth_clock = False if full_trace: diff_only = False all_vars = True traces = [] abstract_clock_list = [] if txttrace_synth_clock: abstract_clock_list=self.model_info.abstract_clock_list # Human Readable Format hr_printer = TextTracePrinter() hr_printer.prop_vars = trace.prop_vars hr_printer.diff_only = diff_only hr_printer.all_vars = all_vars hr_printer.values_base = trace_values_base hr_trace = hr_printer.print_trace(hts=hts, \ model=trace.model, \ length=trace.length, \ map_function=self.parser.remap_an2or, \ find_loop=trace.infinite, \ abstract_clock_list=abstract_clock_list) traceH = Trace(hr_trace, trace.length) traceH.extension = hr_printer.get_file_ext() traceH.human_readable = hr_trace.human_readable traces.append(traceH) # VCD format vcd_trace = None if problem.vcd: vcd_printer = VCDTracePrinter() vcd_trace = vcd_printer.print_trace(hts=hts, \ model=trace.model, \ length=trace.length, \ map_function=self.parser.remap_an2or, \ abstract_clock_list=self.model_info.abstract_clock_list) traceV = Trace(vcd_trace, trace.length) traceV.extension = vcd_printer.get_file_ext() traceV.human_readable = vcd_trace.human_readable traces.append(traceV) return traces def __solve_problem(self, problem, config): if problem.name is not None: Logger.log("\n*** Analyzing problem \"%s\" ***"%(problem), 1) Logger.msg("Solving \"%s\" "%problem.name, 0, not(Logger.level(1))) parsing_defs = [problem.formula, problem.lemmas, problem.assumptions] for i in range(len(parsing_defs)): if parsing_defs[i] is not None: pdef_file = problem.relative_path+parsing_defs[i] if os.path.isfile(pdef_file): with open(pdef_file) as f: parsing_defs[i] = [p.strip() for p in f.read().strip().split("\n")] else: parsing_defs[i] = [p.strip() for p in parsing_defs[i].split(MODEL_SP)] else: parsing_defs[i] = None [formulae, problem.lemmas, problem.assumptions] = parsing_defs ParametricBehavior.apply_to_problem(problem, self.model_info) assumps = None lemmas = None trace = None traces = 
None if formulae is None: if problem.verification == VerificationType.SIMULATION: formulae = ["True"] elif (problem.verification is not None) and (problem.verification != VerificationType.EQUIVALENCE): Logger.error("Property not provided") accepted_ver = False if formulae is not None: problem.formula = formulae[0] precondition = config.precondition if config.precondition is not None else problem.precondition if precondition and problem.verification == VerificationType.SAFETY: problem.formula = "(%s) -> (%s)"%(precondition, problem.formula) if (problem.verification != VerificationType.EQUIVALENCE) and (problem.formula is not None): assumps = [t[1] for t in self.sparser.parse_formulae(problem.assumptions)] lemmas = [t[1] for t in self.sparser.parse_formulae(problem.lemmas)] for ass in assumps: problem.hts.add_assumption(ass) for lemma in lemmas: problem.hts.add_lemma(lemma) if problem.verification != VerificationType.LTL: (strprop, prop, types) = self.sparser.parse_formulae([problem.formula])[0] else: (strprop, prop, types) = self.lparser.parse_formulae([problem.formula])[0] problem.formula = prop if problem.verification is None: return problem if problem.coi: if Logger.level(2): timer = Logger.start_timer("COI") problem.hts = self.coi.compute(problem.hts, problem.formula) if Logger.level(2): Logger.get_timer(timer) mc_config = self.problem2mc_config(problem, config) bmc_safety = BMCSafety(problem.hts, mc_config) bmc_parametric = BMCParametric(problem.hts, mc_config) bmc_ltl = BMCLTL(problem.hts, mc_config) res = VerificationStatus.UNC bmc_length = max(problem.bmc_length, config.bmc_length) bmc_length_min = max(problem.bmc_length_min, config.bmc_length_min) if problem.verification == VerificationType.SAFETY: accepted_ver = True Logger.log("Property: %s"%(prop.serialize(threshold=100)), 2) res, trace, _ = bmc_safety.safety(prop, bmc_length, bmc_length_min, config.processes) if problem.verification == VerificationType.LTL: accepted_ver = True res, trace, _ = 
bmc_ltl.ltl(prop, bmc_length, bmc_length_min) if problem.verification == VerificationType.SIMULATION: accepted_ver = True res, trace = bmc_safety.simulate(prop, bmc_length) if problem.verification == VerificationType.PARAMETRIC: accepted_ver = True Logger.log("Property: %s"%(prop.serialize(threshold=100)), 2) res, traces, problem.region = bmc_parametric.parametric_safety(prop, bmc_length, bmc_length_min, ModelExtension.get_parameters(problem.hts), at_most=problem.cardinality) hts = problem.hts if problem.verification == VerificationType.EQUIVALENCE: accepted_ver = True htseq, miter_out = Miter.combine_systems(problem.hts, \ problem.hts2, \ bmc_length, \ problem.symbolic_init, \ problem.formula, \ True) if problem.assumptions is not None: assumps = [t[1] for t in self.sparser.parse_formulae(problem.assumptions)] if problem.lemmas is not None: lemmas = [t[1] for t in self.sparser.parse_formulae(problem.lemmas)] if assumps is not None: for assumption in assumps: htseq.add_assumption(assumption) if lemmas is not None: for lemma in lemmas: htseq.add_lemma(lemma) bmcseq = BMCSafety(htseq, mc_config) hts = htseq res, trace, t = bmcseq.safety(miter_out, bmc_length, bmc_length_min) if not accepted_ver: Logger.error("Invalid verification type") problem.status = res if trace is not None: problem.traces = self.__process_trace(hts, trace, config, problem) if traces is not None: problem.traces = [] for trace in traces: problem.traces += self.__process_trace(hts, trace, config, problem) if problem.assumptions is not None: problem.hts.assumptions = None Logger.log("\n*** Problem \"%s\" is %s ***"%(problem, res), 1) def get_file_flags(self, strfile): if FLAG_SR not in strfile: return (strfile, None) (strfile, flags) = (strfile[:strfile.index(FLAG_SR)], strfile[strfile.index(FLAG_SR)+1:strfile.index(FLAG_ST)].split(FLAG_SP)) return (strfile, flags) def md5(self, fname): import hashlib hash_md5 = hashlib.md5() with open(fname, "rb") as f: for chunk in iter(lambda: f.read(4096), b""): 
hash_md5.update(chunk) return hash_md5.hexdigest() def _is_cached(self, cachedir, filename, clean): hts_file = "%s/%s.ssts"%(cachedir, filename) mi_file = "%s/%s.mi"%(cachedir, filename) inv_file = "%s/%s.inv"%(cachedir, filename) ltl_file = "%s/%s.ltl"%(cachedir, filename) if clean: if os.path.isfile(hts_file): os.remove(hts_file) if os.path.isfile(mi_file): os.remove(mi_file) if os.path.isfile(inv_file): os.remove(inv_file) if os.path.isfile(ltl_file): os.remove(ltl_file) return os.path.isfile(hts_file) and \ os.path.isfile(mi_file) and \ os.path.isfile(inv_file) and \ os.path.isfile(ltl_file) def _to_cache(self, cachedir, filename, hts, inv, ltl, model_info): if not os.path.isdir(cachedir): os.makedirs(cachedir) hts_file = "%s/%s.ssts"%(cachedir, filename) mi_file = "%s/%s.mi"%(cachedir, filename) inv_file = "%s/%s.inv"%(cachedir, filename) ltl_file = "%s/%s.ltl"%(cachedir, filename) printer = HTSPrintersFactory.printer_by_name(STSHTSPrinter().get_name()) with open(hts_file, "w") as f: f.write(printer.print_hts(hts, properties=[], ftrans=True)) with open(mi_file, 'wb') as f: pickle.dump(model_info, f) with open(inv_file, 'wb') as f: pickle.dump(inv, f) with open(ltl_file, 'wb') as f: pickle.dump(ltl, f) def _from_cache(self, cachedir, filename, config, flags): hts_file = "%s/%s.ssts"%(cachedir, filename) mi_file = "%s/%s.mi"%(cachedir, filename) inv_file = "%s/%s.inv"%(cachedir, filename) ltl_file = "%s/%s.ltl"%(cachedir, filename) parser = SymbolicSimpleTSParser() hts = parser.parse_file(hts_file, config, flags)[0] with open(mi_file, 'rb') as f: model_info = pickle.load(f) if model_info is not None: # Symbols have to be re-defined to match the current object addresses model_info.abstract_clock_list = [(Symbol(x[0].symbol_name(), x[0].symbol_type()), x[1]) for x in model_info.abstract_clock_list] model_info.clock_list = [Symbol(x.symbol_name(), x.symbol_type()) for x in model_info.clock_list] with open(inv_file, 'rb') as f: inv = pickle.load(f) with 
open(ltl_file, 'rb') as f: ltl = pickle.load(f) return (hts, inv, ltl, model_info) def parse_model(self, \ relative_path, \ model_files, \ encoder_config, \ name=None, \ modifier=None, \ cache_files=False, \ clean_cache=False): hts = HTS(name if name is not None else "System") invar_props = [] ltl_props = [] models = model_files.split(FILE_SP) for strfile in models: (strfile, flags) = self.get_file_flags(strfile) filetype = strfile.split(".")[-1] strfile = strfile.replace("~", os.path.expanduser("~")) if strfile[0] != "/": strfile = relative_path+strfile parser = None for av_parser in ModelParsersFactory.get_parsers(): assert av_parser.name is not None if filetype in av_parser.get_extensions(): parser = av_parser if not self.parser: self.parser = av_parser if parser is not None: if not os.path.isfile(strfile): Logger.error("File \"%s\" does not exist"%strfile) if cache_files: md5 = self.md5(strfile) cf = "-".join(["1" if encoder_config.abstract_clock else "0", \ "1" if encoder_config.add_clock else "0", \ "1" if encoder_config.boolean else "0"]) cachefile = "%s-%s"%(md5, cf) cachedir = "%s/%s"%("/".join(strfile.split("/")[:-1]), COSACACHEDIR) if cache_files and self._is_cached(cachedir, cachefile, clean_cache): Logger.msg("Loading from cache file \"%s\"... "%(strfile), 0) (hts_a, inv_a, ltl_a, model_info) = self._from_cache(cachedir, cachefile, encoder_config, flags) else: Logger.msg("Parsing file \"%s\"... 
"%(strfile), 0) (hts_a, inv_a, ltl_a) = parser.parse_file(strfile, encoder_config, flags) model_info = parser.get_model_info() if modifier is not None: modifier(hts_a) if cache_files and not clean_cache: self._to_cache(cachedir, cachefile, hts_a, inv_a, ltl_a, model_info) self.model_info.combine(model_info) hts.combine(hts_a) invar_props += inv_a ltl_props += ltl_a Logger.log("DONE", 0) continue Logger.error("Filetype \"%s\" unsupported or parser is not available"%filetype) if Logger.level(1): print(hts.print_statistics(name, Logger.level(2))) return (hts, invar_props, ltl_props) def solve_problems(self, problems, config): encoder_config = self.problems2encoder_config(config, problems) self.sparser = StringParser(encoder_config) self.lparser = LTLParser() self.coi = ConeOfInfluence() invar_props = [] ltl_props = [] si = False if len(problems.symbolic_inits) == 0: problems.symbolic_inits.add(si) HTSM = 0 HTS2 = 1 HTSD = (HTSM, si) model_extension = config.model_extension if problems.model_extension is None else problems.model_extension assume_if_true = config.assume_if_true or problems.assume_if_true cache_files = config.cache_files or problems.cache_files clean_cache = config.clean_cache modifier = None if model_extension is not None: modifier = lambda hts: ModelExtension.extend(hts, ModelModifiersFactory.modifier_by_name(model_extension)) # generate systems for each problem configuration systems = {} for si in problems.symbolic_inits: encoder_config.symbolic_init = si (systems[(HTSM, si)], invar_props, ltl_props) = self.parse_model(problems.relative_path, \ problems.model_file, \ encoder_config, \ "System 1", \ modifier, \ cache_files=cache_files, \ clean_cache=clean_cache) if problems.equivalence is not None: (systems[(HTS2, si)], _, _) = self.parse_model(problems.relative_path, \ problems.equivalence, \ encoder_config, \ "System 2", \ cache_files=cache_files, \ clean_cache=clean_cache) else: systems[(HTS2, si)] = None if config.safety or config.problems: for 
invar_prop in invar_props: inv_prob = problems.new_problem() inv_prob.verification = VerificationType.SAFETY inv_prob.name = invar_prop[0] inv_prob.description = invar_prop[1] inv_prob.formula = invar_prop[2] problems.add_problem(inv_prob) if config.ltl or config.problems: for ltl_prop in ltl_props: ltl_prob = problems.new_problem() ltl_prob.verification = VerificationType.LTL ltl_prob.name = ltl_prop[0] ltl_prob.description = ltl_prop[1] ltl_prob.formula = ltl_prop[2] problems.add_problem(ltl_prob) if HTSD in systems: problems._hts = systems[HTSD] for problem in problems.problems: problem.hts = systems[(HTSM, problem.symbolic_init)] if problems._hts is None: problems._hts = problem.hts problem.hts2 = systems[(HTS2, problem.symbolic_init)] if problems._hts2 is None: problems._hts2 = problem.hts2 problem.vcd = problems.vcd or config.vcd or problem.vcd problem.abstract_clock = problems.abstract_clock or config.abstract_clock problem.add_clock = problems.add_clock or config.add_clock problem.coi = problems.coi or config.coi problem.run_coreir_passes = problems.run_coreir_passes problem.relative_path = problems.relative_path problem.cardinality = max(problems.cardinality, config.cardinality) if not problem.full_trace: problem.full_trace = problems.full_trace if not problem.trace_vars_change: problem.trace_vars_change = problems.trace_vars_change if not problem.trace_all_vars: problem.trace_all_vars = problems.trace_all_vars if not problem.clock_behaviors: clk_bhvs = [p for p in [problems.clock_behaviors, config.clock_behaviors] if p is not None] if len(clk_bhvs) > 0: problem.clock_behaviors = ";".join(clk_bhvs) if not problem.generators: problem.generators = config.generators Logger.log("Solving with abstract_clock=%s, add_clock=%s"%(problem.abstract_clock, problem.add_clock), 2) if problem.trace_prefix is not None: problem.trace_prefix = "".join([problem.relative_path,problem.trace_prefix]) if config.time or problems.time: timer_solve = Logger.start_timer("Problem 
%s"%problem.name, False) try: self.__solve_problem(problem, config) if problem.verification is None: Logger.log("Unset verification", 2) continue Logger.msg(" %s\n"%problem.status, 0, not(Logger.level(1))) if (assume_if_true) and \ (problem.status == VerificationStatus.TRUE) and \ (problem.assumptions == None) and \ (problem.verification == VerificationType.SAFETY): ass_ts = TS("Previous assumption from property") if TS.has_next(problem.formula): ass_ts.trans = problem.formula else: ass_ts.invar = problem.formula problem.hts.reset_formulae() problem.hts.add_ts(ass_ts) if config.time or problems.time: problem.time = Logger.get_timer(timer_solve, False) except KeyboardInterrupt as e: Logger.msg("\b\b Skipped!\n", 0) def problem2mc_config(self, problem, config): mc_config = MCConfig() config_selection = lambda problem, config: config if problem is None else problem mc_config.smt2file = config_selection(problem.smt2_tracing, config.smt2file) mc_config.prefix = problem.name mc_config.strategy = config_selection(problem.strategy, config.strategy) mc_config.incremental = config_selection(problem.incremental, config.incremental) mc_config.skip_solving = config_selection(problem.skip_solving, config.skip_solving) mc_config.solver_name = config_selection(problem.solver_name, config.solver_name) mc_config.prove = config_selection(problem.prove, config.prove) return mc_config def problems2encoder_config(self, config, problems): encoder_config = EncoderConfig() encoder_config.abstract_clock = problems.abstract_clock or config.abstract_clock encoder_config.symbolic_init = config.symbolic_init or config.symbolic_init encoder_config.zero_init = problems.zero_init or config.zero_init encoder_config.add_clock = problems.add_clock or config.add_clock encoder_config.deterministic = config.deterministic encoder_config.run_passes = config.run_passes encoder_config.boolean = problems.boolean or config.boolean encoder_config.devel = config.devel return encoder_config
PypiClean
/LoProp-0.3.5-py3-none-any.whl/loprop/veloxchem.py
import os import h5py from util.full import Matrix from .core import MolFrag class MolFragVeloxChem(MolFrag): dipole_labels = ('x', 'y', 'z') def __init__(self, tmpdir, **kwargs): super().__init__(tmpdir, **kwargs) # # Veloxchem files # self.interface = kwargs['checkpoint_file'] self.scf = kwargs['scf_checkpoint_file'] self._Z = None self._R = None self._Rab = None self.cpa = [] self.opa = [] self.noa = 0 self.get_basis_info() self.get_molecule_info() def get_basis_info(self): """ Obtain basis set info from checkpoint file """ with h5py.File(self.interface, 'r') as f: self.cpa = [int(i) for i in f['contracted_per_atom'][...]] self.opa = [ occ[...] for occ in f[f'occupied_per_atom'].values() ] self.noa = len(self.cpa) def get_molecule_info(self): noa = self.noa self.Rab = Matrix((noa, noa, 3)) self.dRab = Matrix((noa, noa, 3)) for a in range(noa): for b in range(noa): self.Rab[a, b, :] = (self.R[a, :] + self.R[b, :]) / 2 self.dRab[a, b, :] = (self.R[a, :] - self.R[b, :]) / 2 def get_density_matrix(self): with h5py.File(self.interface, 'r') as f: D = f['ao_density_matrix'][...] 
return D @property def x(self): """ Read dipole matrices to blocked loprop basis """ if self._x is not None: return self._x self._x = self.get_dipole_matrices() return self._x def get_dipole_matrices(self): with h5py.File(self.interface, 'r') as f: Dx = f['ao_dipole_matrices/x'][...].view(Matrix) Dy = f['ao_dipole_matrices/y'][...].view(Matrix) Dz = f['ao_dipole_matrices/z'][...].view(Matrix) return tuple(self.ao_to_blocked_loprop(Dx, Dy, Dz)) def get_quadrupole_matrices(self): with h5py.File(self.interface, 'r') as f: Qxx = f['ao_quadrupole_matrices/xx'][...].view(Matrix) Qxy = f['ao_quadrupole_matrices/xy'][...].view(Matrix) Qxz = f['ao_quadrupole_matrices/xz'][...].view(Matrix) Qyy = f['ao_quadrupole_matrices/yy'][...].view(Matrix) Qyz = f['ao_quadrupole_matrices/yz'][...].view(Matrix) Qzz = f['ao_quadrupole_matrices/zz'][...].view(Matrix) return tuple( self.ao_to_blocked_loprop( Qxx, Qxy, Qxz, Qyy, Qyz, Qzz ) ) @property def Dk(self): """ Read perturbed ao density matrices from file """ if self._Dk is not None: return self._Dk Dk = {} freq = 0.0 with h5py.File(self.interface, 'r') as f: for c in 'x', 'y', 'z': Dk[(c, freq)] = f[f'ao_lr_density_matrix/{c}/{freq}'][...] self._Dk = self.contravariant_ao_to_blocked_loprop(Dk) return self._Dk @property def D2k(self): pass def S(self): # # read overlap from hd5 file # with h5py.File(self.interface, 'r') as f: S = f['ao_overlap_matrix'][...].view(Matrix) return S @property def Z(self): if self._Z is None: with h5py.File(self.scf, 'r') as f: self._Z = f['nuclear_charges'][...] return self._Z @property def R(self): if self._R is None: with h5py.File(self.interface, 'r') as f: self._R = f['nuclear_coordinates'][...] return self._R
PypiClean
/algorithmadts-0.2.1.tar.gz/algorithmadts-0.2.1/README.md
# Algorithm Abstract Data Types Finlay's package for Abstract Data Types written for Algorithmics class ## Installation Run the following command in your terminal: `pip install AlgorithmADTs` AlgorithmADTs can now be imported into your python scripts! I recommend `from AlgorithmADTs import *` to include all functionality, but you can also import from `AlgorithmADTs.AbstractDataTypes` or `AlgorithmADTs.GraphAlgorithms` ## ADTS: ``` Array create: Integer -> Array set: Array x Integer x Element -> Array get: Array x Integer -> Element ``` ``` List create: None -> List is_empty: Array -> Boolean set: Array x Integer x Element -> List get: Array x Integer -> Element append: Array x Element -> List ``` ``` Stack create: None -> Stack push: Stack x Element -> Stack pop: Stack -> Stack is_empty: Stack -> Boolean head: Stack -> Element ``` ``` Queue create: None -> Queue enqueue: Queue x Element -> Queue dequeue: Queue -> Queue is_empty: Queue -> Boolean head: Queue -> Element ``` ``` PriorityQueue create: None -> Priority Queue enqueue: Priority Queue x Element x Integer -> Priority Queue dequeue: Priority Queue -> Priority Queue is_empty: Priority Queue -> Boolean head: Priority Queue -> Element ``` ``` Dictionary create: None -> Dictionary get: Dictionary x Element -> Element set: Dictionary x Element x Element -> Dictionary add: Dictionary x Element x Element -> Dictionary remove: Dictionary x Element -> Dictionary has_key: Dictionary x Element -> Boolean is_empty: Dictionary -> Boolean ``` ``` Graph create: None -> Graph add_node: Graph x Element -> Graph add_edge: Graph x Element x Element -> Graph adjacent: Graph x Element x Element -> Boolean neighbours: Graph x Element -> List ``` Multiple nodes and edges can now be added at one time with `add_nodes` and `add_edges`, using an iterable ``` WeightedGraph (inherits from Graph) create: None -> Graph add_node: Graph x Element -> Graph add_edge: Graph x Element x Element -> Graph adjacent: Graph x Element x Element -> 
Boolean neighbours: Graph x Element -> List get_weight: Graph x Element x Element -> integer ``` Note that there is no restriction in these classes that elements be hashable, unlike some Python data types e.g. a Python `dict` requires keys to be hashable. It also defines a variable `infinity`, set equal to `float('inf')` The following magic methods are supported: - `__getitem__` and `__setitem__` for classes with a 'get' and 'set' function. This allows you to call `instance[key]` and `instance[key] = value`. - `__iter__` for Array and List, which operates as expected. Dictionary iter returns an iterable of keys. This enables iterating through a class like `for elem in instance` - `__str__` and `__repr__` are defined for all classes except graphs and allow for classes to be easily viewed through printing Note that only the head element is visible for a stack or queue, so it is the only information that can be returned by these methods - Numerical magic methods (e.g. `__add__`) are defined for matrices ## Graph Algorithms Currently, the following graph algorithms are defined: - Prim's algorithm for computing the Minimal Spanning Tree of a weighted, undirected graph - Dijkstra's algorithm for finding the single source shortest path in a weighted graph - The Bellman-Ford algorithm which extends the functionality of Dijkstra's algorithm to allow for negative weights - The two variants of the Floyd-Warshall algorithm to calculate shortest path between all nodes and transitive closure of an unweighted graph - The PageRank algorithm for determining the relative importance of nodes in an unweighted graph ## Version things To implement: - Optional hashing for graphs? - Search methods like DPS BFS
PypiClean
/ocn-xmlchecker.env.tar.gz/env (copy)/lib/python2.7/encodings/cp856.py
"""#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='cp856', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( u'\x00' # 0x00 -> NULL u'\x01' # 0x01 -> START OF HEADING u'\x02' # 0x02 -> START OF TEXT u'\x03' # 0x03 -> END OF TEXT u'\x04' # 0x04 -> END OF TRANSMISSION u'\x05' # 0x05 -> ENQUIRY u'\x06' # 0x06 -> ACKNOWLEDGE u'\x07' # 0x07 -> BELL u'\x08' # 0x08 -> BACKSPACE u'\t' # 0x09 -> HORIZONTAL TABULATION u'\n' # 0x0A -> LINE FEED u'\x0b' # 0x0B -> VERTICAL TABULATION u'\x0c' # 0x0C -> FORM FEED u'\r' # 0x0D -> CARRIAGE RETURN u'\x0e' # 0x0E -> SHIFT OUT u'\x0f' # 0x0F -> SHIFT IN u'\x10' # 0x10 -> DATA LINK ESCAPE u'\x11' # 0x11 -> DEVICE CONTROL ONE u'\x12' # 0x12 -> DEVICE CONTROL TWO u'\x13' # 0x13 -> DEVICE CONTROL THREE u'\x14' # 0x14 -> DEVICE CONTROL FOUR u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE u'\x16' # 0x16 -> SYNCHRONOUS IDLE u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK u'\x18' # 0x18 -> CANCEL u'\x19' # 0x19 -> END OF MEDIUM u'\x1a' # 0x1A -> SUBSTITUTE u'\x1b' # 0x1B -> ESCAPE u'\x1c' # 0x1C -> FILE SEPARATOR u'\x1d' # 0x1D -> GROUP SEPARATOR u'\x1e' # 0x1E -> RECORD SEPARATOR u'\x1f' # 0x1F -> UNIT 
SEPARATOR u' ' # 0x20 -> SPACE u'!' # 0x21 -> EXCLAMATION MARK u'"' # 0x22 -> QUOTATION MARK u'#' # 0x23 -> NUMBER SIGN u'$' # 0x24 -> DOLLAR SIGN u'%' # 0x25 -> PERCENT SIGN u'&' # 0x26 -> AMPERSAND u"'" # 0x27 -> APOSTROPHE u'(' # 0x28 -> LEFT PARENTHESIS u')' # 0x29 -> RIGHT PARENTHESIS u'*' # 0x2A -> ASTERISK u'+' # 0x2B -> PLUS SIGN u',' # 0x2C -> COMMA u'-' # 0x2D -> HYPHEN-MINUS u'.' # 0x2E -> FULL STOP u'/' # 0x2F -> SOLIDUS u'0' # 0x30 -> DIGIT ZERO u'1' # 0x31 -> DIGIT ONE u'2' # 0x32 -> DIGIT TWO u'3' # 0x33 -> DIGIT THREE u'4' # 0x34 -> DIGIT FOUR u'5' # 0x35 -> DIGIT FIVE u'6' # 0x36 -> DIGIT SIX u'7' # 0x37 -> DIGIT SEVEN u'8' # 0x38 -> DIGIT EIGHT u'9' # 0x39 -> DIGIT NINE u':' # 0x3A -> COLON u';' # 0x3B -> SEMICOLON u'<' # 0x3C -> LESS-THAN SIGN u'=' # 0x3D -> EQUALS SIGN u'>' # 0x3E -> GREATER-THAN SIGN u'?' # 0x3F -> QUESTION MARK u'@' # 0x40 -> COMMERCIAL AT u'A' # 0x41 -> LATIN CAPITAL LETTER A u'B' # 0x42 -> LATIN CAPITAL LETTER B u'C' # 0x43 -> LATIN CAPITAL LETTER C u'D' # 0x44 -> LATIN CAPITAL LETTER D u'E' # 0x45 -> LATIN CAPITAL LETTER E u'F' # 0x46 -> LATIN CAPITAL LETTER F u'G' # 0x47 -> LATIN CAPITAL LETTER G u'H' # 0x48 -> LATIN CAPITAL LETTER H u'I' # 0x49 -> LATIN CAPITAL LETTER I u'J' # 0x4A -> LATIN CAPITAL LETTER J u'K' # 0x4B -> LATIN CAPITAL LETTER K u'L' # 0x4C -> LATIN CAPITAL LETTER L u'M' # 0x4D -> LATIN CAPITAL LETTER M u'N' # 0x4E -> LATIN CAPITAL LETTER N u'O' # 0x4F -> LATIN CAPITAL LETTER O u'P' # 0x50 -> LATIN CAPITAL LETTER P u'Q' # 0x51 -> LATIN CAPITAL LETTER Q u'R' # 0x52 -> LATIN CAPITAL LETTER R u'S' # 0x53 -> LATIN CAPITAL LETTER S u'T' # 0x54 -> LATIN CAPITAL LETTER T u'U' # 0x55 -> LATIN CAPITAL LETTER U u'V' # 0x56 -> LATIN CAPITAL LETTER V u'W' # 0x57 -> LATIN CAPITAL LETTER W u'X' # 0x58 -> LATIN CAPITAL LETTER X u'Y' # 0x59 -> LATIN CAPITAL LETTER Y u'Z' # 0x5A -> LATIN CAPITAL LETTER Z u'[' # 0x5B -> LEFT SQUARE BRACKET u'\\' # 0x5C -> REVERSE SOLIDUS u']' # 0x5D -> RIGHT SQUARE BRACKET u'^' # 0x5E -> 
CIRCUMFLEX ACCENT u'_' # 0x5F -> LOW LINE u'`' # 0x60 -> GRAVE ACCENT u'a' # 0x61 -> LATIN SMALL LETTER A u'b' # 0x62 -> LATIN SMALL LETTER B u'c' # 0x63 -> LATIN SMALL LETTER C u'd' # 0x64 -> LATIN SMALL LETTER D u'e' # 0x65 -> LATIN SMALL LETTER E u'f' # 0x66 -> LATIN SMALL LETTER F u'g' # 0x67 -> LATIN SMALL LETTER G u'h' # 0x68 -> LATIN SMALL LETTER H u'i' # 0x69 -> LATIN SMALL LETTER I u'j' # 0x6A -> LATIN SMALL LETTER J u'k' # 0x6B -> LATIN SMALL LETTER K u'l' # 0x6C -> LATIN SMALL LETTER L u'm' # 0x6D -> LATIN SMALL LETTER M u'n' # 0x6E -> LATIN SMALL LETTER N u'o' # 0x6F -> LATIN SMALL LETTER O u'p' # 0x70 -> LATIN SMALL LETTER P u'q' # 0x71 -> LATIN SMALL LETTER Q u'r' # 0x72 -> LATIN SMALL LETTER R u's' # 0x73 -> LATIN SMALL LETTER S u't' # 0x74 -> LATIN SMALL LETTER T u'u' # 0x75 -> LATIN SMALL LETTER U u'v' # 0x76 -> LATIN SMALL LETTER V u'w' # 0x77 -> LATIN SMALL LETTER W u'x' # 0x78 -> LATIN SMALL LETTER X u'y' # 0x79 -> LATIN SMALL LETTER Y u'z' # 0x7A -> LATIN SMALL LETTER Z u'{' # 0x7B -> LEFT CURLY BRACKET u'|' # 0x7C -> VERTICAL LINE u'}' # 0x7D -> RIGHT CURLY BRACKET u'~' # 0x7E -> TILDE u'\x7f' # 0x7F -> DELETE u'\u05d0' # 0x80 -> HEBREW LETTER ALEF u'\u05d1' # 0x81 -> HEBREW LETTER BET u'\u05d2' # 0x82 -> HEBREW LETTER GIMEL u'\u05d3' # 0x83 -> HEBREW LETTER DALET u'\u05d4' # 0x84 -> HEBREW LETTER HE u'\u05d5' # 0x85 -> HEBREW LETTER VAV u'\u05d6' # 0x86 -> HEBREW LETTER ZAYIN u'\u05d7' # 0x87 -> HEBREW LETTER HET u'\u05d8' # 0x88 -> HEBREW LETTER TET u'\u05d9' # 0x89 -> HEBREW LETTER YOD u'\u05da' # 0x8A -> HEBREW LETTER FINAL KAF u'\u05db' # 0x8B -> HEBREW LETTER KAF u'\u05dc' # 0x8C -> HEBREW LETTER LAMED u'\u05dd' # 0x8D -> HEBREW LETTER FINAL MEM u'\u05de' # 0x8E -> HEBREW LETTER MEM u'\u05df' # 0x8F -> HEBREW LETTER FINAL NUN u'\u05e0' # 0x90 -> HEBREW LETTER NUN u'\u05e1' # 0x91 -> HEBREW LETTER SAMEKH u'\u05e2' # 0x92 -> HEBREW LETTER AYIN u'\u05e3' # 0x93 -> HEBREW LETTER FINAL PE u'\u05e4' # 0x94 -> HEBREW LETTER PE u'\u05e5' # 0x95 
-> HEBREW LETTER FINAL TSADI u'\u05e6' # 0x96 -> HEBREW LETTER TSADI u'\u05e7' # 0x97 -> HEBREW LETTER QOF u'\u05e8' # 0x98 -> HEBREW LETTER RESH u'\u05e9' # 0x99 -> HEBREW LETTER SHIN u'\u05ea' # 0x9A -> HEBREW LETTER TAV u'\ufffe' # 0x9B -> UNDEFINED u'\xa3' # 0x9C -> POUND SIGN u'\ufffe' # 0x9D -> UNDEFINED u'\xd7' # 0x9E -> MULTIPLICATION SIGN u'\ufffe' # 0x9F -> UNDEFINED u'\ufffe' # 0xA0 -> UNDEFINED u'\ufffe' # 0xA1 -> UNDEFINED u'\ufffe' # 0xA2 -> UNDEFINED u'\ufffe' # 0xA3 -> UNDEFINED u'\ufffe' # 0xA4 -> UNDEFINED u'\ufffe' # 0xA5 -> UNDEFINED u'\ufffe' # 0xA6 -> UNDEFINED u'\ufffe' # 0xA7 -> UNDEFINED u'\ufffe' # 0xA8 -> UNDEFINED u'\xae' # 0xA9 -> REGISTERED SIGN u'\xac' # 0xAA -> NOT SIGN u'\xbd' # 0xAB -> VULGAR FRACTION ONE HALF u'\xbc' # 0xAC -> VULGAR FRACTION ONE QUARTER u'\ufffe' # 0xAD -> UNDEFINED u'\xab' # 0xAE -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK u'\xbb' # 0xAF -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK u'\u2591' # 0xB0 -> LIGHT SHADE u'\u2592' # 0xB1 -> MEDIUM SHADE u'\u2593' # 0xB2 -> DARK SHADE u'\u2502' # 0xB3 -> BOX DRAWINGS LIGHT VERTICAL u'\u2524' # 0xB4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT u'\ufffe' # 0xB5 -> UNDEFINED u'\ufffe' # 0xB6 -> UNDEFINED u'\ufffe' # 0xB7 -> UNDEFINED u'\xa9' # 0xB8 -> COPYRIGHT SIGN u'\u2563' # 0xB9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT u'\u2551' # 0xBA -> BOX DRAWINGS DOUBLE VERTICAL u'\u2557' # 0xBB -> BOX DRAWINGS DOUBLE DOWN AND LEFT u'\u255d' # 0xBC -> BOX DRAWINGS DOUBLE UP AND LEFT u'\xa2' # 0xBD -> CENT SIGN u'\xa5' # 0xBE -> YEN SIGN u'\u2510' # 0xBF -> BOX DRAWINGS LIGHT DOWN AND LEFT u'\u2514' # 0xC0 -> BOX DRAWINGS LIGHT UP AND RIGHT u'\u2534' # 0xC1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL u'\u252c' # 0xC2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL u'\u251c' # 0xC3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT u'\u2500' # 0xC4 -> BOX DRAWINGS LIGHT HORIZONTAL u'\u253c' # 0xC5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL u'\ufffe' # 0xC6 -> UNDEFINED u'\ufffe' # 0xC7 -> UNDEFINED 
u'\u255a' # 0xC8 -> BOX DRAWINGS DOUBLE UP AND RIGHT u'\u2554' # 0xC9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT u'\u2569' # 0xCA -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL u'\u2566' # 0xCB -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL u'\u2560' # 0xCC -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT u'\u2550' # 0xCD -> BOX DRAWINGS DOUBLE HORIZONTAL u'\u256c' # 0xCE -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL u'\xa4' # 0xCF -> CURRENCY SIGN u'\ufffe' # 0xD0 -> UNDEFINED u'\ufffe' # 0xD1 -> UNDEFINED u'\ufffe' # 0xD2 -> UNDEFINED u'\ufffe' # 0xD3 -> UNDEFINEDS u'\ufffe' # 0xD4 -> UNDEFINED u'\ufffe' # 0xD5 -> UNDEFINED u'\ufffe' # 0xD6 -> UNDEFINEDE u'\ufffe' # 0xD7 -> UNDEFINED u'\ufffe' # 0xD8 -> UNDEFINED u'\u2518' # 0xD9 -> BOX DRAWINGS LIGHT UP AND LEFT u'\u250c' # 0xDA -> BOX DRAWINGS LIGHT DOWN AND RIGHT u'\u2588' # 0xDB -> FULL BLOCK u'\u2584' # 0xDC -> LOWER HALF BLOCK u'\xa6' # 0xDD -> BROKEN BAR u'\ufffe' # 0xDE -> UNDEFINED u'\u2580' # 0xDF -> UPPER HALF BLOCK u'\ufffe' # 0xE0 -> UNDEFINED u'\ufffe' # 0xE1 -> UNDEFINED u'\ufffe' # 0xE2 -> UNDEFINED u'\ufffe' # 0xE3 -> UNDEFINED u'\ufffe' # 0xE4 -> UNDEFINED u'\ufffe' # 0xE5 -> UNDEFINED u'\xb5' # 0xE6 -> MICRO SIGN u'\ufffe' # 0xE7 -> UNDEFINED u'\ufffe' # 0xE8 -> UNDEFINED u'\ufffe' # 0xE9 -> UNDEFINED u'\ufffe' # 0xEA -> UNDEFINED u'\ufffe' # 0xEB -> UNDEFINED u'\ufffe' # 0xEC -> UNDEFINED u'\ufffe' # 0xED -> UNDEFINED u'\xaf' # 0xEE -> MACRON u'\xb4' # 0xEF -> ACUTE ACCENT u'\xad' # 0xF0 -> SOFT HYPHEN u'\xb1' # 0xF1 -> PLUS-MINUS SIGN u'\u2017' # 0xF2 -> DOUBLE LOW LINE u'\xbe' # 0xF3 -> VULGAR FRACTION THREE QUARTERS u'\xb6' # 0xF4 -> PILCROW SIGN u'\xa7' # 0xF5 -> SECTION SIGN u'\xf7' # 0xF6 -> DIVISION SIGN u'\xb8' # 0xF7 -> CEDILLA u'\xb0' # 0xF8 -> DEGREE SIGN u'\xa8' # 0xF9 -> DIAERESIS u'\xb7' # 0xFA -> MIDDLE DOT u'\xb9' # 0xFB -> SUPERSCRIPT ONE u'\xb3' # 0xFC -> SUPERSCRIPT THREE u'\xb2' # 0xFD -> SUPERSCRIPT TWO u'\u25a0' # 0xFE -> BLACK SQUARE u'\xa0' # 0xFF -> NO-BREAK SPACE ) ### Encoding table 
encoding_table=codecs.charmap_build(decoding_table)
PypiClean
/CsuPTMD-1.0.12.tar.gz/CsuPTMD-1.0.12/PTMD/maskrcnn_benchmark/apex/apex/contrib/sparsity/asp.py
import types
import torch
from .sparse_masklib import create_mask

# torchvision layers are added to the default sparse-parameter table only when
# the import succeeds; the flag records availability for init_model_for_pruning.
torchvision_imported = True
try:
    import torchvision
except ImportError:
    print("[ASP][Warning] torchvision cannot be imported.")
    torchvision_imported = False


def eligible_modules(model, whitelist_layer_types, allowed_layer_names, disallowed_layer_names):
    """Return ``[(name, module)]`` pairs from *model* that may be sparsified.

    A module is eligible when its type is an instance of *whitelist_layer_types*,
    its name is not in *disallowed_layer_names*, and — if *allowed_layer_names*
    is not None — its name appears in *allowed_layer_names*.
    """
    eligible_modules_list = []
    for name, mod in model.named_modules():
        if isinstance(mod, whitelist_layer_types) and name not in disallowed_layer_names:
            if allowed_layer_names is not None and name not in allowed_layer_names:
                continue
            eligible_modules_list.append((name, mod))
    return eligible_modules_list


class ASP:
    """Automatic SParsity helper.

    Class-level (singleton) state: the model is registered once via
    ``init_model_for_pruning``; the optimizer step is monkey-patched via
    ``init_optimizer_for_pruning``; ``compute_sparse_masks`` turns sparsity on.
    """
    __model = None
    __verbosity = 0            # 0 errors, 1 +warnings, 2 +info, 3 +debug
    __optimizer = None
    __sparse_parameters = []   # list of (module_name, module, p_name, p, mask, pruned)
    __calculate_mask = None

    @classmethod
    def init_model_for_pruning(cls, model, mask_calculator="m4n2_1d",
                               verbosity=3,
                               whitelist=[torch.nn.Linear, torch.nn.Conv1d, torch.nn.Conv2d, torch.nn.Conv3d],
                               allowed_layer_names=None, disallowed_layer_names=[],
                               allow_recompute_mask=False, custom_layer_dict={}):
        """Call this method to modify your model to take advantage of sparse matrix multiplication.

        Note that this call alone only augments the model with additional buffers needed for sparse MMA,
        it does not enable use of sparse MMA.

        If you are starting with a fresh model:

          model = ...
          ASP.init_model_for_pruning(model, mask_calculator, ...)
          if (training) ASP.init_optimizer_for_pruning(optimizer)
          ASP.compute_sparse_masks() // sparsity is off by default, call when you want to enable it.

        If you are starting from a checkpoint:

          model = ...
          ASP.init_model_for_pruning(model, mask_calculator, ...)
          torch.load(...)
          if (training) ASP.init_optimizer_for_pruning(optimizer)

        Arguments:
          model                    The model.
          mask_calculator          Either callable that computes mask given a tensor OR pattern string for sparse mask lib.
          verbosity                Integer controlling verbosity level.
                                   0 -> Only errors.
                                   1 -> Errors and warnings.
                                   2 -> Errors, warnings and info.
                                   3 -> Errors, warnings, info and debug.
          whitelist                Module types approved for sparsity.
          allowed_layer_names      If not None, only layer names that appear in this list are considered for sparsity.
          disallowed_layer_names   If not [], only layer names that do not appear in this list are considered for sparsity.
          allow_recompute_mask     If True, stores pruned values so that dense weights can be restored.
                                   Pruned weights are stored in CPU memory, hence this option does not increase GPU memory usage.
          custom_layer_dict        Dictionary of additional layer parameters to sparsify. e.g. {CustomLinear: ['weight']}

        [Future] Support for allow_recompute_mask can be removed, it is not part of sparse inference recipe -- AKM.
        """
        assert (cls.__model is None), "ASP has been initialized already."
        cls.__model = model
        cls.__verbosity = verbosity

        if isinstance(mask_calculator, str):
            def create_mask_from_pattern(param):
                return create_mask(param, mask_calculator).bool()
            cls.__calculate_mask = create_mask_from_pattern
        else:
            cls.__calculate_mask = mask_calculator  # user defined function

        # Table of (layer type -> parameter names) that ASP knows how to sparsify.
        # One such entry is expected for every module type that can be sparsified.
        if torchvision_imported:
            print("[ASP] torchvision is imported, can work with the MaskRCNN/KeypointRCNN from torchvision.")
            sparse_parameter_list = {torch.nn.Linear: ['weight'],
                                     torch.nn.Conv1d: ['weight'],
                                     torch.nn.Conv2d: ['weight'],
                                     torch.nn.Conv3d: ['weight'],
                                     torchvision.ops.misc.Conv2d: ['weight']}
        else:
            sparse_parameter_list = {torch.nn.Linear: ['weight'],
                                     torch.nn.Conv1d: ['weight'],
                                     torch.nn.Conv2d: ['weight'],
                                     torch.nn.Conv3d: ['weight']}

        if custom_layer_dict:
            # Update default table to include user supplied custom (layer type : parameter tensor);
            # make sure this tensor type is something ASP knows how to prune.
            sparse_parameter_list.update(custom_layer_dict)
            # Rebind instead of += : the default whitelist is a shared mutable
            # default argument and must not be mutated in place.
            whitelist = whitelist + list(custom_layer_dict.keys())

        for module_type in whitelist:
            # BUGFIX: the original message referenced an undefined name (`module`),
            # turning a failed assert into a NameError.
            assert (module_type in sparse_parameter_list), \
                "Module %s :: Don't know how to sparsify module." % module_type.__name__

        # Find all sparse modules, extract sparse parameters and decorate them
        # with mask (and optional pruned-value) buffers.
        def add_sparse_attributes(module_name, module):
            sparse_parameters = sparse_parameter_list[type(module)]
            for p_name, p in module.named_parameters():
                if p_name in sparse_parameters and p.requires_grad:
                    # Check for NVIDIA's TC compatibility: we check along the horizontal direction.
                    # fp32: user defines FP32 and APEX internally uses FP16 math;
                    # fp16: for Conv2d dim = K x CRS, we prune along C.
                    if p.dtype in (torch.float32, torch.float16) and ((p.size()[0] % 8) != 0 or (p.size()[1] % 16) != 0):
                        print("[ASP] Auto skipping pruning %s::%s of size=%s and type=%s for sparsity" % (module_name, p_name, str(p.size()), str(p.dtype)))
                        continue

                    if cls.__verbosity >= 3:
                        print("[ASP] Sparsifying %s::%s of size=%s and type=%s for sparsity" % (module_name, p_name, str(p.size()), str(p.dtype)))

                    mask = torch.ones_like(p).bool()
                    buffname = p_name.split(".")[-1]  # buffer names cannot contain "."
                    module.register_buffer('__%s_mma_mask' % buffname, mask)
                    if allow_recompute_mask:
                        # Stored on CPU so dense weights can be restored without GPU memory cost.
                        pruned = torch.zeros_like(p).cpu()
                        module.register_buffer('__%s_mma_pruned_p' % buffname, pruned)
                    else:
                        pruned = None
                    cls.__sparse_parameters.append((module_name, module, p_name, p, mask, pruned))
                else:
                    if cls.__verbosity >= 3:
                        print("[ASP] Not sparsifying %s::%s of size=%s and type=%s" % (module_name, p_name, str(p.size()), str(p.dtype)))

        for name, sparse_module in eligible_modules(model, tuple(whitelist), allowed_layer_names, disallowed_layer_names):
            add_sparse_attributes(name, sparse_module)

    @classmethod
    def init_optimizer_for_pruning(cls, optimizer):
        """Call this method to monkey patch optimizer step function so that masks
        can be applied to gradients and weights during training.

        You must call init_model_for_pruning(...) before calling init_optimizer_for_pruning(...).
        """
        assert (cls.__optimizer is None), "ASP has initialized optimizer already."
        assert (cls.__calculate_mask is not None), "Called ASP.init_optimizer_for_pruning before ASP.init_model_for_pruning."

        # Store pointer to original optimizer step method.
        # NOTE: both `__step` attribute accesses below are name-mangled to
        # `_ASP__step` (they occur inside the ASP class body), so the save and
        # the call refer to the same attribute.
        cls.__optimizer = optimizer
        cls.__optimizer.__step = optimizer.step

        def __step(opt_self, *args, **kwargs):
            # Prune gradients before step method.
            with torch.no_grad():
                for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:
                    if p.grad is not None:  # thx pjudd
                        p.grad.mul_(mask)
            # Call original optimizer step method.
            rval = opt_self.__step(*args, **kwargs)
            # Prune parameters after step method.
            with torch.no_grad():
                for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:
                    p.mul_(mask)
            return rval

        cls.__optimizer.step = types.MethodType(__step, cls.__optimizer)

    @classmethod
    def compute_sparse_masks(cls):
        """Call this method to enable sparsity.

        If init(...) was called with allow_recompute_mask=False AND sparsity is
        disabled, pruned field can be None.
        """
        with torch.no_grad():
            for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:
                if mask.sum() < mask.numel():  # when recalculating masks
                    # restore dense parameter if allow_recompute_mask is enabled
                    assert (pruned is not None), "Unable to restore dense parameter because allow_recompute_mask == False"
                    p.add_(pruned.cuda())

                mask.set_(cls.__calculate_mask(p))

                if pruned is not None:  # stow away pruned weights to cpu
                    pruned.set_((p * (~mask)).cpu())

                p.mul_(mask)  # in-place multiplication, so pruned weights are 0-values, hence checkpoint will have 0s for pruned weights
                if cls.__verbosity >= 2:
                    print("[ASP] Enabled %.2f%% sparsity for %s::%s of size=%s and type=%s" % (100.0*mask.sum()/mask.numel(), module_name, p_name, str(p.size()), str(p.dtype)))

    @classmethod
    def restore_pruned_weights(cls):
        """Call this method to disable sparsity and restore all weights.

        This will only work if init(...) was called with allow_recompute=True.
        """
        with torch.no_grad():
            for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:
                if mask.sum() < mask.numel():
                    assert (pruned is not None), "Unable to restore dense parameter because allow_recompute_mask == False"
                    p.add_(pruned.cuda())
                    mask.fill_(1)
                    pruned.zero_()
                    if cls.__verbosity >= 2:
                        print("[ASP] Disabled sparsity for %s::%s (dense weights restored)" % (module_name, p_name))

    @classmethod
    def is_sparsity_enabled(cls):
        """Call this method to determine if sparsity is enabled in the model.

        The typical use case is right after checkpoint has been loaded.
        Returns True when every tracked parameter is 50% sparse, False when
        every tracked parameter is fully dense; asserts on a mixed state.
        """
        total, sp100, sp50 = 0, 0, 0
        for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:
            total += 1
            mask_sum = mask.sum()
            mask_numel = mask.numel()
            if mask_sum == mask_numel:
                sp100 += 1
            elif mask_sum*2 == mask_numel:
                sp50 += 1

        assert (total == sp100 or total == sp50), "Inconsistent model sparsity"
        if total == sp100:
            return False
        elif total == sp50:
            return True

    @classmethod
    def prune_trained_model(cls, model, optimizer):
        """Convenience wrapper: add mask buffers to model (init_model_for_pruning),
        augment optimizer (init_optimizer_for_pruning) and compute masks
        (compute_sparse_masks) in one call."""
        cls.init_model_for_pruning(model, mask_calculator="m4n2_1d", verbosity=2, whitelist=[torch.nn.Linear, torch.nn.Conv2d], allow_recompute_mask=False)
        cls.init_optimizer_for_pruning(optimizer)
        cls.compute_sparse_masks()
PypiClean
/gcocf-0.10.3.tar.gz/gcocf-0.10.3/.github/ISSUE_TEMPLATE/bug_report.md
--- name: Bug report about: Create a report to help us improve title: 'BUG: ' labels: 'Bug' assignees: 'GILLESMaster, GILLESMaster080' --- **Describe the bug** A clear and concise description of what the bug is. **To Reproduce** Steps to reproduce the behavior: 1. Go to '...' 2. Click on '....' 3. Scroll down to '....' 4. See error **Expected behavior** A clear and concise description of what you expected to happen. **Screenshots** If applicable, add screenshots to help explain your problem. **System (please complete the following information):** - OS: [e.g. Linux] - Version [e.g. 1] **Additional context** Add any other context about the problem here.
PypiClean
/Nerium-0.13.1.tar.gz/Nerium-0.13.1/nerium/app.py
from flask import Flask, jsonify, make_response, request, Response
from flask_cors import CORS
from marshmallow import INCLUDE, Schema, fields
from marshmallow.exceptions import ValidationError

from nerium import __version__, commit, csv_result, discovery, formatter, query
from nerium.auth import require_api_key
from nerium.utils import convert_multidict

app = Flask(__name__)
app.url_map.strict_slashes = False
CORS(app)


class ResultRequestSchema(Schema):
    """Require query_name in valid results request, set format to "default"
    if not supplied
    """

    class Meta:
        # Pass unrecognized request parameters through; they become query params.
        unknown = INCLUDE

    query_name = fields.String(required=True)
    format_ = fields.String(load_default="default", data_key="format")


def get_query_result(params):
    """Get query results from set of request parameters.

    Return tuple of (formatted results or error dict, HTTP status code).
    """
    # Load against schema; return with validation error messages unless valid.
    try:
        params = ResultRequestSchema().load(params)
    except ValidationError as e:
        return (e.normalized_messages(), 400)
    # Separate query_name and format from other (query) parameters.
    query_name = params.pop("query_name")
    format_ = params.pop("format_")
    # Fetch results from nerium.query.
    query_results = query.get_result_set(query_name, **params)
    # Handle error from query submission.
    if query_results.error:
        status_code = getattr(query_results, "status_code", 400)
        return (dict(error=query_results.error), status_code)
    # Format results before returning to view.
    format_schema = formatter.get_format(format_)
    formatted = format_schema.dump(query_results)
    return (formatted, 200)


def parse_query_params():
    """Collect request parameters from the JSON body, or else the query string."""
    return request.get_json(silent=True) or convert_multidict(
        request.args.to_dict(flat=False)
    )


@app.route("/")
@app.route("/v1/")
@app.route("/v1/<query_name>/")
@app.route("/v1/<query_name>/<format_>")
@require_api_key
def serve_query_result(query_name="", format_=""):
    """Parse request and hand params to get_query_result"""
    params = parse_query_params()
    if query_name:
        params["query_name"] = query_name
    if format_:
        params["format_"] = format_
    if "query_name" not in params.keys():
        # If no query_name is in request, treat as health check;
        # returns OK with version and git commit detail.
        return jsonify({"status": "ok", "version": __version__, "commit": commit})
    query_result = get_query_result(params)
    result = query_result[0]
    status_code = query_result[1]
    return jsonify(result), status_code


@app.route("/v1/<query_name>/csv")
@app.route("/v2/results/<query_name>/csv")
@require_api_key
def serve_csv_result(query_name):
    """Parse request and stream CSV back"""
    params = parse_query_params()
    # Renamed local (was `query`): don't shadow the imported nerium.query module.
    csv_query = csv_result.results_to_csv(query_name, **params)
    if csv_query.error:
        return (csv_query.error, 400)
    response = Response(csv_query.result, mimetype='text/csv')
    response.headers['Content-Disposition'] = f"attachment; filename={query_name}.csv"
    return response


@app.route("/v1/docs/")
@require_api_key
def serve_report_list():
    """Discovery route; returns a list of available reports known to the service"""
    return jsonify(discovery.list_reports())


@app.route("/v1/<query_name>/docs/")
@require_api_key
def serve_report_description(query_name):
    """Discovery route; returns details and metadata about a report by name"""
    report_descr = discovery.describe_report(query_name)
    if report_descr.error:
        status_code = getattr(report_descr, "status_code", 400)
        return jsonify(dict(error=report_descr.error)), status_code
    return jsonify(vars(report_descr))
PypiClean
/Flask_Admin-1.6.1-py3-none-any.whl/flask_admin/contrib/mongoengine/ajax.py
import mongoengine

from flask_admin._compat import string_types, as_unicode, iteritems
from flask_admin.model.ajax import AjaxModelLoader, DEFAULT_PAGE_SIZE


class QueryAjaxModelLoader(AjaxModelLoader):
    """AJAX model loader backed by a mongoengine queryset."""

    def __init__(self, name, model, **options):
        """
            Constructor.

            :param fields:
                Fields to run query against
        """
        super(QueryAjaxModelLoader, self).__init__(name, options)

        self.model = model
        self.fields = options.get('fields')

        # BUGFIX: validate `fields` before building the cached field list —
        # previously _process_fields() ran first and raised a confusing
        # TypeError when `fields` was missing, instead of this ValueError.
        if not self.fields:
            raise ValueError('AJAX loading requires `fields` to be specified for %s.%s' % (model, self.name))

        self._cached_fields = self._process_fields()

    def _process_fields(self):
        """Resolve string field names to model attributes; pass through objects."""
        remote_fields = []

        for field in self.fields:
            if isinstance(field, string_types):
                attr = getattr(self.model, field, None)

                if not attr:
                    raise ValueError('%s.%s does not exist.' % (self.model, field))

                remote_fields.append(attr)
            else:
                remote_fields.append(field)

        return remote_fields

    def format(self, model):
        """Return (pk, label) for a model instance, or None for a falsy value."""
        if not model:
            return None

        return (as_unicode(model.pk), as_unicode(model))

    def get_one(self, pk):
        """Fetch a single document by primary key (or None)."""
        return self.model.objects.filter(pk=pk).first()

    def get_list(self, term, offset=0, limit=DEFAULT_PAGE_SIZE):
        """Return up to `limit` documents matching `term` (case-insensitive,
        OR-ed across the configured fields), skipping `offset` rows."""
        query = self.model.objects

        if len(term) > 0:
            criteria = None

            for field in self._cached_fields:
                flt = {u'%s__icontains' % field.name: term}

                if not criteria:
                    criteria = mongoengine.Q(**flt)
                else:
                    criteria |= mongoengine.Q(**flt)

            query = query.filter(criteria)

        if offset:
            query = query.skip(offset)

        return query.limit(limit).all()


def create_ajax_loader(model, name, field_name, opts):
    """Build a QueryAjaxModelLoader for a (possibly list-wrapped) ReferenceField."""
    prop = getattr(model, field_name, None)

    if prop is None:
        raise ValueError('Model %s does not have field %s.' % (model, field_name))

    ftype = type(prop).__name__

    # Unwrap list fields to the contained field type.
    if ftype == 'ListField' or ftype == 'SortedListField':
        prop = prop.field
        ftype = type(prop).__name__

    if ftype != 'ReferenceField':
        raise ValueError('Dont know how to convert %s type for AJAX loader' % ftype)

    remote_model = prop.document_type
    return QueryAjaxModelLoader(name, remote_model, **opts)


def process_ajax_references(references, view):
    """Recursively register AJAX loaders declared on embedded-document forms.

    Walks the view's `_form_subdocuments` tree and, for every
    `form_ajax_refs` entry found on an embedded document, creates (or reuses)
    a loader, stores it in `references` under a dash-joined lowercase key,
    and attaches the per-subdocument map as `_form_ajax_refs`.
    """
    def make_name(base, name):
        if base:
            return ('%s-%s' % (base, name)).lower()
        else:
            return as_unicode(name).lower()

    def handle_field(field, subdoc, base):
        ftype = type(field).__name__

        if ftype == 'ListField' or ftype == 'SortedListField':
            child_doc = getattr(subdoc, '_form_subdocuments', {}).get(None)

            if child_doc:
                handle_field(field.field, child_doc, base)
        elif ftype == 'EmbeddedDocumentField':
            result = {}

            ajax_refs = getattr(subdoc, 'form_ajax_refs', {})

            for field_name, opts in iteritems(ajax_refs):
                child_name = make_name(base, field_name)

                if isinstance(opts, dict):
                    loader = create_ajax_loader(field.document_type_obj, child_name, field_name, opts)
                else:
                    loader = opts

                result[field_name] = loader
                references[child_name] = loader

            subdoc._form_ajax_refs = result

            child_doc = getattr(subdoc, '_form_subdocuments', None)
            if child_doc:
                handle_subdoc(field.document_type_obj, subdoc, base)
        else:
            raise ValueError('Failed to process subdocument field %s' % (field,))

    def handle_subdoc(model, subdoc, base):
        documents = getattr(subdoc, '_form_subdocuments', {})

        for name, doc in iteritems(documents):
            field = getattr(model, name, None)

            if not field:
                raise ValueError('Invalid subdocument field %s.%s' % (model, name))

            handle_field(field, doc, make_name(base, name))

    handle_subdoc(view.model, view, '')

    return references
PypiClean
/Notable-0.4.2.tar.gz/Notable-0.4.2/notable/static/lib/underscore.js
// underscore.js 1.4.4 — vendored, MINIFIED build of the Underscore utility
// library (underscorejs.org). Do not edit by hand; replace the whole file
// from the upstream release if an upgrade is needed.
(function(){var n=this,t=n._,r={},e=Array.prototype,u=Object.prototype,i=Function.prototype,a=e.push,o=e.slice,c=e.concat,l=u.toString,f=u.hasOwnProperty,s=e.forEach,p=e.map,h=e.reduce,v=e.reduceRight,d=e.filter,g=e.every,m=e.some,y=e.indexOf,b=e.lastIndexOf,x=Array.isArray,_=Object.keys,j=i.bind,w=function(n){return n instanceof w?n:this instanceof w?(this._wrapped=n,void 0):new w(n)};"undefined"!=typeof exports?("undefined"!=typeof module&&module.exports&&(exports=module.exports=w),exports._=w):n._=w,w.VERSION="1.4.4";var A=w.each=w.forEach=function(n,t,e){if(null!=n)if(s&&n.forEach===s)n.forEach(t,e);else if(n.length===+n.length){for(var u=0,i=n.length;i>u;u++)if(t.call(e,n[u],u,n)===r)return}else for(var a in n)if(w.has(n,a)&&t.call(e,n[a],a,n)===r)return};w.map=w.collect=function(n,t,r){var e=[];return null==n?e:p&&n.map===p?n.map(t,r):(A(n,function(n,u,i){e[e.length]=t.call(r,n,u,i)}),e)};var O="Reduce of empty array with no initial value";w.reduce=w.foldl=w.inject=function(n,t,r,e){var u=arguments.length>2;if(null==n&&(n=[]),h&&n.reduce===h)return e&&(t=w.bind(t,e)),u?n.reduce(t,r):n.reduce(t);if(A(n,function(n,i,a){u?r=t.call(e,r,n,i,a):(r=n,u=!0)}),!u)throw new TypeError(O);return r},w.reduceRight=w.foldr=function(n,t,r,e){var u=arguments.length>2;if(null==n&&(n=[]),v&&n.reduceRight===v)return e&&(t=w.bind(t,e)),u?n.reduceRight(t,r):n.reduceRight(t);var i=n.length;if(i!==+i){var a=w.keys(n);i=a.length}if(A(n,function(o,c,l){c=a?a[--i]:--i,u?r=t.call(e,r,n[c],c,l):(r=n[c],u=!0)}),!u)throw new TypeError(O);return r},w.find=w.detect=function(n,t,r){var e;return E(n,function(n,u,i){return t.call(r,n,u,i)?(e=n,!0):void 0}),e},w.filter=w.select=function(n,t,r){var e=[];return null==n?e:d&&n.filter===d?n.filter(t,r):(A(n,function(n,u,i){t.call(r,n,u,i)&&(e[e.length]=n)}),e)},w.reject=function(n,t,r){return w.filter(n,function(n,e,u){return!t.call(r,n,e,u)},r)},w.every=w.all=function(n,t,e){t||(t=w.identity);var u=!0;return
null==n?u:g&&n.every===g?n.every(t,e):(A(n,function(n,i,a){return(u=u&&t.call(e,n,i,a))?void 0:r}),!!u)};var E=w.some=w.any=function(n,t,e){t||(t=w.identity);var u=!1;return null==n?u:m&&n.some===m?n.some(t,e):(A(n,function(n,i,a){return u||(u=t.call(e,n,i,a))?r:void 0}),!!u)};w.contains=w.include=function(n,t){return null==n?!1:y&&n.indexOf===y?n.indexOf(t)!=-1:E(n,function(n){return n===t})},w.invoke=function(n,t){var r=o.call(arguments,2),e=w.isFunction(t);return w.map(n,function(n){return(e?t:n[t]).apply(n,r)})},w.pluck=function(n,t){return w.map(n,function(n){return n[t]})},w.where=function(n,t,r){return w.isEmpty(t)?r?null:[]:w[r?"find":"filter"](n,function(n){for(var r in t)if(t[r]!==n[r])return!1;return!0})},w.findWhere=function(n,t){return w.where(n,t,!0)},w.max=function(n,t,r){if(!t&&w.isArray(n)&&n[0]===+n[0]&&65535>n.length)return Math.max.apply(Math,n);if(!t&&w.isEmpty(n))return-1/0;var e={computed:-1/0,value:-1/0};return A(n,function(n,u,i){var a=t?t.call(r,n,u,i):n;a>=e.computed&&(e={value:n,computed:a})}),e.value},w.min=function(n,t,r){if(!t&&w.isArray(n)&&n[0]===+n[0]&&65535>n.length)return Math.min.apply(Math,n);if(!t&&w.isEmpty(n))return 1/0;var e={computed:1/0,value:1/0};return A(n,function(n,u,i){var a=t?t.call(r,n,u,i):n;e.computed>a&&(e={value:n,computed:a})}),e.value},w.shuffle=function(n){var t,r=0,e=[];return A(n,function(n){t=w.random(r++),e[r-1]=e[t],e[t]=n}),e};var k=function(n){return w.isFunction(n)?n:function(t){return t[n]}};w.sortBy=function(n,t,r){var e=k(t);return w.pluck(w.map(n,function(n,t,u){return{value:n,index:t,criteria:e.call(r,n,t,u)}}).sort(function(n,t){var r=n.criteria,e=t.criteria;if(r!==e){if(r>e||r===void 0)return 1;if(e>r||e===void 0)return-1}return n.index<t.index?-1:1}),"value")};var F=function(n,t,r,e){var u={},i=k(t||w.identity);return A(n,function(t,a){var o=i.call(r,t,a,n);e(u,o,t)}),u};w.groupBy=function(n,t,r){return
F(n,t,r,function(n,t,r){(w.has(n,t)?n[t]:n[t]=[]).push(r)})},w.countBy=function(n,t,r){return F(n,t,r,function(n,t){w.has(n,t)||(n[t]=0),n[t]++})},w.sortedIndex=function(n,t,r,e){r=null==r?w.identity:k(r);for(var u=r.call(e,t),i=0,a=n.length;a>i;){var o=i+a>>>1;u>r.call(e,n[o])?i=o+1:a=o}return i},w.toArray=function(n){return n?w.isArray(n)?o.call(n):n.length===+n.length?w.map(n,w.identity):w.values(n):[]},w.size=function(n){return null==n?0:n.length===+n.length?n.length:w.keys(n).length},w.first=w.head=w.take=function(n,t,r){return null==n?void 0:null==t||r?n[0]:o.call(n,0,t)},w.initial=function(n,t,r){return o.call(n,0,n.length-(null==t||r?1:t))},w.last=function(n,t,r){return null==n?void 0:null==t||r?n[n.length-1]:o.call(n,Math.max(n.length-t,0))},w.rest=w.tail=w.drop=function(n,t,r){return o.call(n,null==t||r?1:t)},w.compact=function(n){return w.filter(n,w.identity)};var R=function(n,t,r){return A(n,function(n){w.isArray(n)?t?a.apply(r,n):R(n,t,r):r.push(n)}),r};w.flatten=function(n,t){return R(n,t,[])},w.without=function(n){return w.difference(n,o.call(arguments,1))},w.uniq=w.unique=function(n,t,r,e){w.isFunction(t)&&(e=r,r=t,t=!1);var u=r?w.map(n,r,e):n,i=[],a=[];return A(u,function(r,e){(t?e&&a[a.length-1]===r:w.contains(a,r))||(a.push(r),i.push(n[e]))}),i},w.union=function(){return w.uniq(c.apply(e,arguments))},w.intersection=function(n){var t=o.call(arguments,1);return w.filter(w.uniq(n),function(n){return w.every(t,function(t){return w.indexOf(t,n)>=0})})},w.difference=function(n){var t=c.apply(e,o.call(arguments,1));return w.filter(n,function(n){return!w.contains(t,n)})},w.zip=function(){for(var n=o.call(arguments),t=w.max(w.pluck(n,"length")),r=Array(t),e=0;t>e;e++)r[e]=w.pluck(n,""+e);return r},w.object=function(n,t){if(null==n)return{};for(var r={},e=0,u=n.length;u>e;e++)t?r[n[e]]=t[e]:r[n[e][0]]=n[e][1];return r},w.indexOf=function(n,t,r){if(null==n)return-1;var e=0,u=n.length;if(r){if("number"!=typeof r)return
e=w.sortedIndex(n,t),n[e]===t?e:-1;e=0>r?Math.max(0,u+r):r}if(y&&n.indexOf===y)return n.indexOf(t,r);for(;u>e;e++)if(n[e]===t)return e;return-1},w.lastIndexOf=function(n,t,r){if(null==n)return-1;var e=null!=r;if(b&&n.lastIndexOf===b)return e?n.lastIndexOf(t,r):n.lastIndexOf(t);for(var u=e?r:n.length;u--;)if(n[u]===t)return u;return-1},w.range=function(n,t,r){1>=arguments.length&&(t=n||0,n=0),r=arguments[2]||1;for(var e=Math.max(Math.ceil((t-n)/r),0),u=0,i=Array(e);e>u;)i[u++]=n,n+=r;return i},w.bind=function(n,t){if(n.bind===j&&j)return j.apply(n,o.call(arguments,1));var r=o.call(arguments,2);return function(){return n.apply(t,r.concat(o.call(arguments)))}},w.partial=function(n){var t=o.call(arguments,1);return function(){return n.apply(this,t.concat(o.call(arguments)))}},w.bindAll=function(n){var t=o.call(arguments,1);return 0===t.length&&(t=w.functions(n)),A(t,function(t){n[t]=w.bind(n[t],n)}),n},w.memoize=function(n,t){var r={};return t||(t=w.identity),function(){var e=t.apply(this,arguments);return w.has(r,e)?r[e]:r[e]=n.apply(this,arguments)}},w.delay=function(n,t){var r=o.call(arguments,2);return setTimeout(function(){return n.apply(null,r)},t)},w.defer=function(n){return w.delay.apply(w,[n,1].concat(o.call(arguments,1)))},w.throttle=function(n,t){var r,e,u,i,a=0,o=function(){a=new Date,u=null,i=n.apply(r,e)};return function(){var c=new Date,l=t-(c-a);return r=this,e=arguments,0>=l?(clearTimeout(u),u=null,a=c,i=n.apply(r,e)):u||(u=setTimeout(o,l)),i}},w.debounce=function(n,t,r){var e,u;return function(){var i=this,a=arguments,o=function(){e=null,r||(u=n.apply(i,a))},c=r&&!e;return clearTimeout(e),e=setTimeout(o,t),c&&(u=n.apply(i,a)),u}},w.once=function(n){var t,r=!1;return function(){return r?t:(r=!0,t=n.apply(this,arguments),n=null,t)}},w.wrap=function(n,t){return function(){var r=[n];return a.apply(r,arguments),t.apply(this,r)}},w.compose=function(){var n=arguments;return function(){for(var t=arguments,r=n.length-1;r>=0;r--)t=[n[r].apply(this,t)];return
t[0]}},w.after=function(n,t){return 0>=n?t():function(){return 1>--n?t.apply(this,arguments):void 0}},w.keys=_||function(n){if(n!==Object(n))throw new TypeError("Invalid object");var t=[];for(var r in n)w.has(n,r)&&(t[t.length]=r);return t},w.values=function(n){var t=[];for(var r in n)w.has(n,r)&&t.push(n[r]);return t},w.pairs=function(n){var t=[];for(var r in n)w.has(n,r)&&t.push([r,n[r]]);return t},w.invert=function(n){var t={};for(var r in n)w.has(n,r)&&(t[n[r]]=r);return t},w.functions=w.methods=function(n){var t=[];for(var r in n)w.isFunction(n[r])&&t.push(r);return t.sort()},w.extend=function(n){return A(o.call(arguments,1),function(t){if(t)for(var r in t)n[r]=t[r]}),n},w.pick=function(n){var t={},r=c.apply(e,o.call(arguments,1));return A(r,function(r){r in n&&(t[r]=n[r])}),t},w.omit=function(n){var t={},r=c.apply(e,o.call(arguments,1));for(var u in n)w.contains(r,u)||(t[u]=n[u]);return t},w.defaults=function(n){return A(o.call(arguments,1),function(t){if(t)for(var r in t)null==n[r]&&(n[r]=t[r])}),n},w.clone=function(n){return w.isObject(n)?w.isArray(n)?n.slice():w.extend({},n):n},w.tap=function(n,t){return t(n),n};var I=function(n,t,r,e){if(n===t)return 0!==n||1/n==1/t;if(null==n||null==t)return n===t;n instanceof w&&(n=n._wrapped),t instanceof w&&(t=t._wrapped);var u=l.call(n);if(u!=l.call(t))return!1;switch(u){case"[object String]":return n==t+"";case"[object Number]":return n!=+n?t!=+t:0==n?1/n==1/t:n==+t;case"[object Date]":case"[object Boolean]":return+n==+t;case"[object RegExp]":return n.source==t.source&&n.global==t.global&&n.multiline==t.multiline&&n.ignoreCase==t.ignoreCase}if("object"!=typeof n||"object"!=typeof t)return!1;for(var i=r.length;i--;)if(r[i]==n)return e[i]==t;r.push(n),e.push(t);var a=0,o=!0;if("[object Array]"==u){if(a=n.length,o=a==t.length)for(;a--&&(o=I(n[a],t[a],r,e)););}else{var c=n.constructor,f=t.constructor;if(c!==f&&!(w.isFunction(c)&&c instanceof c&&w.isFunction(f)&&f instanceof f))return!1;for(var s in
n)if(w.has(n,s)&&(a++,!(o=w.has(t,s)&&I(n[s],t[s],r,e))))break;if(o){for(s in t)if(w.has(t,s)&&!a--)break;o=!a}}return r.pop(),e.pop(),o};w.isEqual=function(n,t){return I(n,t,[],[])},w.isEmpty=function(n){if(null==n)return!0;if(w.isArray(n)||w.isString(n))return 0===n.length;for(var t in n)if(w.has(n,t))return!1;return!0},w.isElement=function(n){return!(!n||1!==n.nodeType)},w.isArray=x||function(n){return"[object Array]"==l.call(n)},w.isObject=function(n){return n===Object(n)},A(["Arguments","Function","String","Number","Date","RegExp"],function(n){w["is"+n]=function(t){return l.call(t)=="[object "+n+"]"}}),w.isArguments(arguments)||(w.isArguments=function(n){return!(!n||!w.has(n,"callee"))}),"function"!=typeof/./&&(w.isFunction=function(n){return"function"==typeof n}),w.isFinite=function(n){return isFinite(n)&&!isNaN(parseFloat(n))},w.isNaN=function(n){return w.isNumber(n)&&n!=+n},w.isBoolean=function(n){return n===!0||n===!1||"[object Boolean]"==l.call(n)},w.isNull=function(n){return null===n},w.isUndefined=function(n){return n===void 0},w.has=function(n,t){return f.call(n,t)},w.noConflict=function(){return n._=t,this},w.identity=function(n){return n},w.times=function(n,t,r){for(var e=Array(n),u=0;n>u;u++)e[u]=t.call(r,u);return e},w.random=function(n,t){return null==t&&(t=n,n=0),n+Math.floor(Math.random()*(t-n+1))};var M={escape:{"&":"&amp;","<":"&lt;",">":"&gt;",'"':"&quot;","'":"&#x27;","/":"&#x2F;"}};M.unescape=w.invert(M.escape);var S={escape:RegExp("["+w.keys(M.escape).join("")+"]","g"),unescape:RegExp("("+w.keys(M.unescape).join("|")+")","g")};w.each(["escape","unescape"],function(n){w[n]=function(t){return null==t?"":(""+t).replace(S[n],function(t){return M[n][t]})}}),w.result=function(n,t){if(null==n)return null;var r=n[t];return w.isFunction(r)?r.call(n):r},w.mixin=function(n){A(w.functions(n),function(t){var r=w[t]=n[t];w.prototype[t]=function(){var n=[this._wrapped];return a.apply(n,arguments),D.call(this,r.apply(w,n))}})};var
N=0;w.uniqueId=function(n){var t=++N+"";return n?n+t:t},w.templateSettings={evaluate:/<%([\s\S]+?)%>/g,interpolate:/<%=([\s\S]+?)%>/g,escape:/<%-([\s\S]+?)%>/g};var T=/(.)^/,q={"'":"'","\\":"\\","\r":"r","\n":"n","	":"t","\u2028":"u2028","\u2029":"u2029"},B=/\\|'|\r|\n|\t|\u2028|\u2029/g;w.template=function(n,t,r){var e;r=w.defaults({},r,w.templateSettings);var u=RegExp([(r.escape||T).source,(r.interpolate||T).source,(r.evaluate||T).source].join("|")+"|$","g"),i=0,a="__p+='";n.replace(u,function(t,r,e,u,o){return a+=n.slice(i,o).replace(B,function(n){return"\\"+q[n]}),r&&(a+="'+\n((__t=("+r+"))==null?'':_.escape(__t))+\n'"),e&&(a+="'+\n((__t=("+e+"))==null?'':__t)+\n'"),u&&(a+="';\n"+u+"\n__p+='"),i=o+t.length,t}),a+="';\n",r.variable||(a="with(obj||{}){\n"+a+"}\n"),a="var __t,__p='',__j=Array.prototype.join,"+"print=function(){__p+=__j.call(arguments,'');};\n"+a+"return __p;\n";try{e=Function(r.variable||"obj","_",a)}catch(o){throw o.source=a,o}if(t)return e(t,w);var c=function(n){return e.call(this,n,w)};return c.source="function("+(r.variable||"obj")+"){\n"+a+"}",c},w.chain=function(n){return w(n).chain()};var D=function(n){return this._chain?w(n).chain():n};w.mixin(w),A(["pop","push","reverse","shift","sort","splice","unshift"],function(n){var t=e[n];w.prototype[n]=function(){var r=this._wrapped;return t.apply(r,arguments),"shift"!=n&&"splice"!=n||0!==r.length||delete r[0],D.call(this,r)}}),A(["concat","join","slice"],function(n){var t=e[n];w.prototype[n]=function(){return D.call(this,t.apply(this._wrapped,arguments))}}),w.extend(w.prototype,{chain:function(){return this._chain=!0,this},value:function(){return this._wrapped}})}).call(this);
PypiClean
/NESTML-5.3.0-py3-none-any.whl/NESTML-5.3.0.data/data/doc/models_library/noisy_synapse.rst
noisy_synapse
#############

noisy_synapse - Static synapse with Gaussian noise

Description
+++++++++++

Each presynaptic spike is passed to the postsynaptic partner with a weight sampled as :math:`w + A_\text{noise} \mathcal{N}(0, 1)`.

Parameters
++++++++++

.. csv-table::
    :header: "Name", "Physical unit", "Default value", "Description"
    :widths: auto

    "w", "real", "1", "Synaptic weight"
    "d", "ms", "1ms", "Synaptic transmission delay"
    "A_noise", "real", "0.4", "Amplitude (standard deviation) of the Gaussian noise added to the weight"

Source code
+++++++++++

The model source code can be found in the NESTML models repository here: `noisy_synapse <https://github.com/nest/nestml/tree/master/models/synapses/noisy_synapse.nestml>`_.

Characterisation
++++++++++++++++

.. include:: noisy_synapse_characterisation.rst

.. footer:: Generated at 2023-03-23 09:41:54.864688
PypiClean
/BuildNotify-2.1.0.tar.gz/BuildNotify-2.1.0/buildnotifylib/app_menu.py
import sys
import webbrowser
from functools import partial
from typing import List, Callable

from PyQt5 import QtCore
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QMessageBox, QAction, QMenu, QWidget

from buildnotifylib.core.project import Project
from buildnotifylib.build_icons import BuildIcons
from buildnotifylib.config import Config
from buildnotifylib.core.distance_of_time import DistanceOfTime
from buildnotifylib.preferences import PreferencesDialog
from buildnotifylib.version import VERSION


class AppMenu(QtCore.QObject):
    """Tray menu for BuildNotify.

    Shows one entry per monitored project (icon reflects build status,
    clicking opens the project URL) followed by the default
    About / Preferences / Exit actions.
    """

    # Emitted after the user saves new preferences so the owner can re-fetch
    # project data with the updated configuration.
    reload_data = QtCore.pyqtSignal()

    def __init__(self, widget: QWidget, conf: Config, build_icons: BuildIcons):
        super(AppMenu, self).__init__(widget)
        self.menu = QMenu(widget)
        self.conf = conf
        self.build_icons = build_icons
        self.create_default_menu_items()

    def update(self, projects: List[Project]):
        """Rebuild the menu: one item per project, then the default actions."""
        self.menu.clear()
        for project in self.sorted_projects(projects):
            icon = self.build_icons.for_status(project.get_build_status())
            self.create_menu_item(project, icon)
        self.create_default_menu_items()

    def sorted_projects(self, projects: List[Project]) -> List[Project]:
        """Sort projects by name or by most-recent build, per configuration."""
        if self.conf.get_sort_by_name():
            return sorted(projects, key=lambda p: p.label())
        return sorted(projects, key=lambda p: p.get_last_build_time(), reverse=True)

    def create_default_menu_items(self):
        """Append the separator and the About / Preferences / Exit actions."""
        self.menu.addSeparator()
        self.menu.addAction(QAction("About", self.menu, triggered=self.about_clicked))
        self.menu.addAction(QAction("Preferences", self.menu, triggered=self.preferences_clicked))
        self.menu.addAction(QAction("Exit", self.menu, triggered=self.exit))

    def about_clicked(self, widget: QWidget):
        # Fix: the two concatenated literals previously joined as
        # "suggestions/bugs,please visit" — a space is required after the comma.
        QMessageBox.about(
            self.menu,
            "About BuildNotify %s" % VERSION,
            "<b>BuildNotify %s</b> has been developed using PyQt5 and serves as a build notification tool for cruise control. In case of any suggestions/bugs, " % VERSION
            + "please visit <a href=\"https://git.io/buildnotify\">https://git.io/buildnotify</a> and provide your feedback.")

    def preferences_clicked(self, widget: QWidget):
        """Open the preferences dialog; persist and broadcast any changes."""
        preferences = PreferencesDialog(self.conf, self.menu).open()
        if preferences is not None:
            self.conf.update_preferences(preferences)
            self.reload_data.emit()

    def exit(self, widget: QWidget):
        sys.exit()

    def create_menu_item(self, project: Project, icon: QIcon):
        """Add a clickable menu entry for *project*, optionally with its build age."""
        menu_item_label = project.label()
        if self.conf.get_value("lastBuildTimeForProject"):
            menu_item_label = menu_item_label + ", " + DistanceOfTime(project.get_last_build_time()).age() + " ago"
        action = self.menu.addAction(icon, menu_item_label)
        action.setIconVisibleInMenu(True)
        action.triggered.connect(partial(self.open_url, url=project.url))

    def open_url(self, url: str):
        webbrowser.open(url)
PypiClean
/MAnorm-1.3.0.tar.gz/MAnorm-1.3.0/manorm/model.py
import numpy as np
from sklearn.linear_model import HuberRegressor

from manorm.exceptions import ProcessNotReadyError
from manorm.region.utils import classify_peaks_by_overlap, merge_common_peaks


class MAmodel(object):
    """M-A normalization model for two ChIP-seq peak sets.

    Workflow is strictly ordered: ``process_peaks`` -> ``fit_model`` ->
    ``normalize``; each stage guards on the flag set by the previous one.
    """

    def __init__(self, peaks1, peaks2, reads1, reads2):
        self.peaks1 = peaks1
        self.peaks2 = peaks2
        self.peaks_merged = None
        self.reads1 = reads1
        self.reads2 = reads2
        self.ma_params = None       # [intercept, slope] of the fitted M-A line
        self.processed = False
        self.fitted = False
        self.normalized = False

    def process_peaks(self):
        """Classify peaks by overlap and build the merged common-peak set."""
        self.peaks1, self.peaks2 = classify_peaks_by_overlap(self.peaks1, self.peaks2)
        self.peaks_merged = merge_common_peaks(self.peaks1, self.peaks2)
        self.processed = True

    def _count_reads(self, window_size=2000):
        """Attach read counts (and hence raw M/A values) to every peak."""
        for peak_set in (self.peaks1, self.peaks2, self.peaks_merged):
            for chrom in peak_set.chroms:
                for peak in peak_set.fetch(chrom):
                    peak.count_reads(self.reads1, self.reads2, window_size)

    def fit_model(self, window_size=2000, summit_dis_cutoff=500):
        """Fit the M-A normalization line with a robust (Huber) regression.

        Only merged common peaks whose summits lie within
        *summit_dis_cutoff* of each other contribute to the fit, and
        extreme M values (|M| > 10) are masked out.
        """
        if not self.processed:
            raise ProcessNotReadyError("fit the M-A model", 'process peaks')
        self._count_reads(window_size=window_size)
        raw_m = []
        raw_a = []
        for chrom in self.peaks_merged.chroms:
            for peak in self.peaks_merged.fetch(chrom):
                if peak.summit_dis <= summit_dis_cutoff:
                    raw_m.append(peak.m_raw)
                    raw_a.append(peak.a_raw)
        raw_m = np.array(raw_m)
        raw_a = np.array(raw_a)
        keep = np.abs(raw_m) <= 10
        huber = HuberRegressor()
        huber.fit(raw_a[keep].reshape(-1, 1), raw_m[keep])
        self.ma_params = [huber.intercept_, huber.coef_[0]]
        self.fitted = True

    def normalize(self):
        """Apply the fitted M-A line to every peak in all three sets."""
        if not self.fitted:
            raise ProcessNotReadyError("normalize peaks", "fit the M-A model")
        intercept, slope = self.ma_params
        for peak_set in (self.peaks1, self.peaks2, self.peaks_merged):
            for chrom in peak_set.chroms:
                for peak in peak_set.fetch(chrom):
                    peak.normalize(slope=slope, intercept=intercept)
        self.normalized = True
PypiClean
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/widget/FisheyeList.js
define(["dijit","dojo","dojox","dojo/require!dijit/_Widget,dijit/_Templated,dijit/_Container,dijit/_Contained"],function(_1,_2,_3){ _2.provide("dojox.widget.FisheyeList"); _2.require("dijit._Widget"); _2.require("dijit._Templated"); _2.require("dijit._Container"); _2.require("dijit._Contained"); _2.declare("dojox.widget.FisheyeList",[_1._Widget,_1._Templated,_1._Container],{constructor:function(){ this.pos={"x":-1,"y":-1}; this.timerScale=1; },EDGE:{CENTER:0,LEFT:1,RIGHT:2,TOP:3,BOTTOM:4},templateString:"<div class=\"dojoxFisheyeListBar\" dojoAttachPoint=\"containerNode\"></div>",snarfChildDomOutput:true,itemWidth:40,itemHeight:40,itemMaxWidth:150,itemMaxHeight:150,imgNode:null,orientation:"horizontal",isFixed:false,conservativeTrigger:false,effectUnits:2,itemPadding:10,attachEdge:"center",labelEdge:"bottom",postCreate:function(){ var e=this.EDGE; _2.setSelectable(this.domNode,false); var _4=this.isHorizontal=(this.orientation=="horizontal"); this.selectedNode=-1; this.isOver=false; this.hitX1=-1; this.hitY1=-1; this.hitX2=-1; this.hitY2=-1; this.anchorEdge=this._toEdge(this.attachEdge,e.CENTER); this.labelEdge=this._toEdge(this.labelEdge,e.TOP); if(this.labelEdge==e.CENTER){ this.labelEdge=e.TOP; } if(_4){ if(this.anchorEdge==e.LEFT){ this.anchorEdge=e.CENTER; } if(this.anchorEdge==e.RIGHT){ this.anchorEdge=e.CENTER; } if(this.labelEdge==e.LEFT){ this.labelEdge=e.TOP; } if(this.labelEdge==e.RIGHT){ this.labelEdge=e.TOP; } }else{ if(this.anchorEdge==e.TOP){ this.anchorEdge=e.CENTER; } if(this.anchorEdge==e.BOTTOM){ this.anchorEdge=e.CENTER; } if(this.labelEdge==e.TOP){ this.labelEdge=e.LEFT; } if(this.labelEdge==e.BOTTOM){ this.labelEdge=e.LEFT; } } var _5=this.effectUnits; this.proximityLeft=this.itemWidth*(_5-0.5); this.proximityRight=this.itemWidth*(_5-0.5); this.proximityTop=this.itemHeight*(_5-0.5); this.proximityBottom=this.itemHeight*(_5-0.5); if(this.anchorEdge==e.LEFT){ this.proximityLeft=0; } if(this.anchorEdge==e.RIGHT){ this.proximityRight=0; } 
if(this.anchorEdge==e.TOP){ this.proximityTop=0; } if(this.anchorEdge==e.BOTTOM){ this.proximityBottom=0; } if(this.anchorEdge==e.CENTER){ this.proximityLeft/=2; this.proximityRight/=2; this.proximityTop/=2; this.proximityBottom/=2; } },startup:function(){ this.children=this.getChildren(); this._initializePositioning(); if(!this.conservativeTrigger){ this._onMouseMoveHandle=_2.connect(document.documentElement,"onmousemove",this,"_onMouseMove"); } if(this.isFixed){ this._onScrollHandle=_2.connect(document,"onscroll",this,"_onScroll"); } this._onMouseOutHandle=_2.connect(document.documentElement,"onmouseout",this,"_onBodyOut"); this._addChildHandle=_2.connect(this,"addChild",this,"_initializePositioning"); this._onResizeHandle=_2.connect(window,"onresize",this,"_initializePositioning"); },_initializePositioning:function(){ this.itemCount=this.children.length; this.barWidth=(this.isHorizontal?this.itemCount:1)*this.itemWidth; this.barHeight=(this.isHorizontal?1:this.itemCount)*this.itemHeight; this.totalWidth=this.proximityLeft+this.proximityRight+this.barWidth; this.totalHeight=this.proximityTop+this.proximityBottom+this.barHeight; for(var i=0;i<this.children.length;i++){ this.children[i].posX=this.itemWidth*(this.isHorizontal?i:0); this.children[i].posY=this.itemHeight*(this.isHorizontal?0:i); this.children[i].cenX=this.children[i].posX+(this.itemWidth/2); this.children[i].cenY=this.children[i].posY+(this.itemHeight/2); var _6=this.isHorizontal?this.itemWidth:this.itemHeight; var r=this.effectUnits*_6; var c=this.isHorizontal?this.children[i].cenX:this.children[i].cenY; var _7=this.isHorizontal?this.proximityLeft:this.proximityTop; var _8=this.isHorizontal?this.proximityRight:this.proximityBottom; var _9=this.isHorizontal?this.barWidth:this.barHeight; var _a=r; var _b=r; if(_a>c+_7){ _a=c+_7; } if(_b>(_9-c+_8)){ _b=_9-c+_8; } this.children[i].effectRangeLeft=_a/_6; this.children[i].effectRangeRght=_b/_6; } this.domNode.style.width=this.barWidth+"px"; 
this.domNode.style.height=this.barHeight+"px"; for(i=0;i<this.children.length;i++){ var _c=this.children[i]; var _d=_c.domNode; _d.style.left=_c.posX+"px"; _d.style.top=_c.posY+"px"; _d.style.width=this.itemWidth+"px"; _d.style.height=this.itemHeight+"px"; _c.imgNode.style.left=this.itemPadding+"%"; _c.imgNode.style.top=this.itemPadding+"%"; _c.imgNode.style.width=(100-2*this.itemPadding)+"%"; _c.imgNode.style.height=(100-2*this.itemPadding)+"%"; } this._calcHitGrid(); },_overElement:function(_e,e){ _e=_2.byId(_e); var _f={x:e.pageX,y:e.pageY}; var _10=_2.position(_e,true); var top=_10.y; var _11=top+_10.h; var _12=_10.x; var _13=_12+_10.w; return (_f.x>=_12&&_f.x<=_13&&_f.y>=top&&_f.y<=_11); },_onBodyOut:function(e){ if(this._overElement(_2.body(),e)){ return; } this._setDormant(e); },_setDormant:function(e){ if(!this.isOver){ return; } this.isOver=false; if(this.conservativeTrigger){ _2.disconnect(this._onMouseMoveHandle); } this._onGridMouseMove(-1,-1); },_setActive:function(e){ if(this.isOver){ return; } this.isOver=true; if(this.conservativeTrigger){ this._onMouseMoveHandle=_2.connect(document.documentElement,"onmousemove",this,"_onMouseMove"); this.timerScale=0; this._onMouseMove(e); this._expandSlowly(); } },_onMouseMove:function(e){ if((e.pageX>=this.hitX1)&&(e.pageX<=this.hitX2)&&(e.pageY>=this.hitY1)&&(e.pageY<=this.hitY2)){ if(!this.isOver){ this._setActive(e); } this._onGridMouseMove(e.pageX-this.hitX1,e.pageY-this.hitY1); }else{ if(this.isOver){ this._setDormant(e); } } },_onScroll:function(){ this._calcHitGrid(); },onResized:function(){ this._calcHitGrid(); },_onGridMouseMove:function(x,y){ this.pos={x:x,y:y}; this._paint(); },_paint:function(){ var x=this.pos.x; var y=this.pos.y; if(this.itemCount<=0){ return; } var pos=this.isHorizontal?x:y; var prx=this.isHorizontal?this.proximityLeft:this.proximityTop; var siz=this.isHorizontal?this.itemWidth:this.itemHeight; var 
sim=this.isHorizontal?(1-this.timerScale)*this.itemWidth+this.timerScale*this.itemMaxWidth:(1-this.timerScale)*this.itemHeight+this.timerScale*this.itemMaxHeight; var cen=((pos-prx)/siz)-0.5; var _14=(sim/siz)-0.5; if(_14>this.effectUnits){ _14=this.effectUnits; } var _15=0,_16; if(this.anchorEdge==this.EDGE.BOTTOM){ _16=(y-this.proximityTop)/this.itemHeight; _15=(_16>0.5)?1:y/(this.proximityTop+(this.itemHeight/2)); } if(this.anchorEdge==this.EDGE.TOP){ _16=(y-this.proximityTop)/this.itemHeight; _15=(_16<0.5)?1:(this.totalHeight-y)/(this.proximityBottom+(this.itemHeight/2)); } if(this.anchorEdge==this.EDGE.RIGHT){ _16=(x-this.proximityLeft)/this.itemWidth; _15=(_16>0.5)?1:x/(this.proximityLeft+(this.itemWidth/2)); } if(this.anchorEdge==this.EDGE.LEFT){ _16=(x-this.proximityLeft)/this.itemWidth; _15=(_16<0.5)?1:(this.totalWidth-x)/(this.proximityRight+(this.itemWidth/2)); } if(this.anchorEdge==this.EDGE.CENTER){ if(this.isHorizontal){ _15=y/(this.totalHeight); }else{ _15=x/(this.totalWidth); } if(_15>0.5){ _15=1-_15; } _15*=2; } for(var i=0;i<this.itemCount;i++){ var _17=this._weighAt(cen,i); if(_17<0){ _17=0; } this._setItemSize(i,_17*_15); } var _18=Math.round(cen); var _19=0; if(cen<0){ _18=0; }else{ if(cen>this.itemCount-1){ _18=this.itemCount-1; }else{ _19=(cen-_18)*((this.isHorizontal?this.itemWidth:this.itemHeight)-this.children[_18].sizeMain); } } this._positionElementsFrom(_18,_19); },_weighAt:function(cen,i){ var _1a=Math.abs(cen-i); var _1b=((cen-i)>0)?this.children[i].effectRangeRght:this.children[i].effectRangeLeft; return (_1a>_1b)?0:(1-_1a/_1b); },_setItemSize:function(p,_1c){ if(this.children[p].scale==_1c){ return; } this.children[p].scale=_1c; _1c*=this.timerScale; var w=Math.round(this.itemWidth+((this.itemMaxWidth-this.itemWidth)*_1c)); var h=Math.round(this.itemHeight+((this.itemMaxHeight-this.itemHeight)*_1c)); if(this.isHorizontal){ this.children[p].sizeW=w; this.children[p].sizeH=h; this.children[p].sizeMain=w; this.children[p].sizeOff=h; 
var y=0; if(this.anchorEdge==this.EDGE.TOP){ y=(this.children[p].cenY-(this.itemHeight/2)); }else{ if(this.anchorEdge==this.EDGE.BOTTOM){ y=(this.children[p].cenY-(h-(this.itemHeight/2))); }else{ y=(this.children[p].cenY-(h/2)); } } this.children[p].usualX=Math.round(this.children[p].cenX-(w/2)); this.children[p].domNode.style.top=y+"px"; this.children[p].domNode.style.left=this.children[p].usualX+"px"; }else{ this.children[p].sizeW=w; this.children[p].sizeH=h; this.children[p].sizeOff=w; this.children[p].sizeMain=h; var x=0; if(this.anchorEdge==this.EDGE.LEFT){ x=this.children[p].cenX-(this.itemWidth/2); }else{ if(this.anchorEdge==this.EDGE.RIGHT){ x=this.children[p].cenX-(w-(this.itemWidth/2)); }else{ x=this.children[p].cenX-(w/2); } } this.children[p].domNode.style.left=x+"px"; this.children[p].usualY=Math.round(this.children[p].cenY-(h/2)); this.children[p].domNode.style.top=this.children[p].usualY+"px"; } this.children[p].domNode.style.width=w+"px"; this.children[p].domNode.style.height=h+"px"; if(this.children[p].svgNode){ this.children[p].svgNode.setSize(w,h); } },_positionElementsFrom:function(p,_1d){ var pos=0; var _1e,_1f; if(this.isHorizontal){ _1e="usualX"; _1f="left"; }else{ _1e="usualY"; _1f="top"; } pos=Math.round(this.children[p][_1e]+_1d); if(this.children[p].domNode.style[_1f]!=(pos+"px")){ this.children[p].domNode.style[_1f]=pos+"px"; this._positionLabel(this.children[p]); } var _20=pos; for(var i=p-1;i>=0;i--){ _20-=this.children[i].sizeMain; if(this.children[p].domNode.style[_1f]!=(_20+"px")){ this.children[i].domNode.style[_1f]=_20+"px"; this._positionLabel(this.children[i]); } } var _21=pos; for(i=p+1;i<this.itemCount;i++){ _21+=this.children[i-1].sizeMain; if(this.children[p].domNode.style[_1f]!=(_21+"px")){ this.children[i].domNode.style[_1f]=_21+"px"; this._positionLabel(this.children[i]); } } },_positionLabel:function(itm){ var x=0; var y=0; var mb=_2.marginBox(itm.lblNode); if(this.labelEdge==this.EDGE.TOP){ 
x=Math.round((itm.sizeW/2)-(mb.w/2)); y=-mb.h; } if(this.labelEdge==this.EDGE.BOTTOM){ x=Math.round((itm.sizeW/2)-(mb.w/2)); y=itm.sizeH; } if(this.labelEdge==this.EDGE.LEFT){ x=-mb.w; y=Math.round((itm.sizeH/2)-(mb.h/2)); } if(this.labelEdge==this.EDGE.RIGHT){ x=itm.sizeW; y=Math.round((itm.sizeH/2)-(mb.h/2)); } itm.lblNode.style.left=x+"px"; itm.lblNode.style.top=y+"px"; },_calcHitGrid:function(){ var pos=_2.coords(this.domNode,true); this.hitX1=pos.x-this.proximityLeft; this.hitY1=pos.y-this.proximityTop; this.hitX2=this.hitX1+this.totalWidth; this.hitY2=this.hitY1+this.totalHeight; },_toEdge:function(inp,def){ return this.EDGE[inp.toUpperCase()]||def; },_expandSlowly:function(){ if(!this.isOver){ return; } this.timerScale+=0.2; this._paint(); if(this.timerScale<1){ setTimeout(_2.hitch(this,"_expandSlowly"),10); } },destroyRecursive:function(){ _2.disconnect(this._onMouseOutHandle); _2.disconnect(this._onMouseMoveHandle); _2.disconnect(this._addChildHandle); if(this.isFixed){ _2.disconnect(this._onScrollHandle); } _2.disconnect(this._onResizeHandle); this.inherited("destroyRecursive",arguments); }}); _2.declare("dojox.widget.FisheyeListItem",[_1._Widget,_1._Templated,_1._Contained],{iconSrc:"",label:"",id:"",templateString:"<div class=\"dojoxFisheyeListItem\">"+" <img class=\"dojoxFisheyeListItemImage\" dojoAttachPoint=\"imgNode\" dojoAttachEvent=\"onmouseover:onMouseOver,onmouseout:onMouseOut,onclick:onClick\">"+" <div class=\"dojoxFisheyeListItemLabel\" dojoAttachPoint=\"lblNode\"></div>"+"</div>",_isNode:function(wh){ if(typeof Element=="function"){ try{ return wh instanceof Element; } catch(e){ } }else{ return wh&&!isNaN(wh.nodeType); } return false; },_hasParent:function(_22){ return Boolean(_22&&_22.parentNode&&this._isNode(_22.parentNode)); },postCreate:function(){ var _23; if((this.iconSrc.toLowerCase().substring(this.iconSrc.length-4)==".png")&&_2.isIE<7){ if(this._hasParent(this.imgNode)&&this.id!=""){ _23=this.imgNode.parentNode; 
_23.setAttribute("id",this.id); } this.imgNode.style.filter="progid:DXImageTransform.Microsoft.AlphaImageLoader(src='"+this.iconSrc+"', sizingMethod='scale')"; this.imgNode.src=this._blankGif.toString(); }else{ if(this._hasParent(this.imgNode)&&this.id!=""){ _23=this.imgNode.parentNode; _23.setAttribute("id",this.id); } this.imgNode.src=this.iconSrc; } if(this.lblNode){ this.lblNode.appendChild(document.createTextNode(this.label)); } _2.setSelectable(this.domNode,false); this.startup(); },startup:function(){ this.parent=this.getParent(); },onMouseOver:function(e){ if(!this.parent.isOver){ this.parent._setActive(e); } if(this.label!=""){ _2.addClass(this.lblNode,"dojoxFishSelected"); this.parent._positionLabel(this); } },onMouseOut:function(e){ _2.removeClass(this.lblNode,"dojoxFishSelected"); },onClick:function(e){ }}); });
PypiClean
/BotEXBotBase-3.1.3.tar.gz/BotEXBotBase-3.1.3/discord/iterators.py
import asyncio
import datetime

from .errors import NoMoreItems
from .utils import time_snowflake, maybe_coroutine
from .object import Object
from .audit_logs import AuditLogEntry


class _AsyncIterator:
    """Base class providing the async-iterator protocol plus small helpers.

    Subclasses implement ``next()`` and raise :exc:`NoMoreItems` when
    exhausted.
    """

    __slots__ = ()

    def get(self, **attrs):
        """Return the first element whose (``__``-nested) attributes equal *attrs*."""
        def predicate(item):
            for attr, expected in attrs.items():
                value = item
                for part in attr.split("__"):
                    value = getattr(value, part)
                if value != expected:
                    return False
            return True

        return self.find(predicate)

    async def find(self, predicate):
        """Return the first element for which *predicate* is truthy, else None."""
        while True:
            try:
                item = await self.next()
            except NoMoreItems:
                return None
            if await maybe_coroutine(predicate, item):
                return item

    def map(self, func):
        return _MappedAsyncIterator(self, func)

    def filter(self, predicate):
        return _FilteredAsyncIterator(self, predicate)

    async def flatten(self):
        """Exhaust the iterator into a plain list."""
        collected = []
        while True:
            try:
                collected.append(await self.next())
            except NoMoreItems:
                return collected

    def __aiter__(self):
        return self

    async def __anext__(self):
        try:
            return await self.next()
        except NoMoreItems:
            raise StopAsyncIteration()


def _identity(x):
    return x


class _MappedAsyncIterator(_AsyncIterator):
    def __init__(self, iterator, func):
        self.iterator = iterator
        self.func = func

    async def next(self):
        # NoMoreItems from the inner iterator propagates to the caller.
        item = await self.iterator.next()
        return await maybe_coroutine(self.func, item)


class _FilteredAsyncIterator(_AsyncIterator):
    def __init__(self, iterator, predicate):
        self.iterator = iterator
        self.predicate = _identity if predicate is None else predicate

    async def next(self):
        getter = self.iterator.next
        pred = self.predicate
        while True:
            # NoMoreItems propagates, mirroring _MappedAsyncIterator.
            item = await getter()
            if await maybe_coroutine(pred, item):
                return item


class ReactionIterator(_AsyncIterator):
    """Iterates the users who reacted to *message* with *emoji*, in pages of <= 100."""

    def __init__(self, message, emoji, limit=100, after=None):
        self.message = message
        self.limit = limit
        self.after = after
        state = message._state
        self.getter = state.http.get_reaction_users
        self.state = state
        self.emoji = emoji
        self.guild = message.guild
        self.channel_id = message.channel.id
        self.users = asyncio.Queue(loop=state.loop)

    async def next(self):
        if self.users.empty():
            await self.fill_users()
        try:
            return self.users.get_nowait()
        except asyncio.QueueEmpty:
            raise NoMoreItems()

    async def fill_users(self):
        # Imported here to avoid a circular import with .user.
        from .user import User

        if self.limit > 0:
            retrieve = min(self.limit, 100)
            after = self.after.id if self.after else None
            data = await self.getter(
                self.message.id, self.channel_id, self.emoji, retrieve, after=after
            )
            if data:
                self.limit -= retrieve
                self.after = Object(id=int(data[-1]["id"]))
                if self.guild is None:
                    for raw in reversed(data):
                        await self.users.put(User(state=self.state, data=raw))
                else:
                    # Prefer cached Member objects; fall back to plain Users.
                    for raw in reversed(data):
                        member = self.guild.get_member(int(raw["id"]))
                        if member is not None:
                            await self.users.put(member)
                        else:
                            await self.users.put(User(state=self.state, data=raw))


class HistoryIterator(_AsyncIterator):
    """Iterator over a channel's message history.

    The messages endpoint behaves as follows:

    * With ``before``, it returns the ``limit`` newest messages older than
      ``before``, newest first.  To page past 100 messages, ``before`` is
      advanced to the oldest message received; results stay time-ordered.
    * With ``after``, it returns the ``limit`` oldest messages newer than
      ``after``, still newest first.  To page past 100 messages, ``after``
      is advanced to the newest message received; unless reversed, pages
      arrive out of order (99-0, 199-100, ...).
    * If both ``before`` and ``after`` are given, the endpoint ignores
      ``before``; this class compensates with a client-side filter.

    Parameters
    ----------
    messageable: :class:`abc.Messageable`
        Messageable whose history is retrieved.
    limit: int
        Maximum number of messages to retrieve.
    before: :class:`Message` or id-like
        Message that all results must precede.
    after: :class:`Message` or id-like
        Message that all results must follow.
    around: :class:`Message` or id-like
        Message that results must surround (limit at most 101; an even
        limit may yield up to limit+1 messages).
    reverse: bool
        If true, yield messages oldest->newest.  Recommended for ``after``
        queries with limit over 100, otherwise pages are out of order.
    """

    def __init__(self, messageable, limit, before=None, after=None, around=None, reverse=None):
        # Accept datetimes anywhere an id-like is accepted, via snowflake bounds.
        if isinstance(before, datetime.datetime):
            before = Object(id=time_snowflake(before, high=False))
        if isinstance(after, datetime.datetime):
            after = Object(id=time_snowflake(after, high=True))
        if isinstance(around, datetime.datetime):
            around = Object(id=time_snowflake(around))

        self.messageable = messageable
        self.limit = limit
        self.before = before
        self.after = after
        self.around = around
        self.reverse = (after is not None) if reverse is None else reverse

        self._filter = None  # raw message dict -> bool
        self.state = self.messageable._state
        self.logs_from = self.state.http.logs_from
        self.messages = asyncio.Queue(loop=self.state.loop)

        if self.around:
            if self.limit is None:
                raise ValueError("history does not support around with limit=None")
            if self.limit > 101:
                raise ValueError("history max limit 101 when specifying around parameter")
            elif self.limit == 101:
                self.limit = 100  # Thanks discord
            elif self.limit == 1:
                raise ValueError("Use get_message.")
            self._retrieve_messages = self._retrieve_messages_around_strategy
            # around + before/after cannot be combined server-side; filter locally.
            if self.before and self.after:
                self._filter = lambda m: self.after.id < int(m["id"]) < self.before.id
            elif self.before:
                self._filter = lambda m: int(m["id"]) < self.before.id
            elif self.after:
                self._filter = lambda m: self.after.id < int(m["id"])
        elif self.before and self.after:
            # The endpoint ignores before when after is given, so pick the
            # strategy matching the iteration direction and filter the rest.
            if self.reverse:
                self._retrieve_messages = self._retrieve_messages_after_strategy
                self._filter = lambda m: int(m["id"]) < self.before.id
            else:
                self._retrieve_messages = self._retrieve_messages_before_strategy
                self._filter = lambda m: int(m["id"]) > self.after.id
        elif self.after:
            self._retrieve_messages = self._retrieve_messages_after_strategy
        else:
            self._retrieve_messages = self._retrieve_messages_before_strategy

    async def next(self):
        if self.messages.empty():
            await self.fill_messages()
        try:
            return self.messages.get_nowait()
        except asyncio.QueueEmpty:
            raise NoMoreItems()

    def _get_retrieve(self):
        """Set ``self.retrieve`` to the next page size; return whether to fetch."""
        limit = self.limit
        amount = 100 if limit is None else min(limit, 100)
        self.retrieve = amount
        return amount > 0

    async def flatten(self):
        # Same as fill_messages, but collects into a list instead of the queue.
        result = []
        channel = await self.messageable._get_channel()
        self.channel = channel
        while self._get_retrieve():
            data = await self._retrieve_messages(self.retrieve)
            if len(data) < 100:
                self.limit = 0  # short page: history exhausted, stop looping
            if self.reverse:
                data = reversed(data)
            if self._filter:
                data = filter(self._filter, data)
            for raw in data:
                result.append(self.state.create_message(channel=channel, data=raw))
        return result

    async def fill_messages(self):
        if not hasattr(self, "channel"):
            # Lazily resolve the destination channel on first use.
            self.channel = await self.messageable._get_channel()

        if self._get_retrieve():
            data = await self._retrieve_messages(self.retrieve)
            if self.limit is None and len(data) < 100:
                self.limit = 0  # short page: terminate the unbounded iteration
            if self.reverse:
                data = reversed(data)
            if self._filter:
                data = filter(self._filter, data)

            channel = self.channel
            for raw in data:
                await self.messages.put(self.state.create_message(channel=channel, data=raw))

    async def _retrieve_messages(self, retrieve):
        """Retrieve messages and update next parameters."""
        pass

    async def _retrieve_messages_before_strategy(self, retrieve):
        """Fetch one page older than ``self.before``; advance the cursor."""
        before = self.before.id if self.before else None
        data = await self.logs_from(self.channel.id, retrieve, before=before)
        if len(data):
            if self.limit is not None:
                self.limit -= retrieve
            self.before = Object(id=int(data[-1]["id"]))
        return data

    async def _retrieve_messages_after_strategy(self, retrieve):
        """Fetch one page newer than ``self.after``; advance the cursor."""
        after = self.after.id if self.after else None
        data = await self.logs_from(self.channel.id, retrieve, after=after)
        if len(data):
            if self.limit is not None:
                self.limit -= retrieve
            self.after = Object(id=int(data[0]["id"]))
        return data

    async def _retrieve_messages_around_strategy(self, retrieve):
        """Fetch the single page around ``self.around`` (one-shot strategy)."""
        if self.around:
            around = self.around.id if self.around else None
            data = await self.logs_from(self.channel.id, retrieve, around=around)
            self.around = None
            return data
        return []


class AuditLogIterator(_AsyncIterator):
    """Iterator over a guild's audit-log entries, paged 100 at a time."""

    def __init__(
        self,
        guild,
        limit=None,
        before=None,
        after=None,
        reverse=None,
        user_id=None,
        action_type=None,
    ):
        if isinstance(before, datetime.datetime):
            before = Object(id=time_snowflake(before, high=False))
        if isinstance(after, datetime.datetime):
            after = Object(id=time_snowflake(after, high=True))

        self.guild = guild
        self.loop = guild._state.loop
        self.request = guild._state.http.get_audit_logs
        self.limit = limit
        self.before = before
        self.user_id = user_id
        self.action_type = action_type
        self.after = after
        self._users = {}  # user id -> User, shared with every AuditLogEntry
        self._state = guild._state
        self.reverse = (after is not None) if reverse is None else reverse

        self._filter = None  # raw entry dict -> bool
        self.entries = asyncio.Queue(loop=self.loop)

        if self.before and self.after:
            # Same trick as HistoryIterator: one bound server-side, the
            # other enforced by a client-side filter.
            if self.reverse:
                self._strategy = self._after_strategy
                self._filter = lambda m: int(m["id"]) < self.before.id
            else:
                self._strategy = self._before_strategy
                self._filter = lambda m: int(m["id"]) > self.after.id
        elif self.after:
            self._strategy = self._after_strategy
        else:
            self._strategy = self._before_strategy

    async def _before_strategy(self, retrieve):
        """Fetch one page before the cursor; return (raw users, raw entries)."""
        before = self.before.id if self.before else None
        data = await self.request(
            self.guild.id,
            limit=retrieve,
            user_id=self.user_id,
            action_type=self.action_type,
            before=before,
        )
        entries = data.get("audit_log_entries", [])
        if len(data) and entries:
            if self.limit is not None:
                self.limit -= retrieve
            self.before = Object(id=int(entries[-1]["id"]))
        return data.get("users", []), entries

    async def _after_strategy(self, retrieve):
        """Fetch one page after the cursor; return (raw users, raw entries)."""
        after = self.after.id if self.after else None
        data = await self.request(
            self.guild.id,
            limit=retrieve,
            user_id=self.user_id,
            action_type=self.action_type,
            after=after,
        )
        entries = data.get("audit_log_entries", [])
        if len(data) and entries:
            if self.limit is not None:
                self.limit -= retrieve
            self.after = Object(id=int(entries[0]["id"]))
        return data.get("users", []), entries

    async def next(self):
        if self.entries.empty():
            await self._fill()
        try:
            return self.entries.get_nowait()
        except asyncio.QueueEmpty:
            raise NoMoreItems()

    def _get_retrieve(self):
        """Set ``self.retrieve`` to the next page size; return whether to fetch."""
        limit = self.limit
        amount = 100 if limit is None else min(limit, 100)
        self.retrieve = amount
        return amount > 0

    async def _fill(self):
        # Imported here to avoid a circular import with .user.
        from .user import User

        if self._get_retrieve():
            users, data = await self._strategy(self.retrieve)
            if self.limit is None and len(data) < 100:
                self.limit = 0  # short page: terminate the unbounded iteration
            if self.reverse:
                data = reversed(data)
            if self._filter:
                data = filter(self._filter, data)

            for raw_user in users:
                u = User(data=raw_user, state=self._state)
                self._users[u.id] = u

            for raw_entry in data:
                # TODO: remove this guard once entries without an action type
                # no longer appear in the payload.
                if raw_entry["action_type"] is None:
                    continue
                await self.entries.put(
                    AuditLogEntry(data=raw_entry, users=self._users, guild=self.guild)
                )
PypiClean
/NREL-jade-0.9.9.tar.gz/NREL-jade-0.9.9/jade/cli/run.py
import logging
import os
import sys

import click

from jade.common import OUTPUT_DIR
from jade.events import StructuredErrorLogEvent, EVENT_CATEGORY_ERROR, EVENT_NAME_UNHANDLED_ERROR
from jade.loggers import log_event, setup_logging, setup_event_logging
from jade.jobs.job_post_process import JobPostProcess
from jade.utils.utils import get_cli_string, load_data
from jade.exceptions import InvalidExtension
from jade.extensions.registry import Registry, ExtensionClassType


def _log_unhandled_error(source, message):
    """Record an unhandled-error structured event for downstream tooling.

    Factored out of ``run`` because the same event was built verbatim in
    both exception handlers.

    :param source: job name the error belongs to
    :param message: human-readable error description (already logged)
    """
    event = StructuredErrorLogEvent(
        source=source,
        category=EVENT_CATEGORY_ERROR,
        name=EVENT_NAME_UNHANDLED_ERROR,
        message=message,
    )
    log_event(event)


@click.argument("extension")
@click.option(
    "-n",
    "--name",
    required=True,
    type=str,
    help="The name of the job that needs to run.",
)
@click.option("-o", "--output", default=OUTPUT_DIR, show_default=True, help="Output directory.")
@click.option("--config-file", required=True, help="Job configuration file")
@click.option(
    "-f",
    "--output-format",
    default="csv",
    show_default=True,
    help="Output format for data (csv or json).",
)
@click.option(
    "--verbose", is_flag=True, default=False, show_default=True, help="Enable verbose log output."
)
@click.command()
def run(extension, **kwargs):
    """Runs individual job.

    Looks up the extension's CLI class in the registry, executes the job,
    then runs the optional post-process step when the job succeeded.
    Exits the process with the job's return code.
    """
    registry = Registry()
    if not registry.is_registered(extension):
        raise InvalidExtension(f"Extension '{extension}' is not registered.")

    # Parse arguments.
    config_file = kwargs["config_file"]
    name = kwargs["name"]
    output = kwargs["output"]
    output_format = kwargs["output_format"]
    verbose = kwargs["verbose"]
    level = logging.DEBUG if verbose else logging.INFO

    # Each job gets its own directory for logs and results.
    job_dir = os.path.join(output, name)
    os.makedirs(job_dir, exist_ok=True)

    # Structured (machine-readable) event logging.
    event_file = os.path.join(job_dir, "events.log")
    setup_event_logging(event_file, mode="a")

    # General logging setup.
    log_file = os.path.join(job_dir, "run.log")
    general_logger = setup_logging(
        extension,
        log_file,
        console_level=logging.ERROR,
        file_level=level,
    )
    general_logger.info(get_cli_string())

    # Run the extension's CLI entry point; broad except is intentional at
    # this top-level boundary — the error is logged and reported as ret=1.
    try:
        cli = registry.get_extension_class(extension, ExtensionClassType.CLI)
        ret = cli.run(config_file, name, output, output_format, verbose)
    except Exception as err:
        msg = f"unexpected exception in run '{extension}' job={name} - {err}"
        general_logger.exception(msg)
        _log_unhandled_error(name, msg)
        ret = 1

    # Post-processing only runs for a successful job.
    if ret == 0:
        try:
            config = load_data(config_file)
            if "job_post_process_config" in config:
                post_process = JobPostProcess(
                    module_name=config["job_post_process_config"]["module"],
                    class_name=config["job_post_process_config"]["class"],
                    data=config["job_post_process_config"]["data"],
                    job_name=name,
                    output=output,
                )
                post_process.run(config_file=config_file, output=output)
        except Exception as err:
            msg = f"unexpected exception in post-process '{extension}' job={name} - {err}"
            general_logger.exception(msg)
            _log_unhandled_error(name, msg)
            ret = 1

    sys.exit(ret)
PypiClean
/Authcode-1.6.0-py3-none-any.whl/authcode/auth_authorization_mixin.py
import functools import logging from uuid import uuid4 from ._compat import to_unicode class AuthorizationMixin(object): # Useful for setting a cookie only if the CSRF token has changed. csrf_token_has_changed = False def get_csrf_token(self, session=None): logger = logging.getLogger(__name__) if session is None: session = self.session csrf_token = session.get(self.csrf_key) if not csrf_token: logger.debug(u'New CSFR token') csrf_token = self.make_csrf_token() session[self.csrf_key] = csrf_token if callable(getattr(session, 'save', None)): session.save() return csrf_token def make_csrf_token(self): self.csrf_token_has_changed = True return str(uuid4()).replace('-', '') def protected(self, *tests, **kwargs): """Factory of decorators for limit the access to views. :tests: *function, optional One or more functions that takes the args and kwargs of the view and returns either `True` or `False`. All test must return True to show the view. Options: :role: str, optional Test for the user having a role with this name. :roles: list, optional Test for the user having **any** role in this list of names. :csrf: bool, None, optional If ``None`` (the default), the decorator will check the value of the CSFR token for POST, PUT or DELETE requests. If ``True`` it will do the same also for all requests. If ``False``, the value of the CSFR token will not be checked. :url_sign_in: str, function, optional If any required condition fail, redirect to this place. Override the default URL. This can also be a callable. :request: obj, optional Overwrite the request for testing. The rest of the ``key=value`` pairs in ``kwargs`` are interpreted as tests. The user must have a property `key` with a value equals to `value`. If the user has a method named `key`, that method is called with `value` as a single argument and must return True to show the view. 
""" _role = kwargs.pop('role', None) _roles = kwargs.pop('roles', None) or [] _csrf = kwargs.pop('csrf', None) _url_sign_in = kwargs.pop('url_sign_in', None) _request = kwargs.pop('request', None) if _role: _roles.append(_role) _roles = [to_unicode(r) for r in _roles] _tests = tests _user_tests = kwargs def decorator(f): @functools.wraps(f) def wrapper(*args, **kwargs): logger = logging.getLogger(__name__) request = _request or self.request or args and args[0] url_sign_in = self._get_url_sign_in(request, _url_sign_in) user = self.get_user() if not user: return self._login_required(request, url_sign_in) if hasattr(user, 'has_role') and _roles: if not user.has_role(*_roles): logger.debug(u'User `{0}`: has_role fail'.format(user.login)) logger.debug(u'User roles: {0}'.format([r.name for r in user.roles])) return self.wsgi.raise_forbidden() for test in _tests: test_pass = test(user, *args, **kwargs) if not test_pass: logger.debug(u'User `{0}`: test fail'.format(user.login)) return self.wsgi.raise_forbidden() for name, value in _user_tests.items(): user_test = getattr(user, name) test_pass = user_test(value, *args, **kwargs) if not test_pass: logger.debug(u'User `{0}`: test fail'.format(user.login)) return self.wsgi.raise_forbidden() disable_csrf = _csrf == False # noqa if (not self.wsgi.is_idempotent(request) and not disable_csrf) or _csrf: if not self.csrf_token_is_valid(request): logger.debug(u'User `{0}`: invalid CSFR token'.format(user.login)) return self.wsgi.raise_forbidden("CSFR token isn't valid") return f(*args, **kwargs) return wrapper return decorator def replace_flask_route(self, bp, *args, **kwargs): """Replace the Flask `app.route` or `blueprint.route` with a version that first apply the protected decorator to the view, so all views are automatically protected.""" protected = self.protected def protected_route(rule, **options): """Like :meth:`Flask.route` but for a blueprint. 
The endpoint for the :func:`url_for` function is prefixed with the name of the blueprint. """ def decorator(f): endpoint = options.pop("endpoint", f.__name__) protected_f = protected(*args, **kwargs)(f) bp.add_url_rule(rule, endpoint, protected_f, **options) return f return decorator bp.route = protected_route def csrf_token_is_valid(self, request, session=None): token = self._get_csrf_token_from_request(request) return token and self._csrf_token_is_valid(token, session) def _csrf_token_is_valid(self, token, session=None): new_token = self.get_csrf_token(session=session) return new_token == token def _login_required(self, request, url_sign_in): self.session[self.redirect_key] = self.wsgi.get_full_path(request) if callable(getattr(self.session, 'save', None)): self.session.save() return self.wsgi.redirect(url_sign_in) def _get_url_sign_in(self, request, url_sign_in): url_sign_in = url_sign_in or self.url_sign_in if callable(url_sign_in): url_sign_in = url_sign_in(request) return url_sign_in or '/' def _get_csrf_token_from_request(self, request): token = self.wsgi.get_from_params(request, self.csrf_key) or \ self.wsgi.get_from_headers(request, self.csrf_header) or \ self.wsgi.get_from_headers(request, self.csrf_header_alt) return token
PypiClean
/FastNLP-1.0.1.tar.gz/FastNLP-1.0.1/fastNLP/core/vocabulary.py
r""" .. todo:: doc """ __all__ = [ "Vocabulary", "VocabularyOption", ] from collections import Counter from functools import partial from functools import wraps from typing import List, Callable, Union from fastNLP.core.dataset import DataSet from fastNLP.core.utils.utils import Option from fastNLP.core.utils.utils import _is_iterable from .log import logger import io class VocabularyOption(Option): """ """ def __init__(self, max_size=None, min_freq=None, padding='<pad>', unknown='<unk>'): super().__init__( max_size=max_size, min_freq=min_freq, padding=padding, unknown=unknown ) def _check_build_vocab(func: Callable): r""" A decorator to make sure the indexing is built before used. :param func: 传入的callable函数 """ @wraps(func) # to solve missing docstring def _wrapper(self, *args, **kwargs): if self._word2idx is None or self.rebuild is True: self.build_vocab() return func(self, *args, **kwargs) return _wrapper def _check_build_status(func): r""" A decorator to check whether the vocabulary updates after the last build. :param func: 用户传入要修饰的callable函数 """ @wraps(func) # to solve missing docstring def _wrapper(self, *args, **kwargs): if self.rebuild is False: self.rebuild = True if self.max_size is not None and len(self.word_count) >= self.max_size: logger.warning("Vocabulary has reached the max size {} when calling {} method. " "Adding more words may cause unexpected behaviour of Vocabulary. ".format( self.max_size, func.__name__)) return func(self, *args, **kwargs) return _wrapper class Vocabulary(object): r""" 用于构建, 存储和使用 `str` 到 `int` 的一一映射:: from fastNLP.core import Vocabulary vocab = Vocabulary() word_list = "this is a word list".split() # vocab更新自己的字典,输入为list列表 vocab.update(word_list) vocab["word"] # str to int vocab.to_word(5) # int to str :param max_size: `Vocabulary` 的最大大小, 即能存储词的最大数量 若为 ``None`` , 则不限制大小。 :param min_freq: 能被记录下的词在文本中的最小出现频率, 应大于或等于 1。 若小于该频率, 词语将被视为 `unknown`. 若为 ``None`` , 所有文本中的词都被记录。 :param padding: padding的字符. 
如果设置为 ``None`` , 则vocabulary中不考虑padding, 也不计入词表大小,为 ``None`` 的情况多在为 label 建立 Vocabulary 的情况。 :param unknown: unknown的字符,所有未被记录的词在转为 :class:`int` 时将被视为 `unknown` 。 如果设置为 ``None`` ,则 vocabulary 中不考虑 `unknown`, 也不计入词表大小。 为 ``None`` 的情况多在为 labe l建立 Vocabulary 的情况 """ def __init__(self, max_size:int=None, min_freq:int=None, padding:str='<pad>', unknown:str='<unk>'): self.max_size = max_size self.min_freq = min_freq self.word_count = Counter() self.unknown = unknown self.padding = padding self._word2idx = None self._idx2word = None self.rebuild = True # 用于承载不需要单独创建entry的词语,具体见from_dataset()方法 self._no_create_word = Counter() @property @_check_build_vocab def word2idx(self): return self._word2idx @word2idx.setter def word2idx(self, value): self._word2idx = value @property @_check_build_vocab def idx2word(self): return self._idx2word @idx2word.setter def idx2word(self, value): self._word2idx = value @_check_build_status def update(self, word_lst: list, no_create_entry:bool=False): r""" 依次增加序列中词在词典中的出现频率 :param word_lst: 列表形式的词语,如 word_list=['I', 'am', 'a', 'Chinese'],列表中的每个词会计算出现频率并加入到词典中。 :param no_create_entry: 如果词语来自于非训练集建议设置为 ``True`` 。 * 如果为 ``True`` -- 则不会有这个词语创建一个单独的 entry ,它将一直被指向 ``<UNK>`` 的表示; * 如果为 ``False`` -- 为这个词创建一个单独的 entry。如果这个词来自于验证集或训练集,一般设置为True,如果来自于训练集一 般设置为``False``; 有以下两种情况: 如果新加入一个 word ,且 ``no_create_entry`` 为 ``True``,但这个词之前已经在 Vocabulary 中且并不是 ``no_create_entry`` 的,则还是会为这个词创建一个单独的 vector ; 如果 ``no_create_entry`` 为 ``False`` ,但这个词之 前已经在 Vocabulary 中且并不是 ``no_create_entry的`` ,则这个词将认为是需要创建单独的 vector 的。 """ self._add_no_create_entry(word_lst, no_create_entry) self.word_count.update(word_lst) return self @_check_build_status def add(self, word:str, no_create_entry:bool=False): r""" 增加一个新词在词典中的出现频率 :param word: 要添加进字典的新词, ``word`` 为一个字符串 :param no_create_entry: 如果词语来自于非训练集建议设置为 ``True`` 。 * 如果为 ``True`` -- 则不会有这个词语创建一个单独的 entry ,它将一直被指向 ``<UNK>`` 的表示; * 如果为 ``False`` -- 为这个词创建一个单独的 entry。如果这个词来自于验证集或训练集,一般设置为 ``True`` ,如果来自于训练集一 般设置为 ``False``; 
有以下两种情况: 如果新加入一个 word ,且 ``no_create_entry`` 为 ``True``,但这个词之前已经在 Vocabulary 中且并不是 ``no_create_entry`` 的,则还是会为这个词创建一个单独的 vector ; 如果 ``no_create_entry`` 为 ``False`` ,但这个词之 前已经在 Vocabulary 中且并不是 ``no_create_entry的`` ,则这个词将认为是需要创建单独的 vector 的。 """ self._add_no_create_entry(word, no_create_entry) self.word_count[word] += 1 return self def _add_no_create_entry(self, word:Union[str, List[str]], no_create_entry:bool): r""" 在新加入word时,检查_no_create_word的设置。 :param word: 要添加的新词或者是 :class:`List`类型的新词,如 word='I' 或者 word=['I', 'am', 'a', 'Chinese'] 均可 :param no_create_entry: 如果词语来自于非训练集建议设置为 ``True`` 。 * 如果为 ``True`` -- 则不会有这个词语创建一个单独的 entry ,它将一直被指向 ``<UNK>`` 的表示; * 如果为 ``False`` -- 为这个词创建一个单独的 entry。如果这个词来自于验证集或训练集,一般设置为 ``True`` ,如果来自于训练集一 般设置为 ``False``; :return: """ if isinstance(word, str) or not _is_iterable(word): word = [word] for w in word: if no_create_entry and self.word_count.get(w, 0) == self._no_create_word.get(w, 0): self._no_create_word[w] += 1 elif not no_create_entry and w in self._no_create_word: self._no_create_word.pop(w) @_check_build_status def add_word(self, word:str, no_create_entry:bool=False): r""" 增加一个新词在词典中的出现频率 :param word: 要添加进字典的新词, ``word`` 为一个字符串 :param no_create_entry: 如果词语来自于非训练集建议设置为 ``True`` 。 * 如果为 ``True`` -- 则不会有这个词语创建一个单独的 entry ,它将一直被指向 ``<UNK>`` 的表示; * 如果为 ``False`` -- 为这个词创建一个单独的 entry。如果这个词来自于验证集或训练集,一般设置为 ``True`` ,如果来自于训练集一 般设置为 ``False``; 有以下两种情况: 如果新加入一个 word ,且 ``no_create_entry`` 为 ``True``,但这个词之前已经在 Vocabulary 中且并不是 ``no_create_entry`` 的,则还是会为这个词创建一个单独的 vector ; 如果 ``no_create_entry`` 为 ``False`` ,但这个词之 前已经在 Vocabulary 中且并不是 ``no_create_entry的`` ,则这个词将认为是需要创建单独的 vector 的。 """ self.add(word, no_create_entry=no_create_entry) @_check_build_status def add_word_lst(self, word_lst: List[str], no_create_entry:bool=False): r""" 依次增加序列中词在词典中的出现频率 :param word_lst: 需要添加的新词的 list 序列,如 word_lst=['I', 'am', 'a', 'Chinese'] 。 :param no_create_entry: 如果词语来自于非训练集建议设置为 ``True`` 。 * 如果为 ``True`` -- 则不会有这个词语创建一个单独的 entry ,它将一直被指向 ``<UNK>`` 的表示; 
* 如果为 ``False`` -- 为这个词创建一个单独的 entry。如果这个词来自于验证集或训练集,一般设置为 ``True`` ,如果来自于训练集一 般设置为 ``False``; 有以下两种情况: 如果新加入一个 word ,且 ``no_create_entry`` 为 ``True``,但这个词之前已经在 Vocabulary 中且并不是 ``no_create_entry`` 的,则还是会为这个词创建一个单独的 vector ; 如果 ``no_create_entry`` 为 ``False`` ,但这个词之 前已经在 Vocabulary 中且并不是 ``no_create_entry的`` ,则这个词将认为是需要创建单独的 vector 的。 """ self.update(word_lst, no_create_entry=no_create_entry) return self def build_vocab(self): r""" 根据已经出现的词和出现频率构建词典。注意:重复构建可能会改变词典的大小, 但已经记录在词典中的词,不会改变对应的 :class:`int` """ if self._word2idx is None: self._word2idx = {} if self.padding is not None: self._word2idx[self.padding] = len(self._word2idx) if (self.unknown is not None) and (self.unknown != self.padding): self._word2idx[self.unknown] = len(self._word2idx) max_size = min(self.max_size, len(self.word_count)) if self.max_size else None words = self.word_count.most_common(max_size) if self.min_freq is not None: words = filter(lambda kv: kv[1] >= self.min_freq, words) if self._word2idx is not None: words = filter(lambda kv: kv[0] not in self._word2idx, words) start_idx = len(self._word2idx) self._word2idx.update({w: i + start_idx for i, (w, _) in enumerate(words)}) self.build_reverse_vocab() self.rebuild = False return self def build_reverse_vocab(self): r""" 基于 `word to index` dict, 构建 `index to word` dict. 
""" self._idx2word = {i: w for w, i in self._word2idx.items()} return self @_check_build_vocab def __len__(self): return len(self._word2idx) @_check_build_vocab def __contains__(self, item:str): r""" 检查词是否被记录 :param item: the word :return: True or False """ return item in self._word2idx def has_word(self, w:str): r""" 检查词是否被记录:: has_abc = vocab.has_word('abc') # equals to has_abc = 'abc' in vocab :param item: 输入的str类型的词 :return: ``True`` or ``False`` """ return self.__contains__(w) @_check_build_vocab def __getitem__(self, w): r""" 支持从字典中直接得到词语的index,例如:: vocab[w] """ if w in self._word2idx: return self._word2idx[w] if self.unknown is not None: return self._word2idx[self.unknown] else: raise ValueError("word `{}` not in vocabulary".format(w)) @_check_build_vocab def index_dataset(self, *datasets, field_name:Union[List, str], new_field_name:Union[List, str, None]=None): r""" 将 ``DataSet`` 中对应 field 的词转为数字,例如:: # remember to use `field_name` vocab.index_dataset(train_data, dev_data, test_data, field_name='words') :param datasets: 其类型为 :class:`~fastNLP.core.dataset.DataSet` 或者 :class:`List` [ :class:`~fastNLP.core.dataset.DataSet` ], 即需要处理的一个或多个数据集 :param field_name: 需要转为 index 的 field, 若有多个 DataSet, 每个 DataSet 都必须有此 field. 
目前支持 :class:`str` , :class:`List` [ :class:`str` ] :param new_field_name: 保存结果的 field_name。 若为 ``None`` , 将覆盖原 field。 """ def index_instance(field): r""" 有几种情况, str, 1d-list, 2d-list :param ins: :return: """ if isinstance(field, str) or not _is_iterable(field): return self.to_index(field) else: if isinstance(field[0], str) or not _is_iterable(field[0]): return [self.to_index(w) for w in field] else: if not isinstance(field[0][0], str) and _is_iterable(field[0][0]): raise RuntimeError("Only support field with 2 dimensions.") return [[self.to_index(c) for c in w] for w in field] new_field_name = new_field_name or field_name if type(new_field_name) == type(field_name): if isinstance(new_field_name, list): assert len(new_field_name) == len(field_name), "new_field_name should have same number elements with " \ "field_name." elif isinstance(new_field_name, str): field_name = [field_name] new_field_name = [new_field_name] else: raise TypeError("field_name and new_field_name can only be str or List[str].") for idx, dataset in enumerate(datasets): if isinstance(dataset, DataSet): ds_lst = [dataset] elif _is_iterable(dataset): ds_lst = list(dataset) else: raise TypeError(f"Only DataSet type is allowed, instead of {type(dataset)}.") try: for ds in ds_lst: for f_n, n_f_n in zip(field_name, new_field_name): ds.apply_field(index_instance, field_name=f_n, new_field_name=n_f_n, progress_bar=None) except Exception as e: logger.error("When processing the `{}` dataset, the following error occurred.".format(idx)) raise e return self @property def _no_create_word_length(self): return len(self._no_create_word) def from_dataset(self, *datasets, field_name:Union[str,List[str]], no_create_entry_dataset=None): r""" 使用dataset的对应field中词构建词典:: # remember to use `field_name` vocab.from_dataset(train_data1, train_data2, field_name='words', no_create_entry_dataset=[test_data1, test_data2]) :param datasets: 其类型为 :class:`~fastNLP.core.dataset.DataSet` 或者 
List[:class:`~fastNLP.core.dataset.DataSet`]。 :param field_name: 构建词典所使用的 field(s), 支持一个或多个 field,若有多个 DataSet, 每个 DataSet 都必须有这些 field. 目前支持的field结构: ``str`` , ``List[str]`` :param no_create_entry_dataset: 可以传入 :class:`~fastNLP.core.dataset.DataSet`, :class:`List` [ :class:`~fastNLP.core.dataset.DataSet` ] 或者 ``None`` (默认),建议直接将非训练数据都传入到这个参数。该选项用于接下来的模型会使用预训练的 embedding (包括 ``glove``, ``word2vec`` , ``elmo`` 与 ``bert`` )且会 finetune 的情况。如果仅使用来自于训练集的数据建立词表,会导致测试集与验证集中的数据无法充分利用到来自于预训练 embedding 的信息,所以在建立词表的时候将测试集与验证集考虑进来会使得最终的结果更好。 如果一个词出现在了训练集中,但是没在预训练模型中, embedding 会为它用 ``<UNK>`` 初始化;但如果它是单独的一个 vector ,并且 finetune embedding 的话,这个词在更新之后可能会有更好的表示;而如果这个词仅出现在了验证集或者测试集中,那么就不能为它们单独建立 vector,而应该让它指向 ``<UNK>`` 这个 vector 的值。所以只位于 ``no_create_entry_dataset`` 中的 token 将首先从预训练的词表中寻找它的表示,如果找到了,就使用该表示; 如果没有找到,则认 为该词的表示应该为 ``<UNK>`` 的表示。 :return: Vocabulary 自身 """ if isinstance(field_name, str): field_name = [field_name] elif not isinstance(field_name, list): raise TypeError('invalid argument field_name: {}'.format(field_name)) def construct_vocab(ins, no_create_entry=False): for fn in field_name: field = ins[fn] if isinstance(field, str) or not _is_iterable(field): self.add_word(field, no_create_entry=no_create_entry) else: if isinstance(field[0], str) or not _is_iterable(field[0]): for word in field: self.add_word(word, no_create_entry=no_create_entry) else: if not isinstance(field[0][0], str) and _is_iterable(field[0][0]): raise RuntimeError("Only support field with 2 dimensions.") for words in field: for word in words: self.add_word(word, no_create_entry=no_create_entry) for idx, dataset in enumerate(datasets): if isinstance(dataset, DataSet): ds_lst = [dataset] elif _is_iterable(dataset): ds_lst = list(dataset) else: raise TypeError(f"Only DataSet type is allowed, instead of {type(dataset)}.") try: for ds in ds_lst: ds.apply(construct_vocab, progress_bar=None) except BaseException as e: logger.error("When processing the `{}` dataset, the following error 
occurred:".format(idx)) raise e if no_create_entry_dataset is not None: partial_construct_vocab = partial(construct_vocab, no_create_entry=True) if isinstance(no_create_entry_dataset, DataSet): no_create_entry_dataset.apply(partial_construct_vocab, progress_bar=None) elif isinstance(no_create_entry_dataset, list): for dataset in no_create_entry_dataset: if not isinstance(dataset, DataSet): raise TypeError("Only DataSet type is allowed.") dataset.apply(partial_construct_vocab, progress_bar=None) return self def _is_word_no_create_entry(self, word:str): r""" 判断当前的word是否是不需要创建entry的,具体参见from_dataset的说明 :param word: 输入的str类型的词语 :return: bool值的判断结果 """ return word in self._no_create_word def to_index(self, w:str): r""" 将词转为数字。 若词不在词典中被记录, 将视为 `unknown`, 若 ``unknown=None`` , 将抛出 ``ValueError`` :: index = vocab.to_index('abc') # equals to index = vocab['abc'] :param w: 需要输入的词语 :return: 词语 ``w`` 对应的 :class:`int`类型的 index """ return self.__getitem__(w) @property @_check_build_vocab def unknown_idx(self): r""" 获得 ``unknown`` 对应的数字. """ if self.unknown is None: return None return self._word2idx[self.unknown] @property @_check_build_vocab def padding_idx(self): r""" 获得 ``padding`` 对应的数字 """ if self.padding is None: return None return self._word2idx[self.padding] @_check_build_vocab def to_word(self, idx: int): r""" 给定一个数字, 将其转为对应的词. 
:param idx: :return: ``idx`` 对应的词 """ return self._idx2word[idx] def clear(self): r""" 删除 :class:Vocabulary`` 中的词表数据。相当于重新初始化一下。 :return: 自身 """ self.word_count.clear() self._word2idx = None self._idx2word = None self.rebuild = True self._no_create_word.clear() return self def __getstate__(self): r""" 用来从 pickle 中加载 data """ len(self) # make sure vocab has been built state = self.__dict__.copy() # no need to pickle _idx2word as it can be constructed from _word2idx del state['_idx2word'] return state def __setstate__(self, state): r""" 支持 pickle 的保存,保存到 pickle 的 data state """ self.__dict__.update(state) self.build_reverse_vocab() def __repr__(self): return "Vocabulary({}...)".format(list(self.word_count.keys())[:5]) @_check_build_vocab def __iter__(self): # 依次(word1, 0), (word1, 1) for index in range(len(self._word2idx)): yield self.to_word(index), index def save(self, filepath: Union[str, io.StringIO]): r""" 保存当前词表。 :param filepath: 词表储存路径 """ if isinstance(filepath, io.IOBase): assert filepath.writable() f = filepath elif isinstance(filepath, str): try: f = open(filepath, 'w', encoding='utf-8') except Exception as e: raise e else: raise TypeError("Illegal `path`.") f.write(f'max_size\t{self.max_size}\n') f.write(f'min_freq\t{self.min_freq}\n') f.write(f'unknown\t{self.unknown}\n') f.write(f'padding\t{self.padding}\n') f.write(f'rebuild\t{self.rebuild}\n') f.write('\n') # idx: 如果idx为-2, 说明还没有进行build; 如果idx为-1,说明该词未编入 # no_create_entry: 如果为1,说明该词是no_create_entry; 0 otherwise # word \t count \t idx \t no_create_entry \n idx = -2 for word, count in self.word_count.items(): if self._word2idx is not None: idx = self._word2idx.get(word, -1) is_no_create_entry = int(self._is_word_no_create_entry(word)) f.write(f'{word}\t{count}\t{idx}\t{is_no_create_entry}\n') if isinstance(filepath, str): # 如果是file的话就关闭 f.close() @staticmethod def load(filepath: Union[str,io.StringIO]): r""" 从文件路径中加载数据 :param filepath: 词表的读取路径 :return: 读取的 :class:`Vocabulary` """ if isinstance(filepath, 
io.IOBase): assert filepath.writable() f = filepath elif isinstance(filepath, str): try: f = open(filepath, 'r', encoding='utf-8') except Exception as e: raise e else: raise TypeError("Illegal `path`.") vocab = Vocabulary() for line in f: line = line.strip('\n') if line: name, value = line.split() if name in ('max_size', 'min_freq'): value = int(value) if value!='None' else None setattr(vocab, name, value) elif name in ('unknown', 'padding'): value = value if value!='None' else None setattr(vocab, name, value) elif name == 'rebuild': vocab.rebuild = True if value=='True' else False else: break word_counter = {} no_create_entry_counter = {} word2idx = {} for line in f: line = line.strip('\n') if line: parts = line.split('\t') word,count,idx,no_create_entry = parts[0], int(parts[1]), int(parts[2]), int(parts[3]) if idx >= 0: word2idx[word] = idx word_counter[word] = count if no_create_entry: no_create_entry_counter[word] = count word_counter = Counter(word_counter) no_create_entry_counter = Counter(no_create_entry_counter) if len(word2idx)>0: if vocab.padding: word2idx[vocab.padding] = 0 if vocab.unknown: word2idx[vocab.unknown] = 1 if vocab.padding else 0 idx2word = {value:key for key,value in word2idx.items()} vocab.word_count = word_counter vocab._no_create_word = no_create_entry_counter if word2idx: vocab._word2idx = word2idx vocab._idx2word = idx2word if isinstance(filepath, str): # 如果是file的话就关闭 f.close() return vocab
PypiClean
/NeutronImaging-1.2-py3-none-any.whl/neutronimaging/util.py
import os
from typing import Generator


def in_jupyter() -> bool:
    """Check whether the current kernel is running as a notebook backend.

    Returns True only when IPython is importable, an interactive shell is
    active, and its class name indicates a ZMQ (notebook) kernel.

    Bug fix: the original caught only ``NameError``, but the import raises
    ``ImportError`` when IPython is absent and ``get_ipython()`` returns
    ``None`` in a plain interpreter (→ ``AttributeError``); both now fall
    back to False instead of crashing.
    """
    try:
        from IPython import get_ipython

        kernel_name = get_ipython().__class__.__name__
        state = "ZMQ" in kernel_name
    except (ImportError, AttributeError, NameError):
        # ImportError: IPython not installed.
        # AttributeError: get_ipython() returned None (plain Python session).
        # NameError: retained from the original check for safety.
        state = False
    return state


def probe_folder(root: str = ".") -> dict:
    """Return the folder structure rooted at *root* as a nested dictionary.

    Files appear as path strings; sub-directories recurse into nested dicts
    keyed by the directory's basename.
    """
    return {
        os.path.basename(root): [
            os.path.join(root, me)
            if os.path.isfile(os.path.join(root, me))
            else probe_folder(os.path.join(root, me))
            for me in os.listdir(root)
        ]
    }


def _flatten_str_list(inlist: list) -> Generator:
    """Flatten an arbitrarily nested list, yielding only the strings."""
    for item in inlist:
        if isinstance(item, str):
            yield item
        else:
            yield from _flatten_str_list(item)


def dir_tree_to_list(dir_tree: dict, flatten=True, sort=True) -> list:
    """Convert a dir tree (dict, as produced by :func:`probe_folder`) to a list.

    :param dir_tree: nested {dirname: [entries]} mapping
    :param flatten: when True, flatten nested sub-directory lists into one list
    :param sort: when True, return the entries sorted lexicographically
    """
    _imglist = []
    for k, v in dir_tree.items():
        _imglist += [
            me if not isinstance(me, dict) else dir_tree_to_list(me) for me in v
        ]
    _imglist = list(_flatten_str_list(_imglist)) if flatten else _imglist
    return sorted(_imglist) if sort else _imglist


def convert_epics_timestamp_to_rfc3339_timestamp(epics_timestamp: float) -> float:
    """Convert an EPICS timestamp to a UNIX-epoch timestamp.

    TIFF files from CG1D have EPICS timestamps. From the Controls Wiki:

    > EPICS timestamp. The timestamp is made when the image is read
    > out from the camera. Format is seconds.nanoseconds since Jan 1st
    > 00:00 1990.

    The offset is the number of seconds between the EPICS epoch
    (1990-01-01) and the UNIX epoch (1970-01-01).
    """
    EPOCH_OFFSET = 631152000
    unix_epoch_timestamp = EPOCH_OFFSET + epics_timestamp
    return unix_epoch_timestamp


if __name__ == "__main__":
    pass
PypiClean
/MaterialDjango-0.2.5.tar.gz/MaterialDjango-0.2.5/materialdjango/static/materialdjango/components/bower_components/web-animations-js/src/keyframe-interpolations.js
(function(shared, scope, testing) { scope.convertEffectInput = function(effectInput) { var keyframes = shared.normalizeKeyframes(effectInput); var propertySpecificKeyframeGroups = makePropertySpecificKeyframeGroups(keyframes); var interpolations = makeInterpolations(propertySpecificKeyframeGroups); return function(target, fraction) { if (fraction != null) { interpolations.filter(function(interpolation) { return fraction >= interpolation.applyFrom && fraction < interpolation.applyTo; }).forEach(function(interpolation) { var offsetFraction = fraction - interpolation.startOffset; var localDuration = interpolation.endOffset - interpolation.startOffset; var scaledLocalTime = localDuration == 0 ? 0 : interpolation.easingFunction(offsetFraction / localDuration); scope.apply(target, interpolation.property, interpolation.interpolation(scaledLocalTime)); }); } else { for (var property in propertySpecificKeyframeGroups) if (property != 'offset' && property != 'easing' && property != 'composite') scope.clear(target, property); } }; }; function makePropertySpecificKeyframeGroups(keyframes) { var propertySpecificKeyframeGroups = {}; for (var i = 0; i < keyframes.length; i++) { for (var member in keyframes[i]) { if (member != 'offset' && member != 'easing' && member != 'composite') { var propertySpecificKeyframe = { offset: keyframes[i].offset, easing: keyframes[i].easing, value: keyframes[i][member] }; propertySpecificKeyframeGroups[member] = propertySpecificKeyframeGroups[member] || []; propertySpecificKeyframeGroups[member].push(propertySpecificKeyframe); } } } for (var groupName in propertySpecificKeyframeGroups) { var group = propertySpecificKeyframeGroups[groupName]; if (group[0].offset != 0 || group[group.length - 1].offset != 1) { throw { type: DOMException.NOT_SUPPORTED_ERR, name: 'NotSupportedError', message: 'Partial keyframes are not supported' }; } } return propertySpecificKeyframeGroups; } function makeInterpolations(propertySpecificKeyframeGroups) { var 
interpolations = []; for (var groupName in propertySpecificKeyframeGroups) { var keyframes = propertySpecificKeyframeGroups[groupName]; for (var i = 0; i < keyframes.length - 1; i++) { var startIndex = i; var endIndex = i + 1; var startOffset = keyframes[startIndex].offset; var endOffset = keyframes[endIndex].offset; var applyFrom = startOffset; var applyTo = endOffset; if (i == 0) { applyFrom = -Infinity; WEB_ANIMATIONS_TESTING && console.assert(startOffset == 0); if (endOffset == 0) { endIndex = startIndex; } } if (i == keyframes.length - 2) { applyTo = Infinity; WEB_ANIMATIONS_TESTING && console.assert(endOffset == 1); if (startOffset == 1) { startIndex = endIndex; } } interpolations.push({ applyFrom: applyFrom, applyTo: applyTo, startOffset: keyframes[startIndex].offset, endOffset: keyframes[endIndex].offset, easingFunction: shared.parseEasingFunction(keyframes[startIndex].easing), property: groupName, interpolation: scope.propertyInterpolation(groupName, keyframes[startIndex].value, keyframes[endIndex].value) }); } } interpolations.sort(function(leftInterpolation, rightInterpolation) { return leftInterpolation.startOffset - rightInterpolation.startOffset; }); return interpolations; } if (WEB_ANIMATIONS_TESTING) { testing.makePropertySpecificKeyframeGroups = makePropertySpecificKeyframeGroups; testing.makeInterpolations = makeInterpolations; } })(webAnimationsShared, webAnimations1, webAnimationsTesting);
PypiClean
/Aestate-1.1.0.tar.gz/Aestate-1.1.0/aestate/work/sql/ProxyOpera.py
from concurrent.futures import ThreadPoolExecutor

# Module-level executor shared by every DbOperation task.
pool = ThreadPoolExecutor()


class DbOperation(object):
    """
    Important database operations migrated into this class.

    Each ``__xxx__`` method is a "task method": it receives its whole
    context through ``kwargs`` (config object, SQL-fields object, db
    utility, task uuid, ...) and returns a dict keyed by the task uuid.
    NOTE(review): the exact contracts of ``config_obj``, ``sqlFields``,
    ``ParseUtil`` and ``db_util`` are defined elsewhere in the package —
    confirm against their modules before changing this plumbing.
    """

    def __init__(self, *args, **kwargs):
        # Stored but not used by the task methods themselves; presumably
        # kept for callers that introspect the operation — TODO confirm.
        self.args = args
        self.kwargs = kwargs

    def start(self, *args, **kwargs):
        """
        Start a task.

        Attributes:
            func: the function to invoke
            t_local: thread-local context object whose attributes are
                merged into ``kwargs`` before submission
            __task_uuid__: key under which the task result is returned
        """
        # The function body to execute.
        func = kwargs['func']
        # Thread-local context.
        _lock = kwargs['t_local']
        name = kwargs['__task_uuid__']
        # # set up the task
        # _kw = aj.load(aj.parse(_lock))
        # Merge the thread-local object's attributes into the task kwargs.
        _kw = _lock.__dict__
        kwargs.update(_kw)
        _t = pool.submit(lambda x, y: func(*x, **y), args, kwargs)
        # _t = threading.Thread(target=func, args=args, kwargs=kwargs, name=name)
        # if not _lock.close_log:
        #     ALog.log(obj=_t, msg='RUNNING', task_name=name, LogObject=log_obj)
        # Blocks until the submitted task completes.
        result = _t.result()
        # Return only this task's entry from the result dict.
        return result[name]

    def __find_all__(self, *args, **kwargs):
        """
        Task method: find all rows (delegates to __find_by_field__).
        """
        return self.__find_by_field__(*args, **kwargs)

    def __find_by_field__(self, *args, **kwargs):
        """
        Task method: build a SELECT over the given field names and run it.
        """
        fields = kwargs['config_obj'].parse_key(*args, is_field=True,
                                                left=kwargs['sqlFields'].left_subscript,
                                                right=kwargs['sqlFields'].right_subscript)
        # SELECT <fields> FROM <table>
        sql_str = kwargs['sqlFields'].find_str + fields + kwargs['sqlFields'].from_str + kwargs['__table_name__']
        kwargs['sql'] = sql_str
        return self.__find_many__(**kwargs)

    def __find_many__(self, *args, **kwargs):
        """
        Task method: thin pass-through kept for API symmetry.
        """
        # kwargs['conf_obj'] = config_obj
        # kwargs = kwargs['ParseUtil'].find_print_sql(**kwargs)
        return self.__find_sql__(**kwargs)

    def __find_sql__(self, *args, **kwargs):
        """
        Task method: execute the prepared SELECT and map each row onto a
        new instance of ``kwargs['instance']``.
        """
        kwargs = kwargs['ParseUtil'].find_print_sql(**kwargs)
        _rs = kwargs['db_util'].select(**kwargs)
        result = []
        if _rs:
            for i in _rs:
                obj = kwargs['ParseUtil'].parse_obj(i, kwargs['instance'])
                result.append(obj)
        return {
            kwargs['__task_uuid__']: result
        }

    def __insert__(self, *args, **kwargs):
        """
        Task method: insert one pojo, or many when ``kwargs['many']`` is
        truthy.

        :param pojo: the pojo object (or iterable of pojos when many)
        """
        kwargs = kwargs['ParseUtil'].find_print_sql(**kwargs)
        kwargs = kwargs['ParseUtil'].find_last_id(**kwargs)
        kwargs['ParseUtil'].fieldExist(kwargs, 'pojo', raise_exception=True)
        if 'many' in kwargs and kwargs['many']:
            # Multi-row insert: take the sql statement from the first parse;
            # the per-item params are accumulated into kwargs['params'].
            for item in kwargs['pojo']:
                filed_list = kwargs['config_obj'].parse_insert_pojo(item,
                                                                    __table_name__=kwargs['__table_name__'],
                                                                    insert_str=kwargs['sqlFields'].insert_str,
                                                                    values_str=kwargs['sqlFields'].values_str)
                if 'params' not in kwargs.keys() or not isinstance(kwargs['params'], list):
                    kwargs['params'] = []
                kwargs['sql'] = filed_list['sql']
                kwargs['params'].append(filed_list['params'])
        else:
            filed_list = kwargs['config_obj'].parse_insert_pojo(kwargs['pojo'],
                                                                __table_name__=kwargs['__table_name__'],
                                                                insert_str=kwargs['sqlFields'].insert_str,
                                                                values_str=kwargs['sqlFields'].values_str)
            kwargs.update(filed_list)
        return {
            kwargs['__task_uuid__']: kwargs['db_util'].insert(**kwargs)
        }

    def __update__(self, *args, **kwargs):
        """
        Task method: UPDATE the row matching ``kwargs['key']`` with the
        pojo's current field values.
        """
        kwargs = kwargs['ParseUtil'].find_print_sql(**kwargs)
        kwargs = kwargs['ParseUtil'].find_last_id(**kwargs)
        kwargs['sql'], kwargs['params'] = kwargs['config_obj'].parse_update(kwargs['pojo'], kwargs['key'])
        return {
            kwargs['__task_uuid__']: kwargs['db_util'].update(**kwargs)
        }

    def __remove__(self, *args, **kwargs):
        """
        Task method: DELETE the row matching ``kwargs['key']``.
        Executed through db_util.update() since both are non-query statements.
        """
        kwargs = kwargs['ParseUtil'].find_print_sql(**kwargs)
        kwargs = kwargs['ParseUtil'].find_last_id(**kwargs)
        kwargs['sql'], kwargs['params'] = kwargs['config_obj'].parse_remove(kwargs['pojo'], kwargs['key'])
        return {
            kwargs['__task_uuid__']: kwargs['db_util'].update(**kwargs)
        }
PypiClean
/OctoBot-Evaluators-1.9.1.tar.gz/OctoBot-Evaluators-1.9.1/octobot_evaluators/evaluators/__init__.py
# Package aggregator for octobot_evaluators.evaluators: re-exports the
# evaluator base classes and factory helpers, and defines the lookup tables
# mapping matrix types to evaluator classes.
import octobot_evaluators.enums

from octobot_evaluators.evaluators import abstract_evaluator
from octobot_evaluators.evaluators.abstract_evaluator import (
    AbstractEvaluator,
)
from octobot_evaluators.evaluators import evaluator_factory
from octobot_evaluators.evaluators import realtime_evaluator
from octobot_evaluators.evaluators import social_evaluator
from octobot_evaluators.evaluators import TA_evaluator
from octobot_evaluators.evaluators import scripted_evaluator
from octobot_evaluators.evaluators import abstract_util
from octobot_evaluators.evaluators import strategy_evaluator
from octobot_evaluators.evaluators.evaluator_factory import (
    create_evaluator,
    create_and_start_all_type_evaluators,
    create_evaluators,
    create_temporary_evaluator_with_local_config,
)
from octobot_evaluators.evaluators.realtime_evaluator import (
    RealTimeEvaluator,
)
from octobot_evaluators.evaluators.social_evaluator import (
    SocialEvaluator,
)
from octobot_evaluators.evaluators.TA_evaluator import (
    TAEvaluator,
)
from octobot_evaluators.evaluators.scripted_evaluator import (
    ScriptedEvaluator,
)
from octobot_evaluators.evaluators.abstract_util import (
    AbstractUtil,
)
from octobot_evaluators.evaluators.strategy_evaluator import (
    StrategyEvaluator,
)

# Maps each EvaluatorMatrixTypes value (str) to the evaluator base class
# implementing that matrix type.
EvaluatorClassTypes = {
    octobot_evaluators.enums.EvaluatorMatrixTypes.TA.value: TAEvaluator,
    octobot_evaluators.enums.EvaluatorMatrixTypes.SOCIAL.value: SocialEvaluator,
    octobot_evaluators.enums.EvaluatorMatrixTypes.REAL_TIME.value: RealTimeEvaluator,
    octobot_evaluators.enums.EvaluatorMatrixTypes.SCRIPTED.value: ScriptedEvaluator,
    octobot_evaluators.enums.EvaluatorMatrixTypes.STRATEGIES.value: StrategyEvaluator
}

# Reverse helper: evaluator base-class *name* (str) -> matrix type enum member.
evaluator_class_str_to_matrix_type_dict = {
    "TAEvaluator": octobot_evaluators.enums.EvaluatorMatrixTypes.TA,
    "SocialEvaluator": octobot_evaluators.enums.EvaluatorMatrixTypes.SOCIAL,
    "RealTimeEvaluator": octobot_evaluators.enums.EvaluatorMatrixTypes.REAL_TIME,
    "ScriptedEvaluator": octobot_evaluators.enums.EvaluatorMatrixTypes.SCRIPTED,
    "StrategyEvaluator": octobot_evaluators.enums.EvaluatorMatrixTypes.STRATEGIES
}

# Public API of this package.
__all__ = [
    "RealTimeEvaluator",
    "AbstractEvaluator",
    "SocialEvaluator",
    "TAEvaluator",
    "ScriptedEvaluator",
    "AbstractUtil",
    "StrategyEvaluator",
    "EvaluatorClassTypes",
    "create_evaluator",
    "create_and_start_all_type_evaluators",
    "create_evaluators",
    "create_temporary_evaluator_with_local_config",
    "evaluator_class_str_to_matrix_type_dict",
]
PypiClean
/Mezzanine-6.0.0.tar.gz/Mezzanine-6.0.0/mezzanine/blog/management/base.py
from urllib.parse import urlparse

from django.contrib.auth import get_user_model
from django.contrib.redirects.models import Redirect
from django.contrib.sites.models import Site
from django.core.management.base import BaseCommand, CommandError
from django.utils.encoding import force_str
from django.utils.html import strip_tags

from mezzanine.blog.models import BlogCategory, BlogPost
from mezzanine.conf import settings
from mezzanine.core.models import CONTENT_STATUS_DRAFT, CONTENT_STATUS_PUBLISHED
from mezzanine.generic.models import Keyword, ThreadedComment
from mezzanine.pages.models import RichTextPage
from mezzanine.utils.html import decode_entities

User = get_user_model()


class BaseImporterCommand(BaseCommand):
    """
    Base importer command for blogging platform specific management
    commands to subclass when importing blog posts into Mezzanine.
    The ``handle_import`` method should be overridden to provide the
    import mechanism specific to the blogging platform being dealt with.
    """

    def add_arguments(self, parser):
        # Standard Django management-command argument registration.
        parser.add_argument(
            "-m",
            "--mezzanine-user",
            dest="mezzanine_user",
            help="Mezzanine username to assign the imported blog posts to.",
        )
        parser.add_argument(
            "--noinput",
            action="store_false",
            dest="interactive",
            help="Do NOT prompt for input of any kind. "
            "Fields will be truncated if too long.",
        )
        parser.add_argument(
            "-n",
            "--navigation",
            action="store_true",
            dest="in_navigation",
            help="Add any imported pages to navigation",
        )
        parser.add_argument(
            "-f",
            "--footer",
            action="store_true",
            dest="in_footer",
            help="Add any imported pages to footer navigation",
        )

    def __init__(self, **kwargs):
        # Accumulators filled by add_post()/add_page() during handle_import(),
        # then flushed to the database by handle().
        self.posts = []
        self.pages = []
        super().__init__(**kwargs)

    def add_post(
        self,
        title=None,
        content=None,
        old_url=None,
        pub_date=None,
        tags=None,
        categories=None,
        comments=None,
    ):
        """
        Adds a post to the post list for processing.

        - ``title`` and ``content`` are strings for the post.
        - ``old_url`` is a string that a redirect will be created for.
        - ``pub_date`` is assumed to be a ``datetime`` object.
        - ``tags`` and ``categories`` are sequences of strings.
        - ``comments`` is a sequence of dicts - each dict should be the
          return value of ``add_comment``.
        """
        if not title:
            # Fall back to the first sentence of the content as the title.
            title = strip_tags(content).split(". ")[0]
        title = decode_entities(title)
        if categories is None:
            categories = []
        if tags is None:
            tags = []
        if comments is None:
            comments = []
        self.posts.append(
            {
                "title": force_str(title),
                "publish_date": pub_date,
                "content": force_str(content),
                "categories": categories,
                "tags": tags,
                "comments": comments,
                "old_url": old_url,
            }
        )
        # Returned so the caller can attach comments to the post just added.
        return self.posts[-1]

    def add_page(
        self,
        title=None,
        content=None,
        old_url=None,
        tags=None,
        old_id=None,
        old_parent_id=None,
    ):
        """
        Adds a page to the list of pages to be imported - used by the
        Wordpress importer.
        """
        if not title:
            text = decode_entities(strip_tags(content)).replace("\n", " ")
            title = text.split(". ")[0]
        if tags is None:
            tags = []
        self.pages.append(
            {
                "title": title,
                "content": content,
                "tags": tags,
                "old_url": old_url,
                # old_id/old_parent_id keep the source CMS hierarchy so
                # parent links can be rebuilt after all pages are created.
                "old_id": old_id,
                "old_parent_id": old_parent_id,
            }
        )

    def add_comment(
        self, post=None, name=None, email=None, pub_date=None, website=None, body=None
    ):
        """
        Adds a comment to the post provided.
        """
        if post is None:
            # Default to the most recently added post.
            if not self.posts:
                raise CommandError("Cannot add comments without posts")
            post = self.posts[-1]
        post["comments"].append(
            {
                "user_name": name,
                "user_email": email,
                "submit_date": pub_date,
                "user_url": website,
                "comment": body,
            }
        )

    def trunc(self, model, prompt, **fields):
        """
        Truncates fields values for the given model. Prompts for a new
        value if truncation occurs.
        """
        for field_name, value in fields.items():
            field = model._meta.get_field(field_name)
            max_length = getattr(field, "max_length", None)
            if not max_length:
                continue
            elif not prompt:
                # Non-interactive mode: silently truncate.
                fields[field_name] = value[:max_length]
                continue
            while len(value) > max_length:
                encoded_value = value.encode("utf-8")
                new_value = input(
                    "The value for the field %s.%s exceeds "
                    "its maximum length of %s chars: %s\n\nEnter a new value "
                    "for it, or press return to have it truncated: "
                    % (model.__name__, field_name, max_length, encoded_value)
                )
                # Empty input means "accept truncation".
                value = new_value if new_value else value[:max_length]
            fields[field_name] = value
        return fields

    def handle(self, *args, **options):
        """
        Processes the converted data into the Mezzanine database correctly.

        Attributes:
            mezzanine_user: the user to put this data in against
            date_format: the format the dates are in for posts and comments
        """
        mezzanine_user = options.get("mezzanine_user")
        site = Site.objects.get_current()
        verbosity = int(options.get("verbosity", 1))
        prompt = options.get("interactive")

        # Validate the Mezzanine user.
        if mezzanine_user is None:
            raise CommandError("No Mezzanine user has been specified")
        try:
            mezzanine_user = User.objects.get(username=mezzanine_user)
        except User.DoesNotExist:
            raise CommandError("Invalid Mezzanine user: %s" % mezzanine_user)

        # Run the subclassed ``handle_import`` and save posts, tags,
        # categories, and comments to the DB.
        self.handle_import(options)
        for post_data in self.posts:
            categories = post_data.pop("categories")
            tags = post_data.pop("tags")
            comments = post_data.pop("comments")
            old_url = post_data.pop("old_url")
            post_data = self.trunc(BlogPost, prompt, **post_data)
            initial = {
                "title": post_data.pop("title"),
                "user": mezzanine_user,
            }
            # A post without a publish date is imported as a draft.
            if post_data["publish_date"] is None:
                post_data["status"] = CONTENT_STATUS_DRAFT
            post, created = BlogPost.objects.get_or_create(**initial)
            for k, v in post_data.items():
                setattr(post, k, v)
            post.save()
            if created and verbosity >= 1:
                print("Imported post: %s" % post)
            for name in categories:
                cat = self.trunc(BlogCategory, prompt, title=name)
                if not cat["title"]:
                    continue
                cat, created = BlogCategory.objects.get_or_create(**cat)
                if created and verbosity >= 1:
                    print("Imported category: %s" % cat)
                post.categories.add(cat)
            for comment in comments:
                comment = self.trunc(ThreadedComment, prompt, **comment)
                comment["site"] = site
                post.comments.create(**comment)
                if verbosity >= 1:
                    print("Imported comment by: %s" % comment["user_name"])
            self.add_meta(post, tags, prompt, verbosity, old_url)

        # Create any pages imported (Wordpress can include pages)
        in_menus = []
        footer = [
            menu[0]
            for menu in settings.PAGE_MENU_TEMPLATES
            if menu[-1] == "pages/menus/footer.html"
        ]
        # -n puts pages in all menus (minus footer unless -f); -f alone
        # restricts them to the footer menu only.
        if options["in_navigation"]:
            in_menus = [menu[0] for menu in settings.PAGE_MENU_TEMPLATES]
            if footer and not options["in_footer"]:
                in_menus.remove(footer[0])
        elif footer and options["in_footer"]:
            in_menus = footer
        parents = []
        for page in self.pages:
            tags = page.pop("tags")
            old_url = page.pop("old_url")
            old_id = page.pop("old_id")
            old_parent_id = page.pop("old_parent_id")
            page = self.trunc(RichTextPage, prompt, **page)
            page["status"] = CONTENT_STATUS_PUBLISHED
            page["in_menus"] = in_menus
            page, created = RichTextPage.objects.get_or_create(**page)
            if created and verbosity >= 1:
                print("Imported page: %s" % page)
            self.add_meta(page, tags, prompt, verbosity, old_url)
            parents.append(
                {
                    "old_id": old_id,
                    "old_parent_id": old_parent_id,
                    "page": page,
                }
            )
        # Second pass: rebuild the parent/child hierarchy using the ids
        # recorded from the source CMS.
        for obj in parents:
            if obj["old_parent_id"]:
                for parent in parents:
                    if parent["old_id"] == obj["old_parent_id"]:
                        obj["page"].parent = parent["page"]
                        obj["page"].save()
                        break

    def add_meta(self, obj, tags, prompt, verbosity, old_url=None):
        """
        Adds tags and a redirect for the given obj, which is a blog
        post or a page.
        """
        for tag in tags:
            keyword = self.trunc(Keyword, prompt, title=tag)
            keyword, created = Keyword.objects.get_or_create_iexact(**keyword)
            obj.keywords.create(keyword=keyword)
            if created and verbosity >= 1:
                print("Imported tag: %s" % keyword)
        if old_url is not None:
            old_path = urlparse(old_url).path
            # Don't create a redirect for the site root.
            if not old_path.strip("/"):
                return
            redirect = self.trunc(Redirect, prompt, old_path=old_path)
            redirect["site"] = Site.objects.get_current()
            redirect, created = Redirect.objects.get_or_create(**redirect)
            redirect.new_path = obj.get_absolute_url()
            redirect.save()
            if created and verbosity >= 1:
                print("Created redirect for: %s" % old_url)

    def handle_import(self, options):
        """
        Should be overridden by subclasses - performs the conversion from
        the originating data source into the lists of posts and comments
        ready for processing.
        """
        raise NotImplementedError
PypiClean
/Audit-Alembic-0.1.0.tar.gz/Audit-Alembic-0.1.0/ci/bootstrap.py
from __future__ import absolute_import, print_function, unicode_literals import os import sys from os.path import abspath from os.path import dirname from os.path import exists from os.path import join if __name__ == "__main__": base_path = dirname(dirname(abspath(__file__))) print("Project path: {0}".format(base_path)) env_path = join(base_path, ".tox", "bootstrap") if sys.platform == "win32": bin_path = join(env_path, "Scripts") else: bin_path = join(env_path, "bin") if not exists(env_path): import subprocess print("Making bootstrap env in: {0} ...".format(env_path)) try: subprocess.check_call(["virtualenv", env_path]) except subprocess.CalledProcessError: subprocess.check_call([sys.executable, "-m", "virtualenv", env_path]) print("Installing `jinja2` into bootstrap environment...") subprocess.check_call([join(bin_path, "pip"), "install", "jinja2"]) activate = join(bin_path, "activate_this.py") # noinspection PyCompatibility exec(compile(open(activate, "rb").read(), activate, "exec"), dict(__file__=activate)) import jinja2 import subprocess jinja = jinja2.Environment( loader=jinja2.FileSystemLoader(join(base_path, "ci", "templates")), trim_blocks=True, lstrip_blocks=True, keep_trailing_newline=True ) tox_environments = [ line.strip() # WARNING: 'tox' must be installed globally or in the project's virtualenv for line in subprocess.check_output(['tox', '--listenvs'], universal_newlines=True).splitlines() ] tox_environments = [line for line in tox_environments if line not in ['clean', 'report', 'docs', 'check']] for name in os.listdir(join("ci", "templates")): with open(join(base_path, name), "w") as fh: fh.write(jinja.get_template(name).render(tox_environments=tox_environments)) print("Wrote {}".format(name)) print("DONE.")
PypiClean
/Abies-0.0.5.tar.gz/Abies-0.0.5/extern/pybind11/docs/advanced/pycpp/numpy.rst
.. _numpy: NumPy ##### Buffer protocol =============== Python supports an extremely general and convenient approach for exchanging data between plugin libraries. Types can expose a buffer view [#f2]_, which provides fast direct access to the raw internal data representation. Suppose we want to bind the following simplistic Matrix class: .. code-block:: cpp class Matrix { public: Matrix(size_t rows, size_t cols) : m_rows(rows), m_cols(cols) { m_data = new float[rows*cols]; } float *data() { return m_data; } size_t rows() const { return m_rows; } size_t cols() const { return m_cols; } private: size_t m_rows, m_cols; float *m_data; }; The following binding code exposes the ``Matrix`` contents as a buffer object, making it possible to cast Matrices into NumPy arrays. It is even possible to completely avoid copy operations with Python expressions like ``np.array(matrix_instance, copy = False)``. .. code-block:: cpp py::class_<Matrix>(m, "Matrix", py::buffer_protocol()) .def_buffer([](Matrix &m) -> py::buffer_info { return py::buffer_info( m.data(), /* Pointer to buffer */ sizeof(float), /* Size of one scalar */ py::format_descriptor<float>::format(), /* Python struct-style format descriptor */ 2, /* Number of dimensions */ { m.rows(), m.cols() }, /* Buffer dimensions */ { sizeof(float) * m.cols(), /* Strides (in bytes) for each index */ sizeof(float) } ); }); Supporting the buffer protocol in a new type involves specifying the special ``py::buffer_protocol()`` tag in the ``py::class_`` constructor and calling the ``def_buffer()`` method with a lambda function that creates a ``py::buffer_info`` description record on demand describing a given matrix instance. The contents of ``py::buffer_info`` mirror the Python buffer protocol specification. .. 
code-block:: cpp struct buffer_info { void *ptr; py::ssize_t itemsize; std::string format; py::ssize_t ndim; std::vector<py::ssize_t> shape; std::vector<py::ssize_t> strides; }; To create a C++ function that can take a Python buffer object as an argument, simply use the type ``py::buffer`` as one of its arguments. Buffers can exist in a great variety of configurations, hence some safety checks are usually necessary in the function body. Below, you can see a basic example on how to define a custom constructor for the Eigen double precision matrix (``Eigen::MatrixXd``) type, which supports initialization from compatible buffer objects (e.g. a NumPy matrix). .. code-block:: cpp /* Bind MatrixXd (or some other Eigen type) to Python */ typedef Eigen::MatrixXd Matrix; typedef Matrix::Scalar Scalar; constexpr bool rowMajor = Matrix::Flags & Eigen::RowMajorBit; py::class_<Matrix>(m, "Matrix", py::buffer_protocol()) .def(py::init([](py::buffer b) { typedef Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic> Strides; /* Request a buffer descriptor from Python */ py::buffer_info info = b.request(); /* Some sanity checks ... */ if (info.format != py::format_descriptor<Scalar>::format()) throw std::runtime_error("Incompatible format: expected a double array!"); if (info.ndim != 2) throw std::runtime_error("Incompatible buffer dimension!"); auto strides = Strides( info.strides[rowMajor ? 0 : 1] / (py::ssize_t)sizeof(Scalar), info.strides[rowMajor ? 1 : 0] / (py::ssize_t)sizeof(Scalar)); auto map = Eigen::Map<Matrix, 0, Strides>( static_cast<Scalar *>(info.ptr), info.shape[0], info.shape[1], strides); return Matrix(map); })); For reference, the ``def_buffer()`` call for this Eigen data type should look as follows: .. 
code-block:: cpp .def_buffer([](Matrix &m) -> py::buffer_info { return py::buffer_info( m.data(), /* Pointer to buffer */ sizeof(Scalar), /* Size of one scalar */ py::format_descriptor<Scalar>::format(), /* Python struct-style format descriptor */ 2, /* Number of dimensions */ { m.rows(), m.cols() }, /* Buffer dimensions */ { sizeof(Scalar) * (rowMajor ? m.cols() : 1), sizeof(Scalar) * (rowMajor ? 1 : m.rows()) } /* Strides (in bytes) for each index */ ); }) For a much easier approach of binding Eigen types (although with some limitations), refer to the section on :doc:`/advanced/cast/eigen`. .. seealso:: The file :file:`tests/test_buffers.cpp` contains a complete example that demonstrates using the buffer protocol with pybind11 in more detail. .. [#f2] http://docs.python.org/3/c-api/buffer.html Arrays ====== By exchanging ``py::buffer`` with ``py::array`` in the above snippet, we can restrict the function so that it only accepts NumPy arrays (rather than any type of Python object satisfying the buffer protocol). In many situations, we want to define a function which only accepts a NumPy array of a certain data type. This is possible via the ``py::array_t<T>`` template. For instance, the following function requires the argument to be a NumPy array containing double precision values. .. code-block:: cpp void f(py::array_t<double> array); When it is invoked with a different type (e.g. an integer or a list of integers), the binding code will attempt to cast the input into a NumPy array of the requested type. This feature requires the :file:`pybind11/numpy.h` header to be included. Note that :file:`pybind11/numpy.h` does not depend on the NumPy headers, and thus can be used without declaring a build-time dependency on NumPy; NumPy>=1.7.0 is a runtime dependency. Data in NumPy arrays is not guaranteed to packed in a dense manner; furthermore, entries can be separated by arbitrary column and row strides. 
Sometimes, it can be useful to require a function to only accept dense arrays using either the C (row-major) or Fortran (column-major) ordering. This can be accomplished via a second template argument with values ``py::array::c_style`` or ``py::array::f_style``. .. code-block:: cpp void f(py::array_t<double, py::array::c_style | py::array::forcecast> array); The ``py::array::forcecast`` argument is the default value of the second template parameter, and it ensures that non-conforming arguments are converted into an array satisfying the specified requirements instead of trying the next function overload. There are several methods on arrays; the methods listed below under references work, as well as the following functions based on the NumPy API: - ``.dtype()`` returns the type of the contained values. - ``.strides()`` returns a pointer to the strides of the array (optionally pass an integer axis to get a number). - ``.flags()`` returns the flag settings. ``.writable()`` and ``.owndata()`` are directly available. - ``.offset_at()`` returns the offset (optionally pass indices). - ``.squeeze()`` returns a view with length-1 axes removed. - ``.view(dtype)`` returns a view of the array with a different dtype. - ``.reshape({i, j, ...})`` returns a view of the array with a different shape. ``.resize({...})`` is also available. - ``.index_at(i, j, ...)`` gets the count from the beginning to a given index. There are also several methods for getting references (described below). Structured types ================ In order for ``py::array_t`` to work with structured (record) types, we first need to register the memory layout of the type. This can be done via ``PYBIND11_NUMPY_DTYPE`` macro, called in the plugin definition code, which expects the type followed by field names: .. code-block:: cpp struct A { int x; double y; }; struct B { int z; A a; }; // ... PYBIND11_MODULE(test, m) { // ... 
PYBIND11_NUMPY_DTYPE(A, x, y); PYBIND11_NUMPY_DTYPE(B, z, a); /* now both A and B can be used as template arguments to py::array_t */ } The structure should consist of fundamental arithmetic types, ``std::complex``, previously registered substructures, and arrays of any of the above. Both C++ arrays and ``std::array`` are supported. While there is a static assertion to prevent many types of unsupported structures, it is still the user's responsibility to use only "plain" structures that can be safely manipulated as raw memory without violating invariants. Vectorizing functions ===================== Suppose we want to bind a function with the following signature to Python so that it can process arbitrary NumPy array arguments (vectors, matrices, general N-D arrays) in addition to its normal arguments: .. code-block:: cpp double my_func(int x, float y, double z); After including the ``pybind11/numpy.h`` header, this is extremely simple: .. code-block:: cpp m.def("vectorized_func", py::vectorize(my_func)); Invoking the function like below causes 4 calls to be made to ``my_func`` with each of the array elements. The significant advantage of this compared to solutions like ``numpy.vectorize()`` is that the loop over the elements runs entirely on the C++ side and can be crunched down into a tight, optimized loop by the compiler. The result is returned as a NumPy array of type ``numpy.dtype.float64``. .. code-block:: pycon >>> x = np.array([[1, 3], [5, 7]]) >>> y = np.array([[2, 4], [6, 8]]) >>> z = 3 >>> result = vectorized_func(x, y, z) The scalar argument ``z`` is transparently replicated 4 times. The input arrays ``x`` and ``y`` are automatically converted into the right types (they are of type ``numpy.dtype.int64`` but need to be ``numpy.dtype.int32`` and ``numpy.dtype.float32``, respectively). .. note:: Only arithmetic, complex, and POD types passed by value or by ``const &`` reference are vectorized; all other arguments are passed through as-is. 
Functions taking rvalue reference arguments cannot be vectorized. In cases where the computation is too complicated to be reduced to ``vectorize``, it will be necessary to create and access the buffer contents manually. The following snippet contains a complete example that shows how this works (the code is somewhat contrived, since it could have been done more simply using ``vectorize``). .. code-block:: cpp #include <pybind11/pybind11.h> #include <pybind11/numpy.h> namespace py = pybind11; py::array_t<double> add_arrays(py::array_t<double> input1, py::array_t<double> input2) { py::buffer_info buf1 = input1.request(), buf2 = input2.request(); if (buf1.ndim != 1 || buf2.ndim != 1) throw std::runtime_error("Number of dimensions must be one"); if (buf1.size != buf2.size) throw std::runtime_error("Input shapes must match"); /* No pointer is passed, so NumPy will allocate the buffer */ auto result = py::array_t<double>(buf1.size); py::buffer_info buf3 = result.request(); double *ptr1 = static_cast<double *>(buf1.ptr); double *ptr2 = static_cast<double *>(buf2.ptr); double *ptr3 = static_cast<double *>(buf3.ptr); for (size_t idx = 0; idx < buf1.shape[0]; idx++) ptr3[idx] = ptr1[idx] + ptr2[idx]; return result; } PYBIND11_MODULE(test, m) { m.def("add_arrays", &add_arrays, "Add two NumPy arrays"); } .. seealso:: The file :file:`tests/test_numpy_vectorize.cpp` contains a complete example that demonstrates using :func:`vectorize` in more detail. Direct access ============= For performance reasons, particularly when dealing with very large arrays, it is often desirable to directly access array elements without internal checking of dimensions and bounds on every access when indices are known to be already valid. 
To avoid such checks, the ``array`` class and ``array_t<T>`` template class offer an unchecked proxy object that can be used for this unchecked access through the ``unchecked<N>`` and ``mutable_unchecked<N>`` methods, where ``N`` gives the required dimensionality of the array: .. code-block:: cpp m.def("sum_3d", [](py::array_t<double> x) { auto r = x.unchecked<3>(); // x must have ndim = 3; can be non-writeable double sum = 0; for (py::ssize_t i = 0; i < r.shape(0); i++) for (py::ssize_t j = 0; j < r.shape(1); j++) for (py::ssize_t k = 0; k < r.shape(2); k++) sum += r(i, j, k); return sum; }); m.def("increment_3d", [](py::array_t<double> x) { auto r = x.mutable_unchecked<3>(); // Will throw if ndim != 3 or flags.writeable is false for (py::ssize_t i = 0; i < r.shape(0); i++) for (py::ssize_t j = 0; j < r.shape(1); j++) for (py::ssize_t k = 0; k < r.shape(2); k++) r(i, j, k) += 1.0; }, py::arg().noconvert()); To obtain the proxy from an ``array`` object, you must specify both the data type and number of dimensions as template arguments, such as ``auto r = myarray.mutable_unchecked<float, 2>()``. If the number of dimensions is not known at compile time, you can omit the dimensions template parameter (i.e. calling ``arr_t.unchecked()`` or ``arr.unchecked<T>()``. This will give you a proxy object that works in the same way, but results in less optimizable code and thus a small efficiency loss in tight loops. Note that the returned proxy object directly references the array's data, and only reads its shape, strides, and writeable flag when constructed. You must take care to ensure that the referenced array is not destroyed or reshaped for the duration of the returned object, typically by limiting the scope of the returned instance. 
The returned proxy object supports some of the same methods as ``py::array`` so that it can be used as a drop-in replacement for some existing, index-checked uses of ``py::array``: - ``.ndim()`` returns the number of dimensions - ``.data(1, 2, ...)`` and ``r.mutable_data(1, 2, ...)``` returns a pointer to the ``const T`` or ``T`` data, respectively, at the given indices. The latter is only available to proxies obtained via ``a.mutable_unchecked()``. - ``.itemsize()`` returns the size of an item in bytes, i.e. ``sizeof(T)``. - ``.ndim()`` returns the number of dimensions. - ``.shape(n)`` returns the size of dimension ``n`` - ``.size()`` returns the total number of elements (i.e. the product of the shapes). - ``.nbytes()`` returns the number of bytes used by the referenced elements (i.e. ``itemsize()`` times ``size()``). .. seealso:: The file :file:`tests/test_numpy_array.cpp` contains additional examples demonstrating the use of this feature. Ellipsis ======== Python 3 provides a convenient ``...`` ellipsis notation that is often used to slice multidimensional arrays. For instance, the following snippet extracts the middle dimensions of a tensor with the first and last index set to zero. In Python 2, the syntactic sugar ``...`` is not available, but the singleton ``Ellipsis`` (of type ``ellipsis``) can still be used directly. .. code-block:: python a = ... # a NumPy array b = a[0, ..., 0] The function ``py::ellipsis()`` function can be used to perform the same operation on the C++ side: .. code-block:: cpp py::array a = /* A NumPy array */; py::array b = a[py::make_tuple(0, py::ellipsis(), 0)]; .. versionchanged:: 2.6 ``py::ellipsis()`` is now also available in Python 2. Memory view =========== For a case when we simply want to provide a direct accessor to C/C++ buffer without a concrete class object, we can return a ``memoryview`` object. Suppose we wish to expose a ``memoryview`` for 2x4 uint8_t array, we can do the following: .. 
code-block:: cpp const uint8_t buffer[] = { 0, 1, 2, 3, 4, 5, 6, 7 }; m.def("get_memoryview2d", []() { return py::memoryview::from_buffer( buffer, // buffer pointer { 2, 4 }, // shape (rows, cols) { sizeof(uint8_t) * 4, sizeof(uint8_t) } // strides in bytes ); }) This approach is meant for providing a ``memoryview`` for a C/C++ buffer not managed by Python. The user is responsible for managing the lifetime of the buffer. Using a ``memoryview`` created in this way after deleting the buffer in C++ side results in undefined behavior. We can also use ``memoryview::from_memory`` for a simple 1D contiguous buffer: .. code-block:: cpp m.def("get_memoryview1d", []() { return py::memoryview::from_memory( buffer, // buffer pointer sizeof(uint8_t) * 8 // buffer size ); }) .. note:: ``memoryview::from_memory`` is not available in Python 2. .. versionchanged:: 2.6 ``memoryview::from_memory`` added.
PypiClean
/NativDebugging-35.tar.gz/NativDebugging-35/src/DumpBase.py
from abc import ABCMeta, abstractmethod
from .Interfaces import ReadError
from struct import pack


class DumpBase(object):
    """Basic functions to save an entire memory snapshot to a file.

    Subclasses are expected to provide ``getMemoryMap``, ``readMemory``,
    ``getPointerSize``, ``getDefaultDataSize`` and ``getEndianity``.
    """

    # Supported on-disk formats.
    DUMP_TYPE_NATIV_DEBUGGING = 0   # atom-based format: NDMD/INFO header + per-region metadata
    DUMP_TYPE_RAW = 1               # raw concatenation of every region's bytes

    @abstractmethod
    def getMemoryMap(self):
        """Return a dict with information about all memory regions.

        dict[baseAddress] = (name, regionSize, regionAttributes)
        """
        # NOTE: the class does not use ABCMeta as its metaclass, so the
        # decorator is not enforced at instantiation time; this raise is
        # the actual guard against calling the base implementation.
        raise NotImplementedError("Pure function call")

    @staticmethod
    def _toBytes(data):
        """Encode str payloads as UTF-8; pass bytes through unchanged."""
        if isinstance(data, str):
            return data.encode('utf-8')
        return data

    def dumpToFile(self, dumpFile, dumpType=None, comments=None, isVerbose=False):
        """Write a memory snapshot to *dumpFile*.

        dumpFile  -- an open binary file object, or a path to create.
        dumpType  -- DUMP_TYPE_NATIV_DEBUGGING (default) or DUMP_TYPE_RAW.
        comments  -- optional payload stored in a trailing CMNT atom
                     (atom formats only).
        isVerbose -- print a note for each unreadable region chunk.
        """
        PAGE_SIZE = 0x400
        if dumpType is None:
            dumpType = self.DUMP_TYPE_NATIV_DEBUGGING
        # Accept either a writable file object or a path.  (The original
        # tested ``isinstance(dumpFile, file)``; the ``file`` builtin only
        # exists in Python 2 and raised NameError here on Python 3.)
        if not hasattr(dumpFile, 'write'):
            dumpFile = open(dumpFile, 'wb')
        if self.DUMP_TYPE_NATIV_DEBUGGING == dumpType:
            self._writeDumpHeader(dumpFile)
        memMap = self.getMemoryMap()
        for addr in sorted(memMap.keys()):
            regionInfo = memMap[addr]
            regionName = regionInfo[0]
            regionSize = regionInfo[1]
            regionAttrib = regionInfo[2]
            bytesLeft = regionSize
            if self.DUMP_TYPE_NATIV_DEBUGGING == dumpType:
                # Tag names must be bytes on Python 3 ('REGN'/'NAME' were
                # str literals in the original, which broke _writeAtom).
                self._writeAtom(dumpFile, b'REGN', [
                    pack('>Q', addr),
                    pack('>Q', regionSize),
                    pack('>L', regionAttrib),
                    self._makeAtom(b'NAME', regionName)])
                # The DATA atom header is written by hand because its payload
                # is streamed page by page below rather than built in memory.
                dumpFile.write(b'DATA' + pack('>Q', regionSize))
            while 0 < bytesLeft:
                currentReadSize = min(bytesLeft, PAGE_SIZE)
                try:
                    page = self.readMemory(addr, currentReadSize)
                except ReadError:
                    if isVerbose:
                        print("Failed to read data from address %x to %x" % (addr, addr + currentReadSize))
                    # Zero-fill unreadable chunks so file offsets stay correct.
                    page = b'\x00' * currentReadSize
                bytesLeft -= currentReadSize
                addr += currentReadSize
                dumpFile.write(page)
        if comments is not None and self.DUMP_TYPE_NATIV_DEBUGGING == dumpType:
            self._writeAtom(dumpFile, b'CMNT', comments)

    def _writeDumpHeader(self, dumpFile):
        """Write the NDMD magic atom and the INFO atom describing the target."""
        self._writeAtom(dumpFile, b'NDMD', b'')
        self._writeAtom(dumpFile, b'INFO', [
            pack('>L', self.getPointerSize()),
            pack('>L', self.getDefaultDataSize()),
            self.getEndianity()])

    def _writeAtom(self, dumpFile, name, data):
        """Write one atom: 4-byte tag, big-endian u64 length, payload.

        *data* may be bytes/str or a list of bytes/str fragments.
        """
        name = self._toBytes(name)
        if len(name) != 4:
            raise ValueError("Invalid tag name %s" % name)
        if isinstance(data, list):
            chunks = [self._toBytes(x) for x in data]
        else:
            chunks = [self._toBytes(data)]
        dumpFile.write(name)
        dumpFile.write(pack('>Q', sum(len(x) for x in chunks)))
        for chunk in chunks:
            dumpFile.write(chunk)

    def _makeAtom(self, name, data):
        """Return one atom (tag + length + payload) as a bytes object."""
        name = self._toBytes(name)
        if len(name) != 4:
            raise ValueError("Invalid tag name %s" % name)
        # The original accumulated into '' (str) and then concatenated
        # bytes, which raises TypeError on Python 3; build bytes throughout.
        if isinstance(data, list):
            allData = b''.join(self._toBytes(x) for x in data)
        else:
            allData = self._toBytes(data)
        return name + pack('>Q', len(allData)) + allData
PypiClean
/ComputeJobRecorder-0.1.1-py3-none-any.whl/ComputeJobRecorder-0.1.1.data/scripts/cjr_record.py
# This file is part of 'compute_job_recorder'
# A library for recording compute job progress.
#
# Copyright 2019 Pete Bunting
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Purpose: Command line tool for running the system.
#
# Author: Pete Bunting
# Email: pfb@aber.ac.uk
# Date: 08/02/2019
# Version: 1.0
#
# History:
# Version 1.0 - Created.

import argparse
import json

import cjrlib.cjr_recorder

if __name__ == "__main__":
    # Command line interface for recording the status of a compute task.
    parser = argparse.ArgumentParser()
    parser.add_argument("-s", "--status", type=str, required=True, default=None,
                        choices=["START", "UPDATE", "FINISH"],
                        help="Specify the job status.")
    parser.add_argument("-j", "--jobname", type=str, required=True,
                        help="Specify the job name, a generic name for a group of jobs.")
    parser.add_argument("-t", "--taskid", type=str, required=True,
                        help="Specify a job ID, unique within the 'jobname'.")
    parser.add_argument("-v", "--version", type=int, default=0, required=False,
                        help="Specify the version of the job and task.")
    parser.add_argument("-i", "--taskinfo", type=str, required=True,
                        help='''Specify the status info, this is stored in JSON and should be
                        provided in that format:
                        * START - input parameters, helpful to include enough information to re-run the job.
                        * UPDATE - any information on job progress.
                        * FINISH - information on completion.
                        ''')
    parser.add_argument("--printprogress", action='store_true', default=False,
                        help="Specify that progress statements should be printed to the console - "
                             "useful for debugging.")

    args = parser.parse_args()

    # Translate the command line status string into the recorder's enum.
    # (argparse choices already restricts the value to these three names.)
    status_lookup = {
        "START": cjrlib.cjr_recorder.JobStatus.START,
        "FINISH": cjrlib.cjr_recorder.JobStatus.FINISH,
        "UPDATE": cjrlib.cjr_recorder.JobStatus.UPDATE,
    }
    if args.status not in status_lookup:
        raise Exception("Status provided was not recognised.")
    status = status_lookup[args.status]

    # Parse the task information, which must be valid JSON.
    task_info_dict = json.loads(args.taskinfo)

    cjrlib.cjr_recorder.record_task_status(status, args.jobname, args.taskid,
                                           args.version, task_info_dict,
                                           args.printprogress)
PypiClean
/Flask-Maple-0.5.6.tar.gz/Flask-Maple-0.5.6/flask_maple/serializer.py
from sqlalchemy import inspect
from sqlalchemy.orm.interfaces import (ONETOMANY, MANYTOMANY)
from sqlalchemy.types import DateTime, Integer
from flask_sqlalchemy import Pagination


class Column(object):
    """Convenience wrapper exposing grouped views of a model's columns."""

    def __init__(self, model):
        self.inp = inspect(model)
        self.columns = self.inp.columns

    @property
    def primary_columns(self):
        """Columns that are part of the primary key."""
        return [column for column in self.columns if column.primary_key]

    @property
    def nullable_columns(self):
        """Columns that may hold NULL."""
        return [column for column in self.columns if column.nullable]

    @property
    def notnullable_columns(self):
        """Non-nullable columns, excluding primary-key columns."""
        return [
            column for column in self.columns
            if not column.nullable and not column.primary_key
        ]

    @property
    def unique_columns(self):
        """Columns carrying a UNIQUE constraint."""
        return [column for column in self.columns if column.unique]

    @property
    def relation_columns(self):
        """All relationship properties declared on the model."""
        return list(self.inp.relationships)

    @property
    def datetime_columns(self):
        """Columns whose SQL type is DateTime."""
        return [
            column for column in self.columns
            if isinstance(column.type, DateTime)
        ]

    @property
    def integer_columns(self):
        """Columns whose SQL type is Integer."""
        return [
            column for column in self.columns
            if isinstance(column.type, Integer)
        ]

    @property
    def foreign_keys(self):
        """Columns that carry a foreign-key constraint.

        The original built this with a side-effecting list comprehension
        (``[columns.extend(...) for ...]``); a flattening comprehension is
        the idiomatic equivalent.
        """
        keys = [fk for column in self.columns for fk in column.foreign_keys]
        return [fk.parent for fk in keys]


class PageInfo(object):
    '''
    just for flask_sqlalchemy
    '''

    def __init__(self, paginate):
        self.paginate = paginate

    def as_dict(self):
        """Return the pagination state as a plain dict."""
        pageinfo = {
            'items': True,
            'pages': self.paginate.pages,
            'has_prev': self.paginate.has_prev,
            'page': self.paginate.page,
            'has_next': self.paginate.has_next,
            'iter_pages': list(
                self.paginate.iter_pages(
                    left_edge=1, left_current=2, right_current=3,
                    right_edge=1))
        }
        return pageinfo


class Field(object):
    """Describe how to pull one value off an instance.

    source  -- attribute name looked up on the instance
    args    -- keyword arguments passed when the attribute is callable
    default -- value returned when the instance lacks the attribute
    """

    def __init__(self, source, args=None, default=None):
        self.source = source
        # Create a fresh dict per Field: the original used a mutable default
        # argument (``args={}``) that was shared between all instances.
        self.args = {} if args is None else args
        self.default = default

    def data(self, instance):
        """Resolve this field's value from *instance*."""
        if hasattr(instance, self.source):
            source = getattr(instance, self.source)
            if not callable(source):
                return source
            return source(**self.args)
        return self.default


class Serializer(object):
    """Recursively serialize SQLAlchemy model instances into plain dicts.

    Keyword overrides (depth/include/exclude/extra) fall back to the values
    declared on the nested ``Meta`` class.
    """

    def __init__(self, instance, **kwargs):
        meta = self.Meta
        self.instance = instance
        self.depth = kwargs.get('depth', meta.depth)
        self.include = kwargs.get('include', meta.include)
        self.exclude = kwargs.get('exclude', meta.exclude)
        self.extra = kwargs.get('extra', meta.extra)

    def __new__(cls, *args, **kwargs):
        # Backfill missing Meta attributes so subclasses may declare a
        # partial Meta without breaking __init__.  (The original named the
        # first parameter ``self``; __new__ actually receives the class.)
        meta = cls.Meta
        for _meta in ['include', 'exclude', 'extra']:
            if not hasattr(meta, _meta):
                setattr(meta, _meta, [])
        if not hasattr(meta, 'depth'):
            setattr(meta, 'depth', 2)
        return object.__new__(cls)

    @property
    def data(self):
        """The serialized representation: a dict, or a list of dicts."""
        if isinstance(self.instance, Pagination):
            self.instance = self.instance.items
        if isinstance(self.instance, list):
            return self._serializerlist(self.instance, self.depth)
        return self._serializer(self.instance, self.depth)

    def _serializerlist(self, instances, depth):
        """Serialize every instance, dropping empty results."""
        results = []
        for instance in instances:
            result = self._serializer(instance, depth)
            if result:
                results.append(result)
        return results

    def _serializer(self, instance, depth):
        """Serialize one instance; returns {} once *depth* is exhausted."""
        result = {}
        if depth == 0:
            return result
        depth -= 1
        model_class = self.get_model_class(instance)
        inp = self.get_inspect(model_class)
        result.update(self._serializer_model(inp, instance, depth))
        result.update(self._serializer_relation(inp, instance, depth))
        result.update(self._serializer_extra(instance))
        return result

    def _serializer_extra(self, instance):
        """Collect the values named by Meta.extra, calling callables."""
        result = {}
        for e in self.extra:
            extra_column = getattr(instance, e)
            result[e] = extra_column if not callable(
                extra_column) else extra_column()
        return result

    def _serializer_model(self, inp, instance, depth):
        """Serialize the instance's plain column attributes."""
        result = {}
        for column in self.get_model_columns(inp):
            result[column] = getattr(instance, column)
        return result

    def _serializer_relation(self, inp, instance, depth):
        """Serialize the instance's relationships, recursing with *depth*.

        The reverse side (``back_populates``) is excluded on recursion to
        avoid infinite parent/child cycles.
        """
        result = {}
        for relation in self.get_relation_columns(inp):
            column = relation.key
            # A same-named attribute on the serializer subclass overrides
            # which serializer class handles this relationship.
            serializer = Serializer
            if hasattr(self, column):
                serializer = getattr(self, column)
            if relation.direction in [ONETOMANY, MANYTOMANY
                                      ] and relation.uselist:
                children = getattr(instance, column)
                if relation.lazy == 'dynamic':
                    children = children.all()
                result[column] = serializer(
                    children,
                    exclude=[relation.back_populates],
                    depth=depth).data if children else []
            else:
                child = getattr(instance, column)
                if relation.lazy == 'dynamic':
                    child = child.first()
                result[column] = serializer(
                    child,
                    exclude=[relation.back_populates],
                    depth=depth).data if child else {}
        return result

    def get_model_class(self, instance):
        """Return the mapped class of *instance*."""
        return getattr(instance, '__class__')

    def get_inspect(self, model_class):
        """Return the SQLAlchemy mapper inspection of *model_class*."""
        return inspect(model_class)

    def get_model_columns(self, inp):
        """Column names filtered by include (wins) or exclude."""
        if self.include:
            model_columns = [
                column.name for column in inp.columns
                if column.name in self.include
            ]
        elif self.exclude:
            model_columns = [
                column.name for column in inp.columns
                if column.name not in self.exclude
            ]
        else:
            model_columns = [column.name for column in inp.columns]
        return model_columns

    def get_relation_columns(self, inp):
        """Relationships filtered by include (wins) or exclude."""
        if self.include:
            relation_columns = [
                relation for relation in inp.relationships
                if relation.key in self.include
            ]
        elif self.exclude:
            relation_columns = [
                relation for relation in inp.relationships
                if relation.key not in self.exclude
            ]
        else:
            relation_columns = [relation for relation in inp.relationships]
        return relation_columns

    class Meta:
        # Default serialization options; subclasses override as needed.
        depth = 2
        include = []
        exclude = []
        extra = []
PypiClean
/LFake-18.9.0.tar.gz/LFake-18.9.0/lfake/providers/person/es_CL/__init__.py
from collections import OrderedDict from itertools import zip_longest from typing import Dict from ..es import Provider as PersonProvider class Provider(PersonProvider): formats_male = OrderedDict( [ ("{{given_name_male}} {{last_name}} {{last_name}}", 0.55), ("{{first_name_male}} {{last_name}} {{last_name}}", 0.25), ("{{first_name_male}} {{last_name}}", 0.17), ("{{given_name_male}} {{last_name}}-{{last_name}} {{last_name}}", 0.01), ("{{first_name_male}} {{last_name}}-{{last_name}} {{last_name}}", 0.01), ("{{first_name_male}} {{last_name}}-{{last_name}}", 0.01), ] ) formats_female = OrderedDict( [ ("{{given_name_female}} {{last_name}} {{last_name}}", 0.55), ("{{first_name_female}} {{last_name}} {{last_name}}", 0.25), ("{{first_name_female}} {{last_name}}", 0.17), ("{{given_name_female}} {{last_name}}-{{last_name}} {{last_name}}", 0.01), ("{{first_name_female}} {{last_name}}-{{last_name}} {{last_name}}", 0.01), ("{{first_name_female}} {{last_name}}-{{last_name}}", 0.01), ] ) formats = OrderedDict( [ ("formats_male", 0.48), ("formats_female", 0.52), ] ) # Sources for names data: # Servicio de Registro Civil e Identificación # Inquiry under Law of Transparency #AK002T0020771 for names and last names # https://docs.google.com/spreadsheets/d/1yJ2wVnlttoBaCMS-xWyw7fbUqe6xdYpg/edit?usp=sharing&ouid=105306283136031380407 # Data was truncated to 500 items for each category # 500 male first names, weighted first_names_male: Dict[str, float] = OrderedDict( [ ("José", 0.05357441), ("Juan", 0.05188295), ("Luis", 0.04369026), ("Carlos", 0.02752294), ("Jorge", 0.02148181), ("Manuel", 0.01846196), ("Francisco", 0.01755076), ("Víctor", 0.01596373), ("Cristian", 0.01564751), ("Pedro", 0.01471588), ("Sebastián", 0.01369752), ("Matías", 0.01313522), ("Felipe", 0.01255329), ("Benjamín", 0.01251180), ("Miguel", 0.01246343), ("Diego", 0.01215592), ("Rodrigo", 0.01212607), ("Héctor", 0.01203257), ("Sergio", 0.01171687), ("Daniel", 0.01127892), ("Eduardo", 0.01096382), ("Pablo", 
0.01053620), ("Patricio", 0.01010251), ("Claudio", 0.00996611), ("Ricardo", 0.00938327), ("Vicente", 0.00932498), ("Fernando", 0.00900068), ("Mario", 0.00887763), ("Alejandro", 0.00886620), ("Javier", 0.00854411), ("Oscar", 0.00834245), ("Jaime", 0.00819461), ("Roberto", 0.00812813), ("Mauricio", 0.00789297), ("Martín", 0.00782052), ("Joaquín", 0.00753223), ("David", 0.00750623), ("Gabriel", 0.00734777), ("Marcelo", 0.00727643), ("Ignacio", 0.00682999), ("Raúl", 0.00676584), ("Guillermo", 0.00675521), ("Cristóbal", 0.00644608), ("Julio", 0.00639650), ("Tomás", 0.00638588), ("Marco", 0.00621497), ("Andrés", 0.00616670), ("Maximiliano", 0.00584847), ("César", 0.00522869), ("Hugo", 0.00493089), ("Bastián", 0.00487160), ("Nelson", 0.00476677), ("Gonzalo", 0.00475513), ("Lucas", 0.00456965), ("Mateo", 0.00453424), ("Iván", 0.00451005), ("Álvaro", 0.00443902), ("Fabián", 0.00422673), ("Jonathan", 0.00415701), ("Esteban", 0.00414517), ("Hernán", 0.00410914), ("Alonso", 0.00409913), ("Ángel", 0.00405187), ("Leonardo", 0.00399470), ("Gustavo", 0.00399227), ("Ramón", 0.00398701), ("Santiago", 0.00396991), ("Rafael", 0.00378140), ("Enrique", 0.00360958), ("Camilo", 0.00356728), ("Alex", 0.00356607), ("Alexis", 0.00356172), ("Antonio", 0.00353500), ("Christian", 0.00353399), ("Franco", 0.00352286), ("René", 0.00335560), ("Rubén", 0.00330075), ("Alberto", 0.00309433), ("Samuel", 0.00303513), ("Emilio", 0.00299425), ("Marcos", 0.00285198), ("Renato", 0.00282477), ("Máximo", 0.00270364), ("Luciano", 0.00268897), ("Alfredo", 0.00264688), ("Jean", 0.00260822), ("Arturo", 0.00260529), ("Osvaldo", 0.00252191), ("Germán", 0.00252150), ("Kevin", 0.00250956), ("Gaspar", 0.00245138), ("Segundo", 0.00244025), ("Bruno", 0.00235060), ("Ernesto", 0.00232257), ("Elías", 0.00225062), ("Domingo", 0.00223383), ("Rodolfo", 0.00223342), ("Humberto", 0.00222290), ("Ariel", 0.00221673), ("Julián", 0.00219548), ("Gerardo", 0.00219072), ("Alexander", 0.00217655), ("Richard", 0.00216310), ("Omar", 
0.00213557), ("Cristopher", 0.00212606), ("Alfonso", 0.00196720), ("Simón", 0.00195900), ("Moisés", 0.00194736), ("Bernardo", 0.00194210), ("Orlando", 0.00188382), ("John", 0.00173183), ("Jesús", 0.00171827), ("Michael", 0.00170411), ("Emiliano", 0.00156255), ("Damián", 0.00155516), ("Rolando", 0.00154747), ("Armando", 0.00154524), ("Alan", 0.00153917), ("Angelo", 0.00147067), ("Ismael", 0.00143454), ("Danilo", 0.00142665), ("Isaac", 0.00140581), ("Leandro", 0.00140439), ("Joel", 0.00140378), ("Dante", 0.00139913), ("Adolfo", 0.00137201), ("Amaro", 0.00136118), ("Félix", 0.00135673), ("Ian", 0.00134115), ("Lorenzo", 0.00133740), ("Abraham", 0.00133123), ("Bryan", 0.00132516), ("Thomas", 0.00131969), ("Christopher", 0.00128317), ("Facundo", 0.00127446), ("Erick", 0.00125453), ("Freddy", 0.00125392), ("Leonel", 0.00123996), ("Walter", 0.00121962), ("Eugenio", 0.00120859), ("Wilson", 0.00119958), ("Aníbal", 0.00119230), ("Nicolás", 0.00119088), ("León", 0.00117166), ("Salvador", 0.00116255), ("Edgardo", 0.00115972), ("Boris", 0.00114120), ("Adrián", 0.00112126), ("Robinson", 0.00112066), ("Brayan", 0.00108676), ("William", 0.00108544), ("Reinaldo", 0.00106288), ("Jesus", 0.00106187), ("Isaías", 0.00104578), ("Dylan", 0.00103870), ("Aldo", 0.00102959), ("Gastón", 0.00101087), ("Benjamin", 0.00100581), ("Eric", 0.00100409), ("Ronald", 0.00098709), ("Aarón", 0.00098254), ("Paulo", 0.00097039), ("Octavio", 0.00092577), ("Mariano", 0.00092243), ("Erwin", 0.00091636), ("Hans", 0.00090816), ("Enzo", 0.00090715), ("Abel", 0.00089723), ("Valentín", 0.00089400), ("Guido", 0.00089126), ("Augusto", 0.00086516), ("Heriberto", 0.00084694), ("Axel", 0.00084563), ("Cristofer", 0.00084350), ("Jordán", 0.00083177), ("Darío", 0.00082074), ("Israel", 0.00081760), ("Clemente", 0.00081163), ("Giovanni", 0.00079473), ("Johan", 0.00078937), ("Josue", 0.00078927), ("Jhon", 0.00078643), ("Rigoberto", 0.00077662), ("Néstor", 0.00076640), ("Edgar", 0.00075314), ("Yerko", 0.00074808), ("Robert", 
0.00074596), ("Exequiel", 0.00074444), ("Waldo", 0.00073958), ("Brian", 0.00073260), ("Lukas", 0.00072683), ("Henry", 0.00069354), ("Brandon", 0.00069243), ("Fredy", 0.00068656), ("Williams", 0.00067968), ("Paul", 0.00067907), ("Cesar", 0.00067047), ("Gregorio", 0.00066066), ("Jairo", 0.00065823), ("Raimundo", 0.00063212), ("Liam", 0.00062231), ("Mathias", 0.00062008), ("Martin", 0.00061401), ("Jimmy", 0.00060774), ("Gilberto", 0.00060763), ("Federico", 0.00060237), ("Dagoberto", 0.00059782), ("Max", 0.00058922), ("Wladimir", 0.00058851), ("Milton", 0.00058001), ("Braulio", 0.00057586), ("Michel", 0.00057566), ("Edwin", 0.00057424), ("Edison", 0.00056089), ("Fidel", 0.00055360), ("Jeremy", 0.00055147), ("Benito", 0.00054975), ("Efraín", 0.00054814), ("Horacio", 0.00054743), ("Erik", 0.00054358), ("Mauro", 0.00054085), ("Ramiro", 0.00053164), ("Leopoldo", 0.00052931), ("Ítalo", 0.00052830), ("Joseph", 0.00051272), ("Pascual", 0.00051120), ("Marcelino", 0.00050877), ("Eliseo", 0.00050705), ("Byron", 0.00049845), ("Santino", 0.00049653), ("Oliver", 0.00049056), ("Gael", 0.00048894), ("Darwin", 0.00048074), ("Misael", 0.00047933), ("Adán", 0.00047781), ("Baltazar", 0.00047528), ("Edmundo", 0.00047326), ("Bayron", 0.00046840), ("Anthony", 0.00046759), ("Emanuel", 0.00046374), ("Paolo", 0.00046101), ("Arnoldo", 0.00045919), ("Emmanuel", 0.00045727), ("Ulises", 0.00044978), ("Dilan", 0.00044523), ("Rogelio", 0.00044442), ("Nibaldo", 0.00043531), ("Cristhian", 0.00043147), ("Jeremías", 0.00042732), ("Johnny", 0.00042469), ("Sandro", 0.00042297), ("Thiago", 0.00042256), ("Flavio", 0.00042206), ("Elvis", 0.00041882), ("James", 0.00041700), ("Jacob", 0.00041528), ("Vladimir", 0.00040576), ("Marcial", 0.00040222), ("Herman", 0.00039838), ("Aurelio", 0.00039342), ("Arnaldo", 0.00038532), ("Saúl", 0.00038330), ("Edward", 0.00038269), ("Franklin", 0.00037359), ("Santos", 0.00036913), ("Florencio", 0.00036579), ("Erasmo", 0.00036013), ("Roger", 0.00035446), ("Cristobal", 
0.00035426), ("Juvenal", 0.00035315), ("Fermín", 0.00034819), ("Joshua", 0.00034697), ("Frank", 0.00034627), ("Ezequiel", 0.00034596), ("Benedicto", 0.00034535), ("Gerald", 0.00034455), ("Lautaro", 0.00033979), ("Wilfredo", 0.00033949), ("Abelardo", 0.00033797), ("Gerson", 0.00033665), ("Joan", 0.00033341), ("Leónidas", 0.00033271), ("Patrick", 0.00033038), ("Matteo", 0.00032916), ("Ruperto", 0.00032765), ("Emerson", 0.00032016), ("Danny", 0.00031773), ("Nolberto", 0.00031712), ("Gino", 0.00031611), ("Amador", 0.00031571), ("Bernardino", 0.00031378), ("Andy", 0.00031125), ("Demian", 0.00031055), ("Eladio", 0.00030994), ("Piero", 0.00030559), ("Yonathan", 0.00029274), ("Agustin", 0.00028990), ("Peter", 0.00028828), ("Tomas", 0.00028798), ("Borja", 0.00028748), ("Jonatan", 0.00028748), ("Jhonny", 0.00028059), ("Nicanor", 0.00028039), ("Genaro", 0.00028009), ("Jason", 0.00027948), ("Celso", 0.00027857), ("Sixto", 0.00027756), ("Eleodoro", 0.00027645), ("Evaristo", 0.00027604), ("Teodoro", 0.00027594), ("Maicol", 0.00027554), ("Washington", 0.00027493), ("Aquiles", 0.00027260), ("Román", 0.00026876), ("Rosendo", 0.00026532), ("Aliro", 0.00026461), ("Rosamel", 0.00026349), ("Harold", 0.00026279), ("Justo", 0.00025843), ("Florentino", 0.00024690), ("Anselmo", 0.00024488), ("Hipólito", 0.00024467), ("Allan", 0.00024245), ("Edgard", 0.00024214), ("Eusebio", 0.00024184), ("Eliecer", 0.00023810), ("Jacinto", 0.00023698), ("Froilán", 0.00023678), ("Steven", 0.00023668), ("George", 0.00023526), ("Charles", 0.00023162), ("Belisario", 0.00023121), ("Valentino", 0.00023071), ("Pierre", 0.00022858), ("Fabio", 0.00022636), ("Junior", 0.00022605), ("Tito", 0.00022605), ("Salomón", 0.00022494), ("Clodomiro", 0.00022393), ("Gary", 0.00022312), ("Dionisio", 0.00022282), ("Alamiro", 0.00022150), ("Edson", 0.00021938), ("Renzo", 0.00021927), ("Denis", 0.00021887), ("Noah", 0.00021877), ("Anderson", 0.00021836), ("Amaru", 0.00021614), ("Edinson", 0.00021371), ("Delfín", 0.00021361), 
("Bernabé", 0.00021098), ("Iker", 0.00020956), ("Matheo", 0.00020865), ("Belarmino", 0.00020845), ("Douglas", 0.00020511), ("Desiderio", 0.00020450), ("Alexi", 0.00020308), ("Isidro", 0.00020288), ("Ethan", 0.00020268), ("Elian", 0.00019964), ("Mirko", 0.00019772), ("Américo", 0.00019701), ("Demetrio", 0.00019600), ("Gumercindo", 0.00019408), ("Andrew", 0.00019327), ("Ciro", 0.00019286), ("Milán", 0.00019256), ("Stefano", 0.00019256), ("Remigio", 0.00019226), ("Thomás", 0.00019216), ("Leoncio", 0.00018973), ("Neftalí", 0.00018770), ("Wilmer", 0.00018760), ("Heraldo", 0.00018669), ("Josué", 0.00018608), ("Eleazar", 0.00018568), ("Ronny", 0.00018447), ("Justin", 0.00018366), ("Nahuel", 0.00018204), ("Yordan", 0.00018163), ("Jhonatan", 0.00018113), ("Tránsito", 0.00017991), ("Silvio", 0.00017870), ("Artemio", 0.00017688), ("Lucio", 0.00017637), ("Galvarino", 0.00017576), ("Narciso", 0.00017516), ("Eloy", 0.00017435), ("Aladino", 0.00017303), ("Wenceslao", 0.00017232), ("Nestor", 0.00017202), ("Feliciano", 0.00017182), ("Lisandro", 0.00017091), ("Yonatan", 0.00017081), ("Ramon", 0.00017040), ("Rudy", 0.00017040), ("Yeison", 0.00017000), ("Maikol", 0.00016939), ("Bairon", 0.00016868), ("Albert", 0.00016858), ("Avelino", 0.00016706), ("Jerson", 0.00016625), ("Herminio", 0.00016473), ("Andre", 0.00016362), ("Modesto", 0.00016352), ("Armin", 0.00016342), ("Cristián", 0.00016210), ("Atilio", 0.00016200), ("Custodio", 0.00016200), ("Dennis", 0.00016190), ("Gregory", 0.00016129), ("Jefferson", 0.00016099), ("Teófilo", 0.00016079), ("Lionel", 0.00015978), ("Willy", 0.00015978), ("Rómulo", 0.00015967), ("Carlo", 0.00015765), ("Igor", 0.00015664), ("Reynaldo", 0.00015563), ("Lino", 0.00015522), ("Basilio", 0.00015492), ("Marcel", 0.00015431), ("Blas", 0.00015381), ("Johann", 0.00015330), ("Eulogio", 0.00015310), ("Eleuterio", 0.00015209), ("Lian", 0.00015148), ("Isidoro", 0.00015117), ("Xavier", 0.00014986), ("Ivo", 0.00014976), ("Abdón", 0.00014935), ("Harry", 0.00014885), 
("Alessandro", 0.00014753), ("Simon", 0.00014662), ("Arsenio", 0.00014601), ("Bladimir", 0.00014359), ("Jonas", 0.00014318), ("Cristhofer", 0.00014257), ("Joao", 0.00014237), ("Franz", 0.00014207), ("Jeison", 0.00014197), ("Milovan", 0.00014176), ("Floridor", 0.00014136), ("Jerónimo", 0.00013944), ("Tulio", 0.00013893), ("Jair", 0.00013782), ("Marlon", 0.00013772), ("Samir", 0.00013772), ("Onofre", 0.00013660), ("Percy", 0.00013509), ("Rony", 0.00013438), ("Yuri", 0.00013418), ("Jerman", 0.00013367), ("Giovanny", 0.00013286), ("Matthew", 0.00013205), ("Gian", 0.00013134), ("Jordan", 0.00013094), ("Abner", 0.00013013), ("Alain", 0.00012942), ("Ceferino", 0.00012912), ("Yohan", 0.00012912), ("Roque", 0.00012891), ("Eithan", 0.00012770), ("Paulino", 0.00012760), ("Rudecindo", 0.00012750), ("Mark", 0.00012740), ("Norman", 0.00012568), ("Fabrizio", 0.00012446), ("Norberto", 0.00012244), ("Kurt", 0.00012203), ("Gianfranco", 0.00012193), ("Johans", 0.00012102), ("Olegario", 0.00012041), ("Christofer", 0.00011981), ("Maykol", 0.00011839), ("Hermes", 0.00011829), ("Celestino", 0.00011788), ("Albino", 0.00011768), ("Fabricio", 0.00011738), ("Giancarlo", 0.00011738), ("Derek", 0.00011718), ("Iñaki", 0.00011687), ("Jan", 0.00011687), ("Zacarías", 0.00011596), ("Said", 0.00011586), ("Hardy", 0.00011566), ("Ronaldo", 0.00011556), ("Aron", 0.00011414), ("Eydan", 0.00011323), ("Elio", 0.00011313), ("Lenin", 0.00011262), ("Victoriano", 0.00011232), ("Jhoan", 0.00011110), ("Dany", 0.00011070), ("Eduard", 0.00011040), ("Gerónimo", 0.00010989), ("Cipriano", 0.00010979), ("Victorino", 0.00010908), ("Cornelio", 0.00010807), ("Anyelo", 0.00010797), ] ) # 500 female first names, weighted first_names_female: Dict[str, float] = OrderedDict( [ ("María", 0.09500510), ("Ana", 0.02063161), ("Rosa", 0.01863127), ("Claudia", 0.01307437), ("Carolina", 0.01284289), ("Camila", 0.01283978), ("Patricia", 0.01267301), ("Catalina", 0.01188959), ("Javiera", 0.01138562), ("Sofía", 0.01127980), ("Daniela", 
0.01091069), ("Constanza", 0.01049726), ("Francisca", 0.01047776), ("Valentina", 0.01038257), ("Carmen", 0.00923868), ("Margarita", 0.00852030), ("Juana", 0.00831674), ("Sandra", 0.00805135), ("Marcela", 0.00804935), ("Fernanda", 0.00779061), ("Elizabeth", 0.00749475), ("Verónica", 0.00723435), ("Martina", 0.00696652), ("Isidora", 0.00684806), ("Alejandra", 0.00682778), ("Cecilia", 0.00669337), ("Antonia", 0.00647906), ("Emilia", 0.00646743), ("Paola", 0.00644926), ("Marta", 0.00641635), ("Mónica", 0.00632094), ("Andrea", 0.00620359), ("Paula", 0.00598596), ("Gloria", 0.00587238), ("Isabel", 0.00583215), ("Pamela", 0.00573874), ("Florencia", 0.00561851), ("Katherine", 0.00555291), ("Laura", 0.00550238), ("Paulina", 0.00547535), ("Teresa", 0.00543800), ("Natalia", 0.00532886), ("Silvia", 0.00527810), ("Jessica", 0.00525306), ("Gabriela", 0.00523566), ("Gladys", 0.00515411), ("Bárbara", 0.00513106), ("Josefa", 0.00509771), ("Alicia", 0.00499510), ("Antonella", 0.00498789), ("Nicole", 0.00473403), ("Victoria", 0.00468760), ("Anahí", 0.00467751), ("Carla", 0.00463840), ("Agustina", 0.00455208), ("Karen", 0.00454133), ("Jacqueline", 0.00452925), ("Sara", 0.00451917), ("Luz", 0.00446099), ("Nancy", 0.00444426), ("Lorena", 0.00440536), ("Viviana", 0.00438287), ("Sonia", 0.00437256), ("Ximena", 0.00432957), ("Olga", 0.00431705), ("Amanda", 0.00416989), ("Elena", 0.00416524), ("Maite", 0.00408014), ("Luisa", 0.00407449), ("Susana", 0.00390373), ("Blanca", 0.00381785), ("Karina", 0.00380766), ("Macarena", 0.00380378), ("Ruth", 0.00376111), ("Marisol", 0.00360221), ("Eliana", 0.00359900), ("Ángela", 0.00356044), ("Angélica", 0.00356022), ("Cristina", 0.00355102), ("Julia", 0.00347921), ("Trinidad", 0.00343445), ("Valeria", 0.00338414), ("Evelyn", 0.00333128), ("Isabella", 0.00325449), ("Norma", 0.00320319), ("Tamara", 0.00317216), ("Adriana", 0.00311011), ("Ingrid", 0.00307764), ("Lucía", 0.00300461), ("Fabiola", 0.00299597), ("Lidia", 0.00294179), ("Belén", 0.00293359), 
("Magdalena", 0.00291375), ("Romina", 0.00289048), ("Ignacia", 0.00286256), ("Erika", 0.00278266), ("Rocío", 0.00277291), ("Miriam", 0.00270354), ("Edith", 0.00266919), ("Elsa", 0.00266343), ("Graciela", 0.00265867), ("Karla", 0.00263407), ("Julieta", 0.00261091), ("Irma", 0.00259816), ("Berta", 0.00258276), ("Raquel", 0.00255539), ("Inés", 0.00255317), ("Mercedes", 0.00253755), ("Hilda", 0.00251306), ("Maritza", 0.00246818), ("Mariana", 0.00246364), ("Beatriz", 0.00236591), ("Roxana", 0.00232612), ("Vanessa", 0.00232081), ("Josefina", 0.00229687), ("Emma", 0.00227183), ("Renata", 0.00225942), ("Yolanda", 0.00224435), ("Clara", 0.00222451), ("Pía", 0.00218019), ("Flor", 0.00215260), ("Mariela", 0.00212600), ("Myriam", 0.00203758), ("Yasna", 0.00200090), ("Marcia", 0.00199669), ("Elisa", 0.00198904), ("Paz", 0.00194017), ("Emily", 0.00193962), ("Nelly", 0.00192488), ("Monserrat", 0.00192222), ("Leonor", 0.00191879), ("Jeannette", 0.00191757), ("Jocelyn", 0.00191502), ("Ema", 0.00191380), ("Soledad", 0.00191236), ("Elba", 0.00189751), ("Anaís", 0.00184055), ("Violeta", 0.00179800), ("Iris", 0.00178692), ("Génesis", 0.00177296), ("Fresia", 0.00176886), ("Diana", 0.00176775), ("Matilde", 0.00176520), ("Liliana", 0.00176066), ("Alexandra", 0.00174559), ("Jennifer", 0.00173451), ("Solange", 0.00170714), ("Aurora", 0.00170326), ("Loreto", 0.00169617), ("Amelia", 0.00168398), ("Johanna", 0.00166415), ("Mia", 0.00161240), ("Bernardita", 0.00160320), ("Denisse", 0.00159733), ("Rosario", 0.00159101), ("Amalia", 0.00158392), ("Eva", 0.00156874), ("Ester", 0.00154159), ("Nataly", 0.00152530), ("Ivonne", 0.00149826), ("Nora", 0.00149317), ("Lilian", 0.00149294), ("Irene", 0.00147322), ("Marina", 0.00147156), ("Valeska", 0.00145039), ("Maribel", 0.00143433), ("Sylvia", 0.00141926), ("Millaray", 0.00139299), ("Michelle", 0.00138103), ("Bernarda", 0.00137715), ("Pilar", 0.00135809), ("Virginia", 0.00135443), ("Marianela", 0.00133482), ("Noemí", 0.00131133), ("Aída", 0.00130257), 
("Tania", 0.00129448), ("Eugenia", 0.00129304), ("Doris", 0.00129249), ("Catherine", 0.00129072), ("Consuelo", 0.00128385), ("Estefanía", 0.00128218), ("Matilda", 0.00128130), ("Dominga", 0.00128119), ("Judith", 0.00126933), ("Rebeca", 0.00126235), ("Carol", 0.00125082), ("Mirta", 0.00124949), ("Tatiana", 0.00120462), ("Amparo", 0.00119276), ("Cynthia", 0.00119165), ("Guillermina", 0.00118877), ("Olivia", 0.00118301), ("Rafaela", 0.00117791), ("Jenny", 0.00116251), ("Silvana", 0.00116007), ("Marjorie", 0.00114821), ("Paloma", 0.00114245), ("Magaly", 0.00113879), ("Marlene", 0.00113181), ("Mireya", 0.00113059), ("Krishna", 0.00110544), ("Nicol", 0.00110045), ("Leslie", 0.00109081), ("Yesenia", 0.00108915), ("Ámbar", 0.00107386), ("Elvira", 0.00106732), ("Georgina", 0.00106178), ("Leticia", 0.00106145), ("Jimena", 0.00103064), ("Noelia", 0.00102544), ("Adela", 0.00100870), ("Dominique", 0.00100760), ("Colomba", 0.00100649), ("Nadia", 0.00098277), ("Pascal", 0.00095119), ("Stephanie", 0.00094787), ("Erica", 0.00094111), ("Luciana", 0.00092726), ("Yessica", 0.00092682), ("Johana", 0.00092405), ("Melissa", 0.00092050), ("Lissette", 0.00091972), ("Celia", 0.00090355), ("Alondra", 0.00090199), ("Priscila", 0.00090199), ("Abigail", 0.00089667), ("Mabel", 0.00089656), ("Rita", 0.00089158), ("Karin", 0.00089113), ("Angelina", 0.00088980), ("Lucila", 0.00088172), ("Geraldine", 0.00087795), ("Priscilla", 0.00087562), ("Delia", 0.00086022), ("Carola", 0.00085324), ("Mayra", 0.00084072), ("Danitza", 0.00083916), ("Rossana", 0.00083861), ("Samantha", 0.00083673), ("Filomena", 0.00082819), ("Brenda", 0.00082387), ("Jazmín", 0.00081756), ("Scarlett", 0.00081745), ("Damaris", 0.00081257), ("Esperanza", 0.00080792), ("Lucy", 0.00079429), ("Vania", 0.00079074), ("Oriana", 0.00077456), ("Zoila", 0.00076891), ("Yessenia", 0.00076381), ("Rayén", 0.00076282), ("Tiare", 0.00074564), ("Danae", 0.00074121), ("Dayana", 0.00073966), ("Katalina", 0.00073766), ("Sophia", 0.00072658), ("Thiare", 
0.00072459), ("Francesca", 0.00072248), ("Manuela", 0.00072104), ("Fanny", 0.00071672), ("Anita", 0.00071594), ("Mary", 0.00070520), ("Joselyn", 0.00069655), ("Marie", 0.00069001), ("Vilma", 0.00068846), ("Eloísa", 0.00068026), ("Jeanette", 0.00067882), ("Hortensia", 0.00067749), ("Ernestina", 0.00067727), ("Alba", 0.00067428), ("Dina", 0.00066896), ("Haydée", 0.00066342), ("Lía", 0.00066187), ("Montserrat", 0.00065433), ("Debora", 0.00064480), ("Dafne", 0.00064414), ("Herminia", 0.00064104), ("Corina", 0.00062464), ("Giovanna", 0.00062397), ("Rosalía", 0.00062187), ("Yaritza", 0.00061965), ("Guadalupe", 0.00061522), ("Alison", 0.00060480), ("Celeste", 0.00060214), ("Aylin", 0.00059970), ("Carmela", 0.00058619), ("Cindy", 0.00058441), ("Susan", 0.00058064), ("Zunilda", 0.00058031), ("Mirtha", 0.00057943), ("Almendra", 0.00057920), ("Kimberly", 0.00057776), ("Regina", 0.00057577), ("Martha", 0.00057444), ("Kiara", 0.00057355), ("Estela", 0.00056990), ("Maira", 0.00056923), ("Zulema", 0.00056868), ("Estrella", 0.00054895), ("Gisela", 0.00054873), ("Ida", 0.00054840), ("Pascuala", 0.00054541), ("Petronila", 0.00054053), ("Uberlinda", 0.00053998), ("Ayleen", 0.00053588), ("Allison", 0.00053111), ("Franchesca", 0.00053023), ("Mayte", 0.00052934), ("Aracely", 0.00052890), ("Gilda", 0.00052723), ("Pascale", 0.00052602), ("Clementina", 0.00052457), ("Luzmira", 0.00052336), ("Yenny", 0.00052302), ("Margot", 0.00051859), ("Natalie", 0.00051505), ("Mía", 0.00051482), ("Yenifer", 0.00051416), ("Bianca", 0.00050441), ("Cinthia", 0.00050341), ("Rafaella", 0.00050053), ("Maura", 0.00049898), ("Claudina", 0.00049599), ("Melanie", 0.00049222), ("Daisy", 0.00049100), ("Erna", 0.00048114), ("Sabina", 0.00047803), ("Scarlet", 0.00047205), ("Nathaly", 0.00046850), ("Mirna", 0.00046773), ("Nilda", 0.00046751), ("Lina", 0.00046673), ("Ada", 0.00046596), ("Makarena", 0.00045909), ("Astrid", 0.00045753), ("Gina", 0.00045720), ("Celinda", 0.00045676), ("Leontina", 0.00045388), ("Jenifer", 
0.00045078), ("Marilyn", 0.00044834), ("Yohana", 0.00044701), ("Grace", 0.00044668), ("Ashley", 0.00044479), ("Janet", 0.00044479), ("Ninoska", 0.00044379), ("Anahis", 0.00044280), ("Teresita", 0.00044280), ("Adelina", 0.00044246), ("Elcira", 0.00044246), ("Pabla", 0.00044158), ("Maricel", 0.00044058), ("Elisabeth", 0.00043981), ("Jovita", 0.00043881), ("Caroline", 0.00043859), ("Nathalie", 0.00043792), ("Isolina", 0.00043061), ("Delfina", 0.00043016), ("Angie", 0.00042850), ("Fiorella", 0.00042130), ("Dora", 0.00041975), ("Giselle", 0.00041676), ("Yanet", 0.00041310), ("Yoselin", 0.00041299), ("Alice", 0.00041077), ("Edita", 0.00041044), ("Fabiana", 0.00041000), ("Nayareth", 0.00040933), ("Genoveva", 0.00040678), ("Helen", 0.00040590), ("Vivian", 0.00040390), ("Lucrecia", 0.00040246), ("Herminda", 0.00040213), ("Luna", 0.00040113), ("Scarleth", 0.00040113), ("Monica", 0.00040036), ("Marion", 0.00039969), ("Orfelina", 0.00039659), ("Digna", 0.00039426), ("Yasmín", 0.00039382), ("Marcelina", 0.00039127), ("Lisette", 0.00039061), ("Linda", 0.00038939), ("Katherinne", 0.00038928), ("Amy", 0.00038894), ("Nidia", 0.00038551), ("Ivette", 0.00038418), ("Yanira", 0.00038407), ("Milena", 0.00038096), ("Emelina", 0.00037897), ("Flora", 0.00037831), ("Estefany", 0.00037786), ("Esmeralda", 0.00037509), ("Francia", 0.00037487), ("Vanesa", 0.00036423), ("Araceli", 0.00036346), ("Edelmira", 0.00036335), ("Yanina", 0.00036324), ("Helena", 0.00036091), ("Darling", 0.00035936), ("Clorinda", 0.00035814), ("Betty", 0.00035747), ("Veronica", 0.00035747), ("Juliana", 0.00035603), ("Tabita", 0.00035348), ("Jeniffer", 0.00035171), ("Otilia", 0.00035094), ("Nieves", 0.00034938), ("Amaya", 0.00034916), ("Esther", 0.00034839), ("Leyla", 0.00034828), ("Maricela", 0.00034794), ("Alejandrina", 0.00034761), ("Jenniffer", 0.00034728), ("Rose", 0.00034584), ("Jacinta", 0.00034362), ("Albertina", 0.00033997), ("Lucinda", 0.00033808), ("Aurelia", 0.00033708), ("Juanita", 0.00033697), ("Rosalba", 
0.00033664), ("Adelaida", 0.00033199), ("Denise", 0.00033154), ("Mery", 0.00033121), ("Alexia", 0.00033066), ("Enriqueta", 0.00032955), ("Katia", 0.00032933), ("Nélida", 0.00032922), ("Evelin", 0.00032722), ("Brígida", 0.00032645), ("Dolores", 0.00032545), ("Anna", 0.00032467), ("Florinda", 0.00032013), ("Gricelda", 0.00031836), ("América", 0.00031736), ("Doralisa", 0.00031703), ("Ramona", 0.00031603), ("Cinthya", 0.00031470), ("Gisselle", 0.00031381), ("Yesica", 0.00031381), ("Scarlette", 0.00031370), ("Úrsula", 0.00031326), ("Daniella", 0.00031248), ("Alma", 0.00031204), ("Clarisa", 0.00030916), ("Deyanira", 0.00030905), ("Amada", 0.00030872), ("Karol", 0.00030816), ("Kelly", 0.00030761), ("Leidy", 0.00030683), ("Yuliana", 0.00030650), ("Lourdes", 0.00030440), ("Flavia", 0.00030318), ("Natacha", 0.00030185), ("Lorenza", 0.00029830), ("Marisel", 0.00029819), ("Rocio", 0.00029764), ("Clotilde", 0.00029675), ("Ariela", 0.00029664), ("Marisa", 0.00029631), ("Nayaret", 0.00029608), ("Soraya", 0.00029608), ("Antonieta", 0.00029431), ("Ruby", 0.00029110), ("Melany", 0.00029065), ("Magali", 0.00028977), ("Barbara", 0.00028777), ("Yamilet", 0.00028556), ("Anastasia", 0.00028511), ("Elia", 0.00028434), ("Lesly", 0.00028412), ("Deisy", 0.00028367), ("Milagros", 0.00028013), ("Jael", 0.00027924), ("Florentina", 0.00027880), ("Katerine", 0.00027791), ("Madeleine", 0.00027758), ("Ayelén", 0.00027658), ("Francis", 0.00027547), ("Wilma", 0.00027525), ("Mariluz", 0.00027492), ("Natali", 0.00027381), ("Nury", 0.00027359), ("Giuliana", 0.00027337), ("Gema", 0.00027315), ("Massiel", 0.00027293), ("Rachel", 0.00027270), ("Paulette", 0.00027248), ("Micaela", 0.00027137), ("Dania", 0.00026905), ("Natividad", 0.00026849), ("Yocelyn", 0.00026783), ("Yanara", 0.00026528), ("Katherin", 0.00026473), ("Sarah", 0.00026461), ("Melania", 0.00026439), ("Sarai", 0.00026384), ("Perla", 0.00026207), ("Sabrina", 0.00026118), ("Muriel", 0.00026007), ("Cintia", 0.00025985), ] ) @property def 
first_names(self): """Returns a list of weighted first names, male and female.""" if not hasattr(self, "_first_names"): self._first_names = OrderedDict() for a, b in zip_longest(self.first_names_male.items(), self.first_names_female.items()): if a is not None: name, weight = a self._first_names[name] = weight / 2 if b is not None: name, weight = b self._first_names[name] = weight / 2 return self._first_names # 500 last names, weighted last_names = OrderedDict( [ ("González", 0.02683604), ("Muñoz", 0.02047480), ("Rojas", 0.01508949), ("Díaz", 0.01491392), ("Pérez", 0.01227842), ("Soto", 0.01044305), ("Rodríguez", 0.00997861), ("Contreras", 0.00993588), ("Silva", 0.00932900), ("López", 0.00920382), ("Morales", 0.00901722), ("Sepúlveda", 0.00880392), ("Martínez", 0.00870346), ("Hernández", 0.00867623), ("Torres", 0.00844247), ("Flores", 0.00836659), ("Ramírez", 0.00809392), ("Fuentes", 0.00808812), ("Castillo", 0.00801363), ("Espinoza", 0.00788287), ("Araya", 0.00787643), ("Reyes", 0.00758987), ("Gutiérrez", 0.00753243), ("Valenzuela", 0.00751303), ("Castro", 0.00732126), ("Vargas", 0.00724265), ("Sánchez", 0.00722920), ("Vásquez", 0.00699836), ("Fernández", 0.00677539), ("Álvarez", 0.00659731), ("Gómez", 0.00658808), ("Tapia", 0.00631937), ("Herrera", 0.00623804), ("Cortés", 0.00613157), ("García", 0.00612128), ("Carrasco", 0.00605067), ("Núñez", 0.00597788), ("Jara", 0.00568990), ("Vergara", 0.00543105), ("Rivera", 0.00538544), ("Figueroa", 0.00513368), ("Riquelme", 0.00501507), ("Bravo", 0.00496506), ("Miranda", 0.00492273), ("Vera", 0.00488902), ("Molina", 0.00478491), ("Vega", 0.00463878), ("Sandoval", 0.00456813), ("Campos", 0.00453386), ("Ortiz", 0.00437677), ("Orellana", 0.00435350), ("Salazar", 0.00429255), ("Zúñiga", 0.00426568), ("Olivares", 0.00425670), ("Romero", 0.00414512), ("Gallardo", 0.00413093), ("Garrido", 0.00407209), ("Alarcón", 0.00407085), ("Guzmán", 0.00403413), ("Parra", 0.00390092), ("Saavedra", 0.00387443), ("Peña", 0.00387328), 
("Aguilera", 0.00384177), ("Navarro", 0.00382743), ("Henríquez", 0.00381134), ("Cáceres", 0.00371244), ("Pizarro", 0.00370441), ("Godoy", 0.00367051), ("Aravena", 0.00365821), ("Jiménez", 0.00359039), ("Escobar", 0.00355175), ("Ruiz", 0.00353889), ("Leiva", 0.00348804), ("Medina", 0.00344091), ("Vidal", 0.00337984), ("Cárdenas", 0.00335514), ("Yáñez", 0.00334424), ("Salinas", 0.00333792), ("Valdés", 0.00333438), ("Moreno", 0.00325766), ("Lagos", 0.00318407), ("Maldonado", 0.00318255), ("Bustos", 0.00308706), ("Pino", 0.00302189), ("Carvajal", 0.00294762), ("Palma", 0.00294040), ("Alvarado", 0.00291871), ("Ortega", 0.00289513), ("Sanhueza", 0.00287199), ("Navarrete", 0.00286994), ("Guerrero", 0.00285879), ("Ramos", 0.00285476), ("Paredes", 0.00283341), ("Sáez", 0.00282436), ("Bustamante", 0.00280019), ("Toro", 0.00279548), ("Poblete", 0.00277637), ("Mora", 0.00274113), ("Donoso", 0.00272059), ("Velásquez", 0.00271278), ("Venegas", 0.00270150), ("Acuña", 0.00267882), ("Pinto", 0.00267108), ("Acevedo", 0.00266916), ("Toledo", 0.00262872), ("Quezada", 0.00261595), ("Farías", 0.00260009), ("Aguilar", 0.00259665), ("San Martín", 0.00259182), ("Arriagada", 0.00259178), ("Rivas", 0.00255249), ("Cerda", 0.00253610), ("Salas", 0.00250877), ("Cornejo", 0.00250865), ("Arias", 0.00247106), ("Cabrera", 0.00245006), ("Durán", 0.00244504), ("Hidalgo", 0.00242676), ("Arancibia", 0.00242276), ("Marín", 0.00240593), ("Méndez", 0.00239469), ("Troncoso", 0.00234412), ("Osorio", 0.00234024), ("Ulloa", 0.00232537), ("Inostroza", 0.00231406), ("Villarroel", 0.00231381), ("Delgado", 0.00228236), ("Cuevas", 0.00227765), ("Ríos", 0.00226799), ("Pacheco", 0.00225965), ("Calderón", 0.00225919), ("Lara", 0.00224862), ("Ojeda", 0.00223799), ("León", 0.00220174), ("Correa", 0.00219774), ("Villalobos", 0.00215563), ("Ponce", 0.00212502), ("Barrera", 0.00209673), ("Burgos", 0.00209540), ("Chávez", 0.00209403), ("Cifuentes", 0.00208313), ("Catalán", 0.00208213), ("Moya", 0.00206590), ("Concha", 
0.00201908), ("Ávila", 0.00200483), ("Zapata", 0.00199565), ("Guerra", 0.00197511), ("Salgado", 0.00195438), ("Barría", 0.00193901), ("Alfaro", 0.00191432), ("Gajardo", 0.00189681), ("Uribe", 0.00188327), ("Meza", 0.00185182), ("Astudillo", 0.00183289), ("Aguirre", 0.00182031), ("Cruz", 0.00181786), ("Becerra", 0.00180856), ("Retamal", 0.00180751), ("Mendoza", 0.00179192), ("Neira", 0.00178706), ("Pereira", 0.00178309), ("Ahumada", 0.00176419), ("Villegas", 0.00175511), ("Valdebenito", 0.00173854), ("Pavez", 0.00173026), ("Barrientos", 0.00170380), ("Jorquera", 0.00169141), ("Moraga", 0.00168413), ("Cárcamo", 0.00167957), ("Valencia", 0.00167161), ("Gálvez", 0.00166746), ("Lobos", 0.00166690), ("Barraza", 0.00165862), ("Canales", 0.00165701), ("Guajardo", 0.00165624), ("Araneda", 0.00164477), ("Mansilla", 0.00162051), ("Urrutia", 0.00160508), ("Mancilla", 0.00159963), ("Abarca", 0.00159944), ("Andrade", 0.00158767), ("Quiroz", 0.00158624), ("Valdivia", 0.00158485), ("Ibarra", 0.00158271), ("Mella", 0.00157726), ("Gatica", 0.00157255), ("Leal", 0.00156976), ("Cid", 0.00154797), ("Mardones", 0.00152328), ("Riveros", 0.00152269), ("Albornoz", 0.00151925), ("Cisternas", 0.00151761), ("Vallejos", 0.00151693), ("Solís", 0.00150807), ("Baeza", 0.00150525), ("Gaete", 0.00147643), ("Fuentealba", 0.00147544), ("Manríquez", 0.00147026), ("Córdova", 0.00146422), ("Rebolledo", 0.00145805), ("Caro", 0.00145344), ("Suárez", 0.00143779), ("Carrillo", 0.00142716), ("Carreño", 0.00140997), ("Cofré", 0.00140222), ("Oyarzún", 0.00140036), ("Varas", 0.00138394), ("Santibáñez", 0.00136064), ("Barra", 0.00136061), ("Márquez", 0.00135707), ("Fuenzalida", 0.00131692), ("Zamora", 0.00131596), ("Arenas", 0.00131267), ("Opazo", 0.00130920), ("Cabezas", 0.00130372), ("Pardo", 0.00127540), ("Vilches", 0.00126641), ("Santander", 0.00126170), ("Berríos", 0.00124955), ("Roa", 0.00124847), ("Véliz", 0.00123772), ("Arévalo", 0.00122129), ("Rubio", 0.00120847), ("Montecinos", 0.00120057), ("Robles", 
0.00119641), ("Plaza", 0.00119366), ("Ibáñez", 0.00119093), ("Parada", 0.00117860), ("Meneses", 0.00117822), ("Briones", 0.00117429), ("Mena", 0.00117398), ("Huerta", 0.00116162), ("Román", 0.00115523), ("Zamorano", 0.00114932), ("Mamani", 0.00113704), ("Rosales", 0.00113646), ("Peralta", 0.00112319), ("Cancino", 0.00111678), ("Faúndez", 0.00111285), ("Maturana", 0.00111164), ("Beltrán", 0.00110835), ("Oyarzo", 0.00110764), ("Jaramillo", 0.00110631), ("Jofré", 0.00110141), ("Tobar", 0.00109837), ("Aguayo", 0.00109791), ("Palacios", 0.00109289), ("Avendaño", 0.00108908), ("Galaz", 0.00108412), ("Gallegos", 0.00107582), ("Urra", 0.00107492), ("Zambrano", 0.00106761), ("Ayala", 0.00106246), ("Cortez", 0.00105490), ("Santana", 0.00105177), ("Olguín", 0.00104610), ("Riffo", 0.00104121), ("Astorga", 0.00103681), ("Garcés", 0.00103603), ("Villanueva", 0.00103454), ("Hermosilla", 0.00102636), ("Marchant", 0.00102556), ("Arce", 0.00101592), ("Bastías", 0.00101118), ("Galleguillos", 0.00100511), ("Suazo", 0.00100378), ("Monsalve", 0.00099612), ("Rubilar", 0.00098757), ("Lillo", 0.00098546), ("Padilla", 0.00098472), ("Candia", 0.00098237), ("Quintana", 0.00098128), ("Almonacid", 0.00097657), ("Lizama", 0.00096650), ("Cabello", 0.00096566), ("Espinosa", 0.00096337), ("Duarte", 0.00095256), ("Osses", 0.00094444), ("Cartes", 0.00094150), ("Barrios", 0.00093806), ("Loyola", 0.00093697), ("Novoa", 0.00093524), ("Seguel", 0.00093452), ("Norambuena", 0.00093397), ("Mellado", 0.00093307), ("Serrano", 0.00092513), ("Leyton", 0.00091829), ("Carmona", 0.00091801), ("Montenegro", 0.00091004), ("Segovia", 0.00090726), ("Cea", 0.00088448), ("Benavides", 0.00088352), ("Hormazábal", 0.00088324), ("Verdugo", 0.00088157), ("Jerez", 0.00087726), ("Martinez", 0.00087525), ("Mondaca", 0.00087385), ("Segura", 0.00087376), ("Pastén", 0.00086416), ("Oliva", 0.00085762), ("Cordero", 0.00085374), ("Aranda", 0.00084897), ("Céspedes", 0.00084814), ("Urbina", 0.00084485), ("Briceño", 0.00084439), 
("Luna", 0.00083924), ("Matus", 0.00083599), ("Cisterna", 0.00083484), ("Varela", 0.00083373), ("Echeverría", 0.00083342), ("Aedo", 0.00082765), ("Bahamondes", 0.00082669), ("Altamirano", 0.00082598), ("Merino", 0.00082487), ("Arellano", 0.00082462), ("Matamala", 0.00082121), ("Elgueta", 0.00081083), ("Hurtado", 0.00081043), ("Brito", 0.00080209), ("Barahona", 0.00079001), ("Valderrama", 0.00078669), ("Madrid", 0.00078592), ("Estay", 0.00078471), ("Aburto", 0.00078080), ("Bórquez", 0.00077910), ("Acosta", 0.00077774), ("Órdenes", 0.00077433), ("Fierro", 0.00077414), ("Domínguez", 0.00077262), ("Lizana", 0.00076764), ("Villagra", 0.00076584), ("Alegría", 0.00076534), ("Maureira", 0.00075208), ("Urzúa", 0.00075118), ("Oyarce", 0.00074914), ("Trujillo", 0.00074390), ("Olave", 0.00074362), ("Ferrada", 0.00074062), ("Rosas", 0.00073020), ("Bugueño", 0.00072636), ("Vivanco", 0.00072540), ("Lorca", 0.00072113), ("Rozas", 0.00072075), ("Montero", 0.00072035), ("Águila", 0.00071803), ("Montoya", 0.00071493), ("Zepeda", 0.00071261), ("Vicencio", 0.00071137), ("Garay", 0.00069454), ("Gamboa", 0.00069389), ("Lazo", 0.00069274), ("Aliaga", 0.00069215), ("Villagrán", 0.00068574), ("Aros", 0.00068193), ("Aránguiz", 0.00068044), ("Baez", 0.00067759), ("Pozo", 0.00067759), ("Belmar", 0.00067734), ("Casanova", 0.00066929), ("Bernal", 0.00066644), ("Machuca", 0.00066572), ("Escalona", 0.00066507), ("Ávalos", 0.00066461), ("Quinteros", 0.00066039), ("Collao", 0.00065640), ("Letelier", 0.00064540), ("Quispe", 0.00064078), ("Marambio", 0.00063951), ("Mejías", 0.00063561), ("Saldivia", 0.00063496), ("Armijo", 0.00063393), ("Orrego", 0.00063127), ("Piña", 0.00062780), ("Chacón", 0.00062674), ("Bello", 0.00062597), ("Rocha", 0.00062355), ("Pinilla", 0.00062318), ("Parraguez", 0.00061441), ("Oñate", 0.00060908), ("Iturra", 0.00060459), ("Arredondo", 0.00060270), ("Fredes", 0.00060217), ("Jaque", 0.00059945), ("Blanco", 0.00059935), ("Chamorro", 0.00059864), ("Quiroga", 0.00059483), 
("Chandía", 0.00059424), ("Ceballos", 0.00059158), ("Saldías", 0.00059148), ("Barros", 0.00058888), ("Llanos", 0.00058866), ("Benítez", 0.00058522), ("Peñaloza", 0.00058491), ("Páez", 0.00058426), ("Pulgar", 0.00058302), ("Melo", 0.00058290), ("Ruz", 0.00057822), ("Medel", 0.00057689), ("Ampuero", 0.00057673), ("Avilés", 0.00057590), ("Pincheira", 0.00057351), ("Bascuñán", 0.00057302), ("Azócar", 0.00057168), ("Villa", 0.00057078), ("Tello", 0.00057047), ("Luengo", 0.00056787), ("Ovalle", 0.00056645), ("Madariaga", 0.00056164), ("Celis", 0.00056130), ("Cubillos", 0.00055932), ("Prado", 0.00055635), ("Angulo", 0.00055579), ("Estrada", 0.00055418), ("Arroyo", 0.00055303), ("Mercado", 0.00054947), ("Castañeda", 0.00054829), ("Barriga", 0.00054575), ("Lucero", 0.00054559), ("Valladares", 0.00054274), ("Coronado", 0.00053983), ("Pineda", 0.00053896), ("Rojo", 0.00053760), ("Ibacache", 0.00053747), ("Quijada", 0.00053639), ("Bahamonde", 0.00052744), ("Zurita", 0.00052424), ("Salamanca", 0.00051517), ("Galdames", 0.00051507), ("Ferreira", 0.00051433), ("Santos", 0.00051231), ("Labra", 0.00051173), ("Naranjo", 0.00051021), ("Badilla", 0.00051011), ("Veloso", 0.00050866), ("Prieto", 0.00050785), ("Villar", 0.00050785), ("Ormeño", 0.00050776), ("Ossandón", 0.00050754), ("Lira", 0.00050624), ("Bobadilla", 0.00050571), ("Apablaza", 0.00050395), ("Cepeda", 0.00050252), ("Paz", 0.00050252), ("Sierra", 0.00049617), ("Esparza", 0.00049574), ("Zavala", 0.00049530), ("Quintanilla", 0.00049459), ("Veas", 0.00049134), ("Sobarzo", 0.00048920), ("Videla", 0.00048811), ("Fonseca", 0.00047584), ("Toloza", 0.00047113), ("Agüero", 0.00046766), ("Olmos", 0.00046568), ("Arteaga", 0.00046562), ("Allende", 0.00046472), ("Montecino", 0.00046395), ("Quiñones", 0.00045976), ("Agurto", 0.00045958), ("Zárate", 0.00045933), ("Villablanca", 0.00045911), ("Guevara", 0.00045679), ("Solar", 0.00045577), ("Cruces", 0.00045391), ("Retamales", 0.00045140), ("Alvarez", 0.00045037), ("Astete", 0.00044954), 
("De La Fuente", 0.00044650), ("Aracena", 0.00043996), ("Alvear", 0.00043910), ("Millán", 0.00043160), ("Zenteno", 0.00043135), ("Erices", 0.00043101), ("Meléndez", 0.00043064), ("Carrera", 0.00042884), ("Olea", 0.00042800), ("Cavieres", 0.00042779), ("Moncada", 0.00042583), ("Cares", 0.00042565), ("Vejar", 0.00042546), ("Arcos", 0.00042432), ("Montes", 0.00042150), ("Encina", 0.00041985), ("Fica", 0.00041784), ("Inzunza", 0.00041641), ("Droguett", 0.00041195), ("Caballero", 0.00041127), ("Lazcano", 0.00040950), ("Bruna", 0.00040805), ("Olmedo", 0.00040802), ("Corvalán", 0.00040634), ("Morán", 0.00040365), ("Olate", 0.00040114), ("Allendes", 0.00039928), ("Saldaña", 0.00039903), ("Viveros", 0.00039723), ("Moyano", 0.00039609), ("Choque", 0.00039550), ("Dinamarca", 0.00039107), ("Adasme", 0.00039098), ] ) prefixes_male = ("Sr.", "Dr.", "Don") prefixes_female = ("Srta.", "Sra.", "Dra.", "Doña") def name(self) -> str: # Select format, then generate name format: str = self.random_element(self.formats) pattern: str = self.random_element(getattr(self, format)) return self.generator.parse(pattern) def given_name(self) -> str: """Generates a composite given name with two unique names""" if self.random_int(0, 1) == 1: source = self.first_names_female else: source = self.first_names_male names = self.random_elements(source, length=2, unique=True) # type: ignore[var-annotated] return " ".join(names) def given_name_male(self) -> str: """Generates a composite male given name with two unique names""" names = self.random_elements(self.first_names_male, length=2, unique=True) # type: ignore[var-annotated] return " ".join(names) def given_name_female(self) -> str: """Generates a composite female given name with two unique names""" names = self.random_elements(self.first_names_female, length=2, unique=True) # type: ignore[var-annotated] return " ".join(names)
PypiClean
/NREL_osos-0.0.3-py3-none-any.whl/osos/api_github/api_github.py
import re
import datetime
import os
import logging

import numpy as np
import pandas as pd
import requests


logger = logging.getLogger(__name__)


class Github:
    """Class to call github api and return osos-formatted usage data."""

    # Base URL template for all repo-scoped API calls
    BASE_REQ = 'https://api.github.com/repos/{owner}/{repo}'

    # Timestamp format used by the github API (ISO 8601, UTC "Z" suffix)
    TIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'

    def __init__(self, owner, repo, token=None):
        """
        Parameters
        ----------
        owner : str
            Repository owner, e.g. https://github.com/{owner}/{repo}
        repo : str
            Repository name, e.g. https://github.com/{owner}/{repo}
        token : str | None
            Github api authorization token. If none this gets retrieved from
            the GITHUB_TOKEN environment variable

        Raises
        ------
        OSError
            If no token is given and GITHUB_TOKEN is not set.
        """
        self._owner = owner
        self._repo = repo
        self.base_req = self.BASE_REQ.format(owner=owner, repo=repo)

        self.token = token
        if self.token is None:
            self.token = os.getenv('GITHUB_TOKEN', None)
            if self.token is None:
                msg = 'Could not find environment variable "GITHUB_TOKEN".'
                logger.error(msg)
                raise OSError(msg)
            else:
                logger.debug('Using github token from environment variable '
                             '"GITHUB_TOKEN".')
        else:
            logger.debug('Using github token from kwarg input to osos.')

    def __str__(self):
        st = (f'Github API interface for https://github.com/'
              f'{self._owner}/{self._repo}/')
        return st

    def __repr__(self):
        return str(self)

    def get_issues_pulls(self, option='issues', state='open',
                         get_lifetimes=False, **kwargs):
        """Get open/closed issues/pulls for the repo (all have the same
        general parsing format)

        Parameters
        ----------
        option : str
            "issues" or "pulls"
        state : str
            "open" or "closed"
        get_lifetimes : bool
            Flag to get the lifetime statistics of issues/pulls. Default is
            false to reduce number of API queries. Turning this on requires
            that we get the full data for every issue/pull. It is recommended
            that users retrieve lifetime statistics manually when desired and
            not as part of an automated OSOS workflow.
        kwargs : dict
            Optional kwargs to get passed to requests.get()

        Returns
        -------
        out : int | dict
            Integer count of the number of issues/pulls if
            get_lifetimes=False, or a dict Namespace with keys:
            "{option}_{state}" and "{option}_{state}_*" for count, lifteimtes,
            and mean/median lifetime in days
        """

        # github api has max 100 items per page. Use max to reduce the total
        # number of requests.
        if 'params' in kwargs:
            kwargs['params']['state'] = state
            kwargs['params']['per_page'] = 100
        else:
            kwargs['params'] = {'state': state, 'per_page': 100}

        request = self.base_req + f'/{option}'

        if not get_lifetimes and option == 'pulls':
            # cheap path: just count items via pagination metadata
            out = self._total_count(request, **kwargs)
            return out

        elif not get_lifetimes and option == 'issues':
            # pulls get listed as issues but not the other way around
            i_out = self._total_count(request, **kwargs)
            request = self.base_req + '/pulls'
            p_out = self._total_count(request, **kwargs)
            out = i_out - p_out
            return out

        else:
            # expensive path: walk every item to compute lifetimes
            items = self.get_generator(request, **kwargs)
            numbers = []
            lifetimes = []
            for item in items:
                d0 = item['created_at']
                d1 = item['closed_at']
                d0 = datetime.datetime.strptime(d0, self.TIME_FORMAT)
                if state == 'closed' and d1 is not None:
                    d1 = datetime.datetime.strptime(d1, self.TIME_FORMAT)
                elif state == 'open':
                    # open items have no close date; lifetime runs to "now"
                    d1 = datetime.datetime.now()

                assert d1 is not None, f'Bad final date for: {item}'

                # pulls get listed as issues but not the other way around
                condition_1 = option == 'pulls'
                condition_2 = option == 'issues' and 'pull_request' not in item

                if condition_1 or condition_2:
                    numbers.append(item['number'])
                    lifetime = (d1 - d0).total_seconds() / (24 * 3600)
                    lifetimes.append(lifetime)

            # Bug fix: previously `not any(lifetimes)` was used, which is
            # True for a non-empty list of all-zero lifetimes (items opened
            # and closed at the same timestamp) and wrongly reported NaN.
            # Only an empty list should yield NaN statistics.
            mean = np.nan if not lifetimes else np.mean(lifetimes)
            median = np.nan if not lifetimes else np.median(lifetimes)
            out = {f'{option}_{state}': numbers,
                   f'{option}_{state}_count': len(numbers),
                   f'{option}_{state}_lifetimes': lifetimes,
                   f'{option}_{state}_mean_lifetime': mean,
                   f'{option}_{state}_median_lifetime': median,
                   }
            return out

    def _traffic(self, option='clones', **kwargs):
        """Get the daily github repo traffic data for the last two weeks

        Parameters
        ----------
        option : str
            "clones" or "views"
        kwargs : dict
            Optional kwargs to get passed to requests.get()

        Returns
        -------
        out : pd.DataFrame
            Timeseries of daily git clone data. Includes columns for "views"
            or "clones" and "views_unique" or "clones_unique". Index is a
            pandas datetime index with just the datetime.date part.
        """
        request = self.base_req + f'/traffic/{option}'
        out = self.get_request(request, **kwargs).json()
        out = pd.DataFrame(out[option])

        if 'timestamp' in out:
            out.index = pd.to_datetime(out['timestamp']).dt.date
            out = out.drop('timestamp', axis=1)
        else:
            # no traffic data returned; emit a single zero-filled row for
            # today so downstream concatenation still works
            out = pd.DataFrame({'count': [0], 'uniques': [0]},
                               index=[datetime.date.today()])

        out.index.name = None
        out = out.rename({'count': option, 'uniques': f'{option}_unique'},
                         axis=1)

        return out

    def _total_count(self, request, **kwargs):
        """Get the total count of a request object without querying every page

        Parameters
        ----------
        request : str
            Request URL, example:
            "https://api.github.com/repos/NREL/reV/pulls"
        kwargs : dict
            Optional kwargs to get passed to requests.get()

        Returns
        -------
        out : int
            Total number of items in all pages of the request
        """
        req = self.get_request(request, **kwargs)

        num_pages = 1
        n_last = 0
        if 'last' in req.links:
            # only fetch the last page; all intermediate pages are full
            # (same length as page 1), so total = full_pages * page_len + last
            last_url = req.links['last']['url']
            match = re.search(r'page=[0-9]*$', last_url)
            if not match:
                msg = f'Could not find page=[0-9]*$ in url: {last_url}'
                logger.error(msg)
                raise RuntimeError(msg)
            num_pages = int(match.group().replace('page=', '')) - 1
            last_page = self.get_request(last_url, **kwargs)
            n_last = len(last_page.json())

        out = len(req.json()) * num_pages + n_last

        return out

    def get_request(self, request, **kwargs):
        """Get the raw request output object

        Parameters
        ----------
        request : str
            Request URL, example:
            "https://api.github.com/repos/NREL/reV/pulls"
        kwargs : dict
            Optional kwargs to get passed to requests.get()

        Returns
        -------
        out : requests.models.Response
            requests.get() output object.

        Raises
        ------
        IOError
            If the API responds with a non-200 status code.
        """
        headers = kwargs.pop('headers', {})
        if 'Authorization' not in headers:
            headers['Authorization'] = f'token {self.token}'

        out = requests.get(request, headers=headers, **kwargs)

        if out.status_code != 200:
            msg = ('Received unexpected status code "{}" for reason "{}".'
                   '\nRequest: {}\nOutput: {}'
                   .format(out.status_code, out.reason, request, out.text))
            logger.error(msg)
            raise IOError(msg)

        return out

    def get_generator(self, request, **kwargs):
        """Call the github API using the requests.get() method and merge all
        the paginated results into a single output

        Parameters
        ----------
        request : str
            Request URL, example:
            "https://api.github.com/repos/NREL/reV/pulls"
        kwargs : dict
            Optional kwargs to get passed to requests.get()

        Returns
        -------
        out : generator
            generator of list items in the request output

        Raises
        ------
        IOError
            If the API responds with a non-200 status code.
        TypeError
            If a page of output is not a JSON list.
        """
        headers = kwargs.pop('headers', {})
        if 'Authorization' not in headers:
            headers['Authorization'] = f'token {self.token}'

        params = kwargs.pop('params', {})
        params['page'] = 0

        while True:
            params['page'] += 1
            temp = requests.get(request, headers=headers, params=params,
                                **kwargs)

            if temp.status_code != 200:
                msg = ('Received unexpected status code "{}" for reason "{}".'
                       '\nRequest: {}\nOutput: {}'
                       .format(temp.status_code, temp.reason, request,
                               temp.text))
                logger.error(msg)
                raise IOError(msg)

            temp = temp.json()
            if not any(temp):
                # empty page signals the end of pagination
                break
            elif not isinstance(temp, list):
                msg = ('JSON output is type "{}", not list, could '
                       'not parse output from request: "{}"'
                       .format(type(temp), request))
                logger.error(msg)
                raise TypeError(msg)
            else:
                for entry in temp:
                    yield entry

    def contributors(self, **kwargs):
        """Get the number of repo contributors

        Parameters
        ----------
        kwargs : dict
            Optional kwargs to get passed to requests.get()

        Returns
        -------
        out : int
            Number of contributors for the repo.
        """
        logger.debug(f'Getting contributors for "{self._owner}/{self._repo}"')
        request = self.base_req + '/contributors'
        count = self._total_count(request, **kwargs)
        return count

    def commit_count(self, **kwargs):
        """Get the number of repo commits

        Parameters
        ----------
        kwargs : dict
            Optional kwargs to get passed to requests.get()

        Returns
        -------
        out : int
            Total number of commits to the repo.
        """
        logger.debug(f'Getting commit count for "{self._owner}/{self._repo}"')
        request = self.base_req + '/commits'
        out = self._total_count(request, **kwargs)
        return out

    def commits(self, date_start=None, date_iter=None, search_all=False,
                **kwargs):
        """Get the number of commits by day in a given set of dates.

        Parameters
        ----------
        date_start : datetime.date | None
            Option to search for commits from this date to today. Either
            input this or the date_iter.
        date_iter : list | tuple | pd.DatetimeIndex | None
            Iterable of dates to search for. Either input this or the
            date_start.
        search_all : bool
            Flag to search all commits or to terminate early (default) when
            the commit date is before all dates in the date_iter
        kwargs : dict
            Optional kwargs to get passed to requests.get()

        Returns
        -------
        out : pd.DataFrame
            Timeseries of commit data based on date_iter as the index.
            Includes columns for "commits".
        """
        if date_start is not None:
            date_iter = pd.date_range(date_start, datetime.date.today())
        if date_iter is None:
            msg = 'Must either input date_start or date_iter!'
            logger.error(msg)
            raise RuntimeError(msg)

        logger.debug('Getting commit history for '
                     f'"{self._owner}/{self._repo}"')

        out = pd.DataFrame(index=date_iter)
        out['commits'] = 0
        request = self.base_req + '/commits'
        commit_iter = self.get_generator(request, **kwargs)

        # NOTE(review): c_date is a datetime.date while date_iter entries may
        # be pandas Timestamps (from pd.date_range) — confirm the mixed-type
        # comparison behaves as intended on the installed pandas version.
        for com in commit_iter:
            c_date = com['commit']['committer']['date']
            c_date = datetime.datetime.strptime(c_date, self.TIME_FORMAT)
            c_date = c_date.date()

            # commits are returned newest-first, so if the commit date is
            # before every requested date we can stop early
            stop = True
            for date in date_iter:
                if c_date == date:
                    out.at[date, 'commits'] += 1
                    stop = False
                    break
                elif c_date > date:
                    stop = False

            if stop and not search_all:
                break

        return out

    def clones(self, **kwargs):
        """Get the daily github repo clone data for the last two weeks.

        Parameters
        ----------
        kwargs : dict
            Optional kwargs to get passed to requests.get()

        Returns
        -------
        out : pd.DataFrame
            Timeseries of daily git clone data. Includes columns for "clones"
            and "clones_unique". Index is a pandas datetime index with just
            the datetime.date part.
        """
        logger.debug(f'Getting clones for "{self._owner}/{self._repo}"')
        return self._traffic(option='clones', **kwargs)

    def forks(self, **kwargs):
        """Get the number of repo forks.

        Parameters
        ----------
        kwargs : dict
            Optional kwargs to get passed to requests.get()

        Returns
        -------
        out : int
            The number of forks.
        """
        logger.debug(f'Getting forks for "{self._owner}/{self._repo}"')
        request = self.base_req + '/forks'
        count = self._total_count(request, **kwargs)
        return count

    def issues_closed(self, get_lifetimes=False, **kwargs):
        """Get data on the closed repo issues.

        Parameters
        ----------
        get_lifetimes : bool
            Flag to get the lifetime statistics of issues/pulls. Default is
            false to reduce number of API queries. Turning this on requires
            that we get the full data for every issue/pull. It is recommended
            that users retrieve lifetime statistics manually when desired and
            not as part of an automated OSOS workflow.
        kwargs : dict
            Optional kwargs to get passed to requests.get()

        Returns
        -------
        out : int | dict
            Number of closed issues, or if get_lifetimes is True, this
            returns a dict with additional metrics.
        """
        logger.debug(f'Getting closed issues for "{self._owner}/{self._repo}"')
        out = self.get_issues_pulls(option='issues', state='closed',
                                    get_lifetimes=get_lifetimes, **kwargs)
        return out

    def issues_open(self, get_lifetimes=False, **kwargs):
        """Get data on the open repo issues.

        Parameters
        ----------
        get_lifetimes : bool
            Flag to get the lifetime statistics of issues/pulls. Default is
            false to reduce number of API queries. Turning this on requires
            that we get the full data for every issue/pull. It is recommended
            that users retrieve lifetime statistics manually when desired and
            not as part of an automated OSOS workflow.
        kwargs : dict
            Optional kwargs to get passed to requests.get()

        Returns
        -------
        out : int | dict
            Number of open issues, or if get_lifetimes is True, this returns
            a dict with additional metrics.
        """
        logger.debug(f'Getting open issues for "{self._owner}/{self._repo}"')
        out = self.get_issues_pulls(option='issues', state='open',
                                    get_lifetimes=get_lifetimes, **kwargs)
        return out

    def pulls_closed(self, get_lifetimes=False, **kwargs):
        """Get data on the closed repo pull requests.

        Parameters
        ----------
        get_lifetimes : bool
            Flag to get the lifetime statistics of issues/pulls. Default is
            false to reduce number of API queries. Turning this on requires
            that we get the full data for every issue/pull. It is recommended
            that users retrieve lifetime statistics manually when desired and
            not as part of an automated OSOS workflow.
        kwargs : dict
            Optional kwargs to get passed to requests.get()

        Returns
        -------
        out : int | dict
            Number of closed pull requests, or if get_lifetimes is True, this
            returns a dict with additional metrics.
        """
        logger.debug(f'Getting closed pulls for "{self._owner}/{self._repo}"')
        out = self.get_issues_pulls(option='pulls', state='closed',
                                    get_lifetimes=get_lifetimes, **kwargs)
        return out

    def pulls_open(self, get_lifetimes=False, **kwargs):
        """Get data on the open repo pull requests.

        Parameters
        ----------
        get_lifetimes : bool
            Flag to get the lifetime statistics of issues/pulls. Default is
            false to reduce number of API queries. Turning this on requires
            that we get the full data for every issue/pull. It is recommended
            that users retrieve lifetime statistics manually when desired and
            not as part of an automated OSOS workflow.
        kwargs : dict
            Optional kwargs to get passed to requests.get()

        Returns
        -------
        out : int | dict
            Number of open pull requests, or if get_lifetimes is True, this
            returns a dict with additional metrics.
        """
        logger.debug(f'Getting open pulls for "{self._owner}/{self._repo}"')
        out = self.get_issues_pulls(option='pulls', state='open',
                                    get_lifetimes=get_lifetimes, **kwargs)
        return out

    def stargazers(self, **kwargs):
        """Get the number of repo stargazers

        Parameters
        ----------
        kwargs : dict
            Optional kwargs to get passed to requests.get()

        Returns
        -------
        out : int
            Number of stargazers for the repo.
        """
        logger.debug(f'Getting stargazers for "{self._owner}/{self._repo}"')
        request = self.base_req + '/stargazers'
        count = self._total_count(request, **kwargs)
        return count

    def subscribers(self, **kwargs):
        """Get the number of repo subscribers

        Parameters
        ----------
        kwargs : dict
            Optional kwargs to get passed to requests.get()

        Returns
        -------
        out : int
            Number of subscribers for the repo.
        """
        logger.debug(f'Getting subscribers for "{self._owner}/{self._repo}"')
        request = self.base_req + '/subscribers'
        count = self._total_count(request, **kwargs)
        return count

    def views(self, **kwargs):
        """Get the daily github repo views data for the last two weeks.

        Parameters
        ----------
        kwargs : dict
            Optional kwargs to get passed to requests.get()

        Returns
        -------
        out : pd.DataFrame
            Timeseries of daily git views data. Includes columns for "views"
            and "views_unique". Index is a pandas datetime index with just
            the datetime.date part.
        """
        logger.debug(f'Getting views history for "{self._owner}/{self._repo}"')
        return self._traffic(option='views', **kwargs)
PypiClean
/Lazy_cleaner-0.0.9.tar.gz/Lazy_cleaner-0.0.9/Lazy_cleaner/Lazy_cleaner.py
import numpy as np import pandas as pd from sklearn.linear_model import LinearRegression from sklearn.ensemble import RandomForestRegressor import statsmodels.formula.api as sm from numpy import inf from scipy import stats import chardet import fuzzywuzzy from fuzzywuzzy import process import seaborn as sns import matplotlib.pyplot as plt import warnings warnings.filterwarnings("ignore") def fill_lin_rand(messy_df, metric, colnames): """ messy_df -> dataframe contain Nans metric -> weather fill Nans by linear regression or random forest colnames ->column names that needs to replace the Nans with numeric values this function is to fill the Nan values of columns by making a regression modle by taking features of Non-Nan values as a training data and predicting the missing values and fill it returns: the clean dataframe and list of the missing values """ # Create X_df of predictor columns X_df = messy_df.drop(colnames, axis = 1) # Create Y_df of predicted columns Y_df = messy_df[colnames] # Create empty dataframes and list Y_pred_df = pd.DataFrame(columns=colnames) Y_missing_df = pd.DataFrame(columns=colnames) missing_list = [] # Loop through all columns containing missing values for col in messy_df[colnames]: # Number of missing values in the column missing_count = messy_df[col].isnull().sum() # Separate train dataset which does not contain missing values messy_df_train = messy_df[~messy_df[col].isnull()] # Create X and Y within train dataset msg_cols_train_df = messy_df_train[col] messy_df_train = messy_df_train.drop(colnames, axis = 1) # Create test dataset, containing missing values in Y messy_df_test = messy_df[messy_df[col].isnull()] # Separate X and Y in test dataset msg_cols_test_df = messy_df_test[col] messy_df_test = messy_df_test.drop(colnames,axis = 1) # Copy X_train and Y_train Y_train = msg_cols_train_df.copy() X_train = messy_df_train.copy() # Linear Regression model if metric == "Linear Regression": model = LinearRegression() model.fit(X_train,Y_train) 
print("R-squared value is: " + str(model.score(X_train, Y_train))) # Random Forests regression model elif metric == "Random Forests": model = RandomForestRegressor(n_estimators = 10 , oob_score = True) model.fit(X_train,Y_train) # importances = model.feature_importances_ # indices = np.argsort(importances) # features = X_train.columns # print("Missing values in"+ col) # #plt.title('Feature Importances') # plt.barh(range(len(indices)), importances[indices], color='b', align='center') # plt.yticks(range(len(indices)), features) ## removed [indices] # plt.xlabel('Relative Importance') # plt.show() X_test = messy_df_test.copy() # Predict Y_test values by passing X_test as input to the model Y_test = model.predict(X_test) Y_test_integer = pd.to_numeric(pd.Series(Y_test),downcast='integer') # Append predicted Y values to known Y values Y_complete = Y_train.append(Y_test_integer) Y_complete = Y_complete.reset_index(drop = True) # Update list of missing values missing_list.append(Y_test.tolist()) Y_pred_df[col] = Y_complete Y_pred_df = Y_pred_df.reset_index(drop = True) # Create cleaned up dataframe clean_df = X_df.join(Y_pred_df) return clean_df,missing_list class stat(): def __init__(self): pass def p_val_of_features(self,data,target_column): """ df -> the dataframe label_column -> the column you want to predict this function calculate the P value of the features to know how it affects the regression module for a single label returns: summary report """ # stre is a string variable of independent and dependant columns stre = '{} ~'.format(target_column) for i in data.columns: stre = stre + "{} +".format(i) stre = stre[0:-1] #to remove the last + sign reg_ols = sm.ols(formula=stre, data=data).fit() return reg_ols.summary() #------------------------------------------------------------------------------------------------------------------------------ def na_per_data(self,data): """ df -> dataframe returns: the percentage of Nan values in the whole dataset """ per = 
(((data.isnull().sum()).sum())/ np.product(data.shape))*100 return per class fillnan(): def __init__(self): pass def simpleimputer(self,df): """ df -> dataframe this function fill the nan values with simple techniques like (mean,mode) depending on the data type of the column *also consider filling by median manually before using this method* returns: the dataframe after editing """ for i in df.columns: if df[i].isnull().any() == True : if df[i].dtypes == "object": if len(df[i].unique()) <= 10: df[i].fillna(df[i].mode()[0],inplace=True) if len(df[i].unique()) > 10 : df[i].dropna(inplace=True) if df[i].dtypes == "int64" or df[i].dtypes == "int32" or df[i].dtypes == "float64": if len(df[i].unique()) <= 10 : df[i].fillna(df[i].mode()[0],inplace=True) if len(df[i].unique()) > 10 : df[i].fillna(df[i].mean(),inplace=True) else: df[i].dropna(inplace=True) return df #------------------------------------------------------------------------------------------------------------------------------ def hyperimputer(self,df,metric = "Linear Regression"): # there is also "Random Forests" """ df->dataframe metric ->"Linear Regression" or "Random Forests" models to fill you numeric nan values with this function compines between the simple imputation and the linear regression imputation as in object columns it will impute with the mode and for any numerical number it will impute with linear regression or random forest *also consider filling by median manually before using this method* returns: the dataframe after editing """ for i in df.columns: if df[i].isnull().any() == True : if df[i].dtypes == "object": if len(df[i].unique()) <= 10: df[i].fillna(df[i].mode()[0],inplace=True) if len(df[i].unique()) > 10 : df[i].dropna(inplace=True) if df[i].dtypes == "int64" or df[i].dtypes == "int32" or df[i].dtypes == "float64": if len(df[i].unique()) > 10: df,_ = fill_lin_rand(df,metric,[i]) else: df[i].dropna(inplace=True) return df 
#------------------------------------------------------------------------------------------------------------------------------ def fill_by_nex(self,df,columns=[]): """ df -> dataframe columns -> a list of columns you want to fill this one fill nan values by the next value returns: the dataframe after editing """ for i in columns: df[i] = df[i].fillna(method='bfill', axis=0).fillna(0) return df #------------------------------------------------------------------------------------------------------------------------------ def fill_by_perv(self,df,columns=[]): """ df -> dataframe columns -> a list of columns you want to fill this one fill nan values by the previous value returns: the dataframe after editing """ for i in columns: df[i] = df[i].fillna(method='ffill', axis=0).fillna(0) return df class label(): def __init__(self): pass def to_category(self,df): """ change from and object datatype column into categorie datatype column df-> dataframe returns: the dataframe after editing """ cols = df.select_dtypes(include='object').columns for col in cols: ratio = len(df[col].value_counts()) / len(df) if ratio < 0.05: df[col] = df[col].astype('category') return df #------------------------------------------------------------------------------------------------------------------------------ def freq_labeling(self,df=None,column=None): """ replace objects by how frequent they are in a certain column df -> dataframe column -> column you want to apply this method on returns: the dataframe after editing """ df = df.copy() freq = (df[column].value_counts() /len(df)) d={} for i in freq.index: d[i] = freq[i] df[column] = df[column].map(d) return df class Clean_Data(): def __init__(self): pass def reduce_mem_usage(self,props): """ this funaction to reduce memory usage of dataset props-> dataset you want to reduce returns: the dataframe after editing and Nan values list """ start_mem_usg = props.memory_usage().sum() / 1024**2 print("Memory usage of properties dataframe is 
:",start_mem_usg," MB") NAlist = [] # Keeps track of columns that have missing values filled in. for col in props.columns: if props[col].dtype not in [object, bool]: # Exclude strings # Print current column type print("******************************") print("Column: ",col) print("dtype before: ",props[col].dtype) # make variables for Int, max and min IsInt = False mx = props[col].max() mn = props[col].min() ''' # Integer does not support NA, therefore, NA needs to be filled if not np.isfinite(props[col]).all(): NAlist.append(col) props[col].fillna(mn-1,inplace=True) ''' # test if column can be converted to an integer asint = props[col].fillna(0).astype(np.int64) result = (props[col] - asint) result = result.sum() if result > -0.01 and result < 0.01: IsInt = True # Make Integer/unsigned Integer datatypes if IsInt: if mn >= 0: if mx < 255: props[col] = props[col].astype(np.uint8) elif mx < 65535: props[col] = props[col].astype(np.uint16) elif mx < 4294967295: props[col] = props[col].astype(np.uint32) else: props[col] = props[col].astype(np.uint64) else: if mn > np.iinfo(np.int8).min and mx < np.iinfo(np.int8).max: props[col] = props[col].astype(np.int8) elif mn > np.iinfo(np.int16).min and mx < np.iinfo(np.int16).max: props[col] = props[col].astype(np.int16) elif mn > np.iinfo(np.int32).min and mx < np.iinfo(np.int32).max: props[col] = props[col].astype(np.int32) elif mn > np.iinfo(np.int64).min and mx < np.iinfo(np.int64).max: props[col] = props[col].astype(np.int64) # Make float datatypes 32 bit else: props[col] = props[col].astype(np.float32) # Print new column type print("dtype after: ",props[col].dtype) print("******************************") # Print final result print("___MEMORY USAGE AFTER COMPLETION:___") mem_usg = props.memory_usage().sum() / 1024**2 print("Memory usage is: ",mem_usg," MB") print("This is ",100*mem_usg/start_mem_usg,"% of the initial size") return props, NAlist 
#----------------------------------------------------------------------------------------------------------------------------- def replace_matches(self,df, column, string_to_match, min_ratio = 47): """ if there is simillar words but different format due to a data entry error you can apply this function to calculate similarity percentage and then change them df -> dataframe column -> column to edit string_to_match -> string you want to match and replace min_ratio -> minimum probability to excange returns: the dataframe after editing """ # get a list of unique strings strings = df[column].unique() # get the top 10 closest matches to our input string matches = fuzzywuzzy.process.extract(string_to_match, strings, limit=10, scorer=fuzzywuzzy.fuzz.token_sort_ratio) # only get matches with a ratio > 90 close_matches = [matches[0] for matches in matches if matches[1] >= min_ratio] # get the rows of all the close matches in our dataframe rows_with_matches = df[column].isin(close_matches) # replace all rows with close matches with the input matches df.loc[rows_with_matches, column] = string_to_match # let us know the function's done print("All done!") return df #----------------------------------------------------------------------------------------------------------------------------- def drop_missing(self,df,thresh=55): """ drop the columns if the missing values exceed 60% of it df-> dataframe thresh->percentage of the missing threshold to Delete above returns: the dataframe after editing """ thresh = len(df) * (thresh/100) df.dropna(axis=1, thresh=thresh, inplace=True) return df #------------------------------------------------------------------------------------------------------------------------------ def log_features(self,df=None): """ log the data to remove large gaps between the data after or before removing outliers df -> dataframe you want to apply log function to returns: dataset after applying log """ if 0 in df.values: df= np.log1p(df) if 0 not in df.values: 
df= np.log(df) df[df == -inf] = 0 return df #------------------------------------------------------------------------------------------------------------------------------ def dealing_with_outliers(self,df , type_o = "z-score"): """ this function deals and removes outliers with z-score and Inter-Quartile Range method hint : XGboost deal with it very good (SOTA machine learning model) df -> dataframe type_o -> type of the method you want to choose returns: the dataframe after editing """ if type_o == "z-score": # z-score range. z = np.abs(stats.zscore(df)) df = df[(z < 3).all(axis=1)] if type_o == "IQR" : #Inter-Quartile Range Q1 = df.quantile(0.25) Q3 = df.quantile(0.75) IQR = Q3 - Q1 df = df[~((df < (Q1 - 1.5 * IQR)) |(df > (Q3 + 1.5 * IQR))).any(axis=1)] return df #----------------------------------------------------------------------------------------------------------------------------------- def normalize(self,df,columns=[]): """ normalize columns by your choice in a dataframe df -> dataframe columns -> list of columns to normalize its values returns: the dataframe after editing """ for i in columns: df[i] = stats.boxcox(df[i]) return df #---------------------------------------------------------------------------------------------------------------------------------- def en_de(self,decode=False,typeo="utf-8"): """ encode and decode any string format decode -> if you want to encode assign as False if decode assign True typeo -> the format you want to encode to or decode from returns: the result after encoding or decoding """ #or ascii if decode == True : return bite.decode(typeo) else : return string.encode(typeo, errors="replace")
PypiClean
/Bi_Gau_distributions-0.1.tar.gz/Bi_Gau_distributions-0.1/Bi_Gau_distributions/Gaussiandistribution.py
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution


class Gaussian(Distribution):
    """Gaussian distribution class for calculating and
    visualizing a Gaussian distribution.

    Attributes:
        mean (float): the mean value of the distribution
        stdev (float): the standard deviation of the distribution
        data (list of float): data points read from a data file
    """

    def __init__(self, mu=0, sigma=1):
        """Initialize with mean ``mu`` and standard deviation ``sigma``."""
        Distribution.__init__(self, mu, sigma)

    def calculate_mean(self):
        """Calculate the mean of the data set and store it on the instance.

        Returns:
            float: mean of the data set
        """
        # The 1.0 multiplier forces true division under Python 2 as well.
        self.mean = 1.0 * sum(self.data) / len(self.data)
        return self.mean

    def calculate_stdev(self, sample=True):
        """Calculate the standard deviation of the data set and store it.

        Args:
            sample (bool): whether the data represents a sample (True,
                divide by n - 1, Bessel's correction) or the whole
                population (False, divide by n)

        Returns:
            float: standard deviation of the data set
        """
        n = len(self.data) - 1 if sample else len(self.data)

        mean = self.calculate_mean()

        # Sum of squared deviations from the mean.
        sq_dev = sum((d - mean) ** 2 for d in self.data)

        self.stdev = math.sqrt(sq_dev / n)
        return self.stdev

    def plot_histogram(self):
        """Output a histogram of the instance variable data using the
        matplotlib pyplot library.

        Returns:
            None
        """
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')

    def pdf(self, x):
        """Probability density function calculator for the gaussian
        distribution.

        Args:
            x (float): point for calculating the probability density function

        Returns:
            float: probability density function output
        """
        coeff = 1.0 / (self.stdev * math.sqrt(2 * math.pi))
        exponent = -0.5 * ((x - self.mean) / self.stdev) ** 2
        return coeff * math.exp(exponent)

    def plot_histogram_pdf(self, n_spaces=50):
        """Plot the normalized histogram of the data and a plot of the
        probability density function along the same range.

        Args:
            n_spaces (int): number of data points

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        min_range = min(self.data)
        max_range = max(self.data)

        # Interval between consecutive x sample points.
        interval = 1.0 * (max_range - min_range) / n_spaces

        # x values across the data range and their pdf values.
        x = [min_range + interval * i for i in range(n_spaces)]
        y = [self.pdf(pt) for pt in x]

        # make the plots
        fig, axes = plt.subplots(2, sharex=True)
        fig.subplots_adjust(hspace=.5)
        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')

        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # Bug fix: the original re-labelled axes[0] here, leaving the pdf
        # subplot (axes[1]) without a y-axis label.
        axes[1].set_ylabel('Density')
        plt.show()

        return x, y

    def __add__(self, other):
        """Add together two independent Gaussian distributions.

        Args:
            other (Gaussian): Gaussian instance

        Returns:
            Gaussian: Gaussian whose mean is the sum of the two means and
                whose variance is the sum of the two variances
        """
        result = Gaussian()
        result.mean = self.mean + other.mean
        # Variances (not standard deviations) of independent Gaussians add.
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result

    def __repr__(self):
        """Output the characteristics of the Gaussian instance.

        Returns:
            string: characteristics of the Gaussian
        """
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)
PypiClean
/AyiinXd-0.0.8-cp311-cp311-macosx_10_9_universal2.whl/fipper/methods/messages/wait_for_callback_query.py
import asyncio
from typing import Union
from functools import partial

from fipper import types
from fipper.filters import Filter


class WaitForCallbackQuery:
    async def wait_for_callback_query(
        self,
        chat_id: Union[int, str],
        filters: Filter = None,
        timeout: int = None
    ) -> "types.CallbackQuery":
        """Block until a callback query arrives in the given chat.

        Registers a waiter with the dispatcher's conversation handler and
        resolves once a matching callback query is dispatched.

        Parameters:
            chat_id (``int`` | ``str``):
                Unique identifier (int) or username (str) of the target chat.

            filters (:obj:`Filters`):
                Pass one or more filters to allow only a subset of callback
                queries to be passed in your callback function.

            timeout (``int``, *optional*):
                Timeout in seconds.

        Returns:
            :obj:`~fipper.types.CallbackQuery`: On success, the callback
            query is returned.

        Raises:
            asyncio.TimeoutError: In case the callback query is not received
                within the timeout.
        """
        # Usernames have to be resolved to a numeric id first, because the
        # conversation handler keys its waiters by chat id.
        if not isinstance(chat_id, int):
            chat_id = (await self.get_chat(chat_id)).id

        handler = self.dispatcher.conversation_handler
        fut = self.loop.create_future()

        # Drop the waiter entry as soon as the future completes (result,
        # cancellation, or timeout), so stale waiters never accumulate.
        fut.add_done_callback(partial(handler.delete_waiter, chat_id))

        handler.waiters[chat_id] = {
            "future": fut,
            "filters": filters,
            "update_type": types.CallbackQuery,
        }

        return await asyncio.wait_for(fut, timeout=timeout)
PypiClean
/Mroylib-1.3.0.tar.gz/Mroylib-1.3.0/qlib/asyn/daemon.py
"""Double-fork UNIX daemonization helpers with PID-file bookkeeping."""
import os
import sys
import atexit
import signal
from functools import partial  # NOTE(review): unused in this module — confirm before removing
from termcolor import cprint, colored


def daemonize(pidfile, *,
              stdin='/dev/null',
              stdout='/dev/null',
              stderr='/dev/null'):
    """Detach the current process and keep running as a UNIX daemon.

    Uses the classic double-fork technique: the first fork detaches from
    the parent, ``setsid`` starts a new session, and the second fork gives
    up session leadership so the daemon can never reacquire a controlling
    terminal. On success the function returns in the daemonized process.

    Args:
        pidfile: path of the PID file. Its existence acts as an
            "already running" lock; it is removed again at exit.
        stdin, stdout, stderr: paths the standard streams are redirected to.

    Raises:
        RuntimeError: if the PID file already exists or a fork fails.
    """
    if os.path.exists(pidfile):
        raise RuntimeError('Already running')

    # First fork (detaches from parent)
    try:
        if os.fork() > 0:
            raise SystemExit(0)  # parent exits, child carries on
    except OSError as e:
        raise RuntimeError('fork #1 failed.')

    # Detach from the launching environment: run from /, clear the umask,
    # and become leader of a brand-new session.
    os.chdir('/')
    os.umask(0)
    os.setsid()
    # Second fork (relinquish session leadership so the daemon can never
    # reacquire a controlling terminal)
    try:
        if os.fork() > 0:
            raise SystemExit(0)
    except OSError as e:
        raise RuntimeError('fork #2 failed.')

    # Flush I/O buffers before the descriptors are swapped out below.
    sys.stdout.flush()
    sys.stderr.flush()

    # Replace file descriptors for stdin, stdout, and stderr.
    # dup2 keeps the descriptor numbers valid after the `with` closes f.
    with open(stdin, 'rb', 0) as f:
        os.dup2(f.fileno(), sys.stdin.fileno())
    with open(stdout, 'ab', 0) as f:
        os.dup2(f.fileno(), sys.stdout.fileno())
    with open(stderr, 'ab', 0) as f:
        os.dup2(f.fileno(), sys.stderr.fileno())

    # Write the PID file
    with open(pidfile, 'w') as f:
        print(os.getpid(), file=f)

    # Arrange to have the PID file removed on exit/signal
    atexit.register(lambda: os.remove(pidfile))

    # Signal handler for termination (required: raising SystemExit lets the
    # interpreter unwind normally so the atexit cleanup above still runs)
    def sigterm_handler(signo, frame):
        raise SystemExit(1)

    signal.signal(signal.SIGTERM, sigterm_handler)


def test():
    """Sample daemon body: print a heartbeat line every 10 seconds."""
    import time
    sys.stdout.write('Daemon started with pid {}\n'.format(os.getpid()))
    while True:
        sys.stdout.write('Daemon Alive! {}\n'.format(time.ctime()))
        time.sleep(10)


def run(func):
    """Daemonize the current process, then run *func* in the daemon.

    PID and log file names are derived from ``func.__name__`` and placed
    under /var/run and /var/log.
    # NOTE(review): writing there normally requires root — confirm.
    """
    PID_F = os.path.join("/var/run/", func.__name__ + ".pid")
    LOG_F = os.path.join("/var/log/", func.__name__ + ".log")
    ERROR_F = os.path.join("/var/log/", func.__name__ + ".error.log")
    try:
        print(func.__name__, colored("[start]", "green", attrs=['blink']))
        daemonize(PID_F, stdout=LOG_F, stderr=ERROR_F)
    except RuntimeError as e:
        cprint(e, "red", file=sys.stderr)
        raise SystemExit(1)
    func()


def restart(func):
    """Stop the running daemon for *func*, then start it again.

    NOTE(review): stop() unconditionally raises SystemExit, so the run()
    call below looks unreachable — confirm intended behavior.
    """
    print("--- restart ---")
    stop(func)
    print("--- stop %s wait to starting ---" % func.__name__)
    run(func)
    print("--- start %s ---" % func.__name__)


def stop(func):
    """Stop the daemon for *func* by sending SIGTERM to the PID on file.

    Appends a stop record to /var/log/<name>.normal.log, then exits the
    calling process via SystemExit(1).
    """
    PID_F = os.path.join("/var/run/", func.__name__ + ".pid")
    LOG_F = os.path.join("/var/log/", func.__name__ + ".normal.log")
    for i in [PID_F]:
        if os.path.exists(i):
            # PID file holds the daemon's pid written by daemonize().
            with open(i) as f:
                os.kill(int(f.read()), signal.SIGTERM)
        else:
            print("Not running : ", i, file=sys.stderr)
    from time import asctime
    with open(LOG_F, "a+") as fp:
        print(asctime() ," -- stop --", file=fp)
    cprint("-- stop --", "blue")
    raise SystemExit(1)
PypiClean
/GDM-0.8.2.tar.gz/GDM-0.8.2/gdm/config.py
import os import logging import yorm from . import common from . import shell from .source import Source log = logging.getLogger(__name__) @yorm.attr(all=Source) class Sources(yorm.types.SortedList): """A list of source dependencies.""" @yorm.attr(location=yorm.types.String) @yorm.attr(sources=Sources) @yorm.attr(sources_locked=Sources) @yorm.sync("{self.root}/{self.filename}") class Config: """A dictionary of dependency configuration options.""" FILENAMES = ('gdm.yml', 'gdm.yaml', '.gdm.yml', '.gdm.yaml') def __init__(self, root, filename=FILENAMES[0], location='gdm_sources'): super().__init__() self.root = root self.filename = filename self.location = location self.sources = [] self.sources_locked = [] @property def path(self): """Get the full path to the configuration file.""" return os.path.join(self.root, self.filename) @property def location_path(self): """Get the full path to the sources location.""" return os.path.join(self.root, self.location) def install_deps(self, *names, depth=None, update=True, recurse=False, force=False, fetch=False, clean=True): """Get all sources.""" if depth == 0: log.info("Skipped directory: %s", self.location_path) return 0 if not os.path.isdir(self.location_path): shell.mkdir(self.location_path) shell.cd(self.location_path) sources = self._get_sources(use_locked=False if update else None) dirs = list(names) if names else [source.dir for source in sources] common.show() common.indent() count = 0 for source in sources: if source.dir in dirs: dirs.remove(source.dir) else: log.info("Skipped dependency: %s", source.dir) continue source.update_files(force=force, fetch=fetch, clean=clean) source.create_link(self.root, force=force) count += 1 common.show() config = load() if config: common.indent() count += config.install_deps( depth=None if depth is None else max(0, depth - 1), update=update and recurse, recurse=recurse, force=force, fetch=fetch, clean=clean, ) common.dedent() shell.cd(self.location_path, _show=False) common.dedent() 
if dirs: log.error("No such dependency: %s", ' '.join(dirs)) return 0 return count def lock_deps(self, *names, obey_existing=True): """Lock down the immediate dependency versions.""" shell.cd(self.location_path) common.show() common.indent() sources = self._get_sources(use_locked=obey_existing).copy() dirs = list(names) if names else [source.dir for source in sources] count = 0 for source in sources: if source.dir not in dirs: log.info("Skipped dependency: %s", source.dir) continue try: index = self.sources_locked.index(source) except ValueError: self.sources_locked.append(source.lock()) else: self.sources_locked[index] = source.lock() count += 1 common.show() shell.cd(self.location_path, _show=False) if count: yorm.update_file(self) return count def uninstall_deps(self): """Remove the sources location.""" shell.rm(self.location_path) common.show() def get_deps(self, depth=None, allow_dirty=True): """Yield the path, repository URL, and hash of each dependency.""" if os.path.exists(self.location_path): shell.cd(self.location_path) common.show() common.indent() else: return for source in self.sources: if depth == 0: log.info("Skipped dependency: %s", source.dir) continue yield source.identify(allow_dirty=allow_dirty) common.show() config = load() if config: common.indent() yield from config.get_deps( depth=None if depth is None else max(0, depth - 1), allow_dirty=allow_dirty, ) common.dedent() shell.cd(self.location_path, _show=False) common.dedent() def _get_sources(self, *, use_locked=None): if use_locked is True: if self.sources_locked: return self.sources_locked else: log.info("No locked sources, defaulting to none...") return [] elif use_locked is False: return self.sources else: if self.sources_locked: log.info("Defalting to locked sources...") return self.sources_locked else: log.info("No locked sources, using latest...") return self.sources def load(root=None): """Load the configuration for the current project.""" if root is None: root = os.getcwd() for 
filename in os.listdir(root): if filename.lower() in Config.FILENAMES: config = Config(root, filename) log.debug("Loaded config: %s", config.path) return config log.debug("No config found in: %s", root) return None
PypiClean
/LbelDB-0.2.0.tar.gz/LbelDB-0.2.0/README.md
[![Downloads](https://pepy.tech/badge/lbeldb)](https://pepy.tech/project/lbeldb) [![License: Unlicense](https://img.shields.io/badge/license-Unlicense-lightgreen.svg)](http://unlicense.org/) ![version](https://img.shields.io/badge/Version-0.1.1-brightgreen) # LbelDB.py >**A text based no corruption database for python. Easy to use. Made for beginners** ## Installation Use the package manager pip to install LbelDB.py ```bash pip install LbelDB ``` ## Usage ```python import ldb ldb.init() # general initialization ldb.create(("name","score")) # creates the dbs first labels or columns for i in enumerate(scores): ldb.update_ri(i[0] , 1 , i[1]) # updates the scores in each row ldb.store() # stores to the db to the `.lbel` file ldb.retrieve() # retrieves the db from memory ldb.view() # prints the db to the console ``` ## Contributing Pull requests are welcome. For major changes, please open an issue first to discuss what you would like to change. Please make sure to update tests as appropriate. ## License [Unlicense](https://choosealicense.com/licenses/unlicense/)
PypiClean
/BatchQ-0.1-1-pre-alpha.tar.gz/BatchQ-0.1-1-pre-alpha/docs/tutorial_nhup.rst
Tutorial: nohup Remote Submission ================================= In this tutorial we are going to write a submission module using nohup. Actually, we will not use nohup itself as this is a rather unstable application, but instead we will use the bash equivalent ``([command] )`` as this is a much more stable method. Basic functionality ------------------- By now we have already written the first many small classes using BatchQ and therefore, the only thing we really need to know is which parameters the class should depend on and which methods we should implement. A nohup module should take a command as input parameter as well as a working directory. It should implement the methods startjob, isrunning and clean. Subsequently, these function would need a function that enters the working directory and a function that checks whether a process is running. The implementation is straight forward: .. literalinclude:: ../batchq/contrib/examples/tutorial3_nohup1.py Full functionality ------------------
PypiClean
/NVDA-addonTemplate-0.5.2.zip/NVDA-addonTemplate-0.5.2/NVDAAddonTemplate/data/{{cookiecutter.project_slug}}/scons-local-2.5.0/SCons/Tool/ldc.py
__revision__ = "src/engine/SCons/Tool/ldc.py rel_2.5.0:3543:937e55cd78f7 2016/04/09 11:29:54 bdbaddog" import os import subprocess import SCons.Action import SCons.Builder import SCons.Defaults import SCons.Scanner.D import SCons.Tool import SCons.Tool.DCommon def generate(env): static_obj, shared_obj = SCons.Tool.createObjBuilders(env) static_obj.add_action('.d', SCons.Defaults.DAction) shared_obj.add_action('.d', SCons.Defaults.ShDAction) static_obj.add_emitter('.d', SCons.Defaults.StaticObjectEmitter) shared_obj.add_emitter('.d', SCons.Defaults.SharedObjectEmitter) env['DC'] = env.Detect('ldc2') env['DCOM'] = '$DC $_DINCFLAGS $_DVERFLAGS $_DDEBUGFLAGS $_DFLAGS -c -of=$TARGET $SOURCES' env['_DINCFLAGS'] = '${_concat(DINCPREFIX, DPATH, DINCSUFFIX, __env__, RDirs, TARGET, SOURCE)}' env['_DVERFLAGS'] = '${_concat(DVERPREFIX, DVERSIONS, DVERSUFFIX, __env__)}' env['_DDEBUGFLAGS'] = '${_concat(DDEBUGPREFIX, DDEBUG, DDEBUGSUFFIX, __env__)}' env['_DFLAGS'] = '${_concat(DFLAGPREFIX, DFLAGS, DFLAGSUFFIX, __env__)}' env['SHDC'] = '$DC' env['SHDCOM'] = '$DC $_DINCFLAGS $_DVERFLAGS $_DDEBUGFLAGS $_DFLAGS -c -relocation-model=pic -of=$TARGET $SOURCES' env['DPATH'] = ['#/'] env['DFLAGS'] = [] env['DVERSIONS'] = [] env['DDEBUG'] = [] if env['DC']: SCons.Tool.DCommon.addDPATHToEnv(env, env['DC']) env['DINCPREFIX'] = '-I=' env['DINCSUFFIX'] = '' env['DVERPREFIX'] = '-version=' env['DVERSUFFIX'] = '' env['DDEBUGPREFIX'] = '-debug=' env['DDEBUGSUFFIX'] = '' env['DFLAGPREFIX'] = '-' env['DFLAGSUFFIX'] = '' env['DFILESUFFIX'] = '.d' env['DLINK'] = '$DC' env['DLINKFLAGS'] = SCons.Util.CLVar('') env['DLINKCOM'] = '$DLINK -of=$TARGET $DLINKFLAGS $__DRPATH $SOURCES $_DLIBDIRFLAGS $_DLIBFLAGS' env['DSHLINK'] = '$DC' env['DSHLINKFLAGS'] = SCons.Util.CLVar('$DLINKFLAGS -shared -defaultlib=phobos2-ldc') # Hack for Fedora the packages of which use the wrong name :-( if os.path.exists('/usr/lib64/libphobos-ldc.so') or os.path.exists('/usr/lib32/libphobos-ldc.so') or 
os.path.exists('/usr/lib/libphobos-ldc.so') : env['DSHLINKFLAGS'] = SCons.Util.CLVar('$DLINKFLAGS -shared -defaultlib=phobos-ldc') env['SHDLINKCOM'] = '$DLINK -of=$TARGET $DSHLINKFLAGS $__DSHLIBVERSIONFLAGS $__DRPATH $SOURCES $_DLIBDIRFLAGS $_DLIBFLAGS' env['DLIBLINKPREFIX'] = '' if env['PLATFORM'] == 'win32' else '-L-l' env['DLIBLINKSUFFIX'] = '.lib' if env['PLATFORM'] == 'win32' else '' #env['_DLIBFLAGS'] = '${_concat(DLIBLINKPREFIX, LIBS, DLIBLINKSUFFIX, __env__, RDirs, TARGET, SOURCE)}' env['_DLIBFLAGS'] = '${_stripixes(DLIBLINKPREFIX, LIBS, DLIBLINKSUFFIX, LIBPREFIXES, LIBSUFFIXES, __env__)}' env['DLIBDIRPREFIX'] = '-L-L' env['DLIBDIRSUFFIX'] = '' env['_DLIBDIRFLAGS'] = '${_concat(DLIBDIRPREFIX, LIBPATH, DLIBDIRSUFFIX, __env__, RDirs, TARGET, SOURCE)}' env['DLIB'] = 'lib' if env['PLATFORM'] == 'win32' else 'ar cr' env['DLIBCOM'] = '$DLIB $_DLIBFLAGS {0}$TARGET $SOURCES $_DLIBFLAGS'.format('-c ' if env['PLATFORM'] == 'win32' else '') #env['_DLIBFLAGS'] = '${_concat(DLIBFLAGPREFIX, DLIBFLAGS, DLIBFLAGSUFFIX, __env__)}' env['DLIBFLAGPREFIX'] = '-' env['DLIBFLAGSUFFIX'] = '' # __RPATH is set to $_RPATH in the platform specification if that # platform supports it. 
env['DRPATHPREFIX'] = '-L-rpath=' env['DRPATHSUFFIX'] = '' env['_DRPATH'] = '${_concat(DRPATHPREFIX, RPATH, DRPATHSUFFIX, __env__)}' # Support for versioned libraries env['_DSHLIBVERSIONFLAGS'] = '$DSHLIBVERSIONFLAGS -L-soname=$_DSHLIBSONAME' env['_DSHLIBSONAME'] = '${DShLibSonameGenerator(__env__,TARGET)}' # NOTE: this is a quick hack, the soname will only work if there is # c/c++ linker loaded which provides callback for the ShLibSonameGenerator env['DShLibSonameGenerator'] = SCons.Tool.ShLibSonameGenerator # NOTE: this is only for further reference, currently $DSHLIBVERSION does # not work, the user must use $SHLIBVERSION env['DSHLIBVERSION'] = '$SHLIBVERSION' env['DSHLIBVERSIONFLAGS'] = [] SCons.Tool.createStaticLibBuilder(env) def exists(env): return env.Detect('ldc2') # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
PypiClean
/Axelrod-4.13.0.tar.gz/Axelrod-4.13.0/README.rst
.. image:: https://img.shields.io/pypi/v/Axelrod.svg :target: https://pypi.python.org/pypi/Axelrod .. image:: https://zenodo.org/badge/19509/Axelrod-Python/Axelrod.svg :target: https://zenodo.org/badge/latestdoi/19509/Axelrod-Python/Axelrod .. image:: https://github.com/Axelrod-Python/Axelrod/workflows/CI/badge.svg :target: https://github.com/Axelrod-Python/Axelrod/actions |Join the chat at https://gitter.im/Axelrod-Python/Axelrod| Axelrod ======= Goals ----- A Python library with the following principles and goals: 1. Enabling the reproduction of previous Iterated Prisoner's Dilemma research as easily as possible. 2. Creating the de-facto tool for future Iterated Prisoner's Dilemma research. 3. Providing as simple a means as possible for anyone to define and contribute new and original Iterated Prisoner's Dilemma strategies. 4. Emphasizing readability along with an open and welcoming community that is accommodating for developers and researchers of a variety of skill levels. Features -------- With Axelrod you: - have access `to over 200 strategies <http://axelrod.readthedocs.io/en/stable/reference/all_strategies.html>`_, including original and classics like Tit For Tat and Win Stay Lose Shift. These are extendable through parametrization and a collection of strategy transformers. - can create `head to head matches <http://axelrod.readthedocs.io/en/stable/tutorials/getting_started/match.html>`_ between pairs of strategies. - can create `tournaments <http://axelrod.readthedocs.io/en/stable/tutorials/getting_started/tournament.html>`_ over a number of strategies. - can study population dynamics through `Moran processes <http://axelrod.readthedocs.io/en/stable/tutorials/getting_started/moran.html>`_ and an `infinite population model <http://axelrod.readthedocs.io/en/stable/tutorials/further_topics/ecological_variant.html>`_. 
- can analyse detailed `results of tournaments <http://axelrod.readthedocs.io/en/stable/tutorials/getting_started/summarising_tournaments.html>`_ and matches. - can `visualise results <http://axelrod.readthedocs.io/en/stable/tutorials/getting_started/visualising_results.html>`_ of tournaments. .. image:: http://axelrod.readthedocs.io/en/stable/_images/demo_strategies_boxplot.svg :height: 300 px :align: center - can reproduce a number of contemporary research topics such as `fingerprinting <http://axelrod.readthedocs.io/en/stable/tutorials/further_topics/fingerprinting.html>`_ of strategies and `morality metrics <http://axelrod.readthedocs.io/en/stable/tutorials/further_topics/morality_metrics.html>`_. .. image:: https://github.com/Axelrod-Python/Axelrod-fingerprint/raw/master/assets/Tricky_Defector.png :height: 300 px :align: center The library has 100% test coverage and is extensively documented. See the documentation for details and examples of all the features: http://axelrod.readthedocs.org/ `An open reproducible framework for the study of the iterated prisoner's dilemma <http://openresearchsoftware.metajnl.com/article/10.5334/jors.125/>`_: a peer reviewed paper introducing the library (22 authors). Installation ------------ The library is tested on Python versions 3.8, 3.9, and 3.10. 
The simplest way to install is:: $ pip install axelrod To install from source:: $ git clone https://github.com/Axelrod-Python/Axelrod.git $ cd Axelrod $ python setup.py install Quick Start ----------- The following runs a basic tournament:: >>> import axelrod as axl >>> players = [s() for s in axl.demo_strategies] # Create players >>> tournament = axl.Tournament(players, seed=1) # Create a tournament >>> results = tournament.play() # Play the tournament >>> results.ranked_names ['Defector', 'Grudger', 'Tit For Tat', 'Cooperator', 'Random: 0.5'] Examples -------- - https://github.com/Axelrod-Python/tournament is a tournament pitting all the strategies in the repository against each other. These results can be easily viewed at http://axelrod-tournament.readthedocs.org. - https://github.com/Axelrod-Python/Axelrod-notebooks contains a set of example Jupyter notebooks. - https://github.com/Axelrod-Python/Axelrod-fingerprint contains fingerprints (data and plots) of all strategies in the library. Contributing ------------ All contributions are welcome! You can find helpful instructions about contributing in the documentation: https://axelrod.readthedocs.io/en/latest/how-to/contributing/index.html Publications ------------ You can find a list of publications that make use of or cite the library on the `citations <https://github.com/Axelrod-Python/Axelrod/blob/master/citations.md>`_ page. Contributors ------------ The library has had many awesome contributions from many `great contributors <https://github.com/Axelrod-Python/Axelrod/graphs/contributors>`_. The Core developers of the project are: - `drvinceknight <https://github.com/drvinceknight>`_ - `gaffney2010 <https://github.com/gaffney2010>`_ - `marcharper <https://github.com/marcharper>`_ - `meatballs <https://github.com/meatballs>`_ - `nikoleta-v3 <https://github.com/Nikoleta-v3>`_ .. 
|Join the chat at https://gitter.im/Axelrod-Python/Axelrod| image:: https://badges.gitter.im/Join%20Chat.svg :target: https://gitter.im/Axelrod-Python/Axelrod?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
PypiClean
/Fortpy-1.7.7.tar.gz/Fortpy-1.7.7/fortpy/templates/ftypes.py
import weakref
from numpy import require, array, asanyarray, dtype as _dtype
from numpy.ctypeslib import ndpointer, as_array as farray
from ctypes import (c_int, c_double, POINTER, c_bool, c_float, c_short,
                    c_long, byref, c_void_p, addressof)

# Convenience pointer aliases used throughout the generated wrappers.
c_int_p = POINTER(c_int)
c_double_p = POINTER(c_double)
c_float_p = POINTER(c_float)
c_bool_p = POINTER(c_bool)
c_short_p = POINTER(c_short)
c_long_p = POINTER(c_long)

libs = {}
"""The dict of shared libraries loaded so far by this module. Indexed by
the lower-case path to the *.so."""
compilers = {}
"""Dict of compilers found for each shared library. Indexed by lower-case path to
the shared library. Value is either 'gfortran' or 'ifort'.
"""
fresults = {}
"""Dict of FtypeResult instances indexed by the 'module.executable' key of the
wrapper subroutine that generated the output. Used to manage shared memory
being overwritten from repeated calls to the same executables.
"""

def as_array(pointer, shape):
    """Returns a contiguous (c-ordered) array for the specified pointer
    returned from a fortran allocatable array.

    :arg shape: a tuple of the integer size of each dimension.
    """
    # farray yields a row-major (C-ordered) view; the transpose recovers the
    # column-major layout that the Fortran code wrote.
    return farray(pointer, shape).T

def static_symbol(module, method, lib, bindc=False):
    """Returns the symbol for the specified *fortran* module and
    subroutine/function that has been compiled into a shared library.

    :arg module: name of the Fortran module containing the routine.
    :arg method: name of the subroutine/function inside the module.
    :arg lib: the full path to the shared library *.so file.
    :arg bindc: when True the routine was compiled with bind(c), so the
      symbol is the plain routine name with no compiler mangling.
    :returns: the ctypes function handle, or None when the symbol is absent.
    :raises ValueError: when the compiler cannot be detected or the library
      cannot be loaded.
    """
    from os import path
    from numpy.ctypeslib import load_library
    libpath = path.abspath(lib)
    libkey = libpath.lower()
    if libkey not in compilers:
        compilers[libkey] = detect_compiler(libpath)
    if compilers[libkey] is False:
        raise ValueError("Couldn't auto-detect the compiler for {}".format(libpath))
    compiler = compilers[libkey]

    if libkey not in libs:
        libs[libkey] = load_library(libpath, "")
    if not libs[libkey]:
        raise ValueError("Couldn't auto-load the shared library with ctypes.")

    slib = libs[libkey]
    identifier = "{}.{}".format(module, method)
    if bindc:
        symbol = method
    elif compiler == "gfortran":
        # gfortran mangles module routines as __<module>_MOD_<routine>.
        symbol = "__" + identifier.lower().replace(".", "_MOD_")
    else:
        # We assume the only other option is ifort: <module>_mp_<routine>_.
        symbol = identifier.lower().replace(".", "_mp_") + "_"

    if hasattr(slib, symbol):
        return getattr(slib, symbol)
    else:
        return None

def detect_compiler(libpath):
    """Determines the compiler used to compile the specified shared library by
    inspecting its symbol table with the system `nm` utility.

    :arg libpath: the full path to the shared library *.so file.
    :returns: 'gfortran' or 'ifort' when the mangling style is recognized;
      False when neither pattern appears in the output.
    """
    from os import path
    from subprocess import Popen, PIPE
    # Pass an argument list (no shell) so unusual characters in the path are
    # never shell-interpreted; communicate() drains stdout fully, avoiding the
    # deadlock that waitpid() + a filled pipe buffer can cause on big symbol
    # tables.
    child = Popen(["nm", path.abspath(libpath)], stdout=PIPE)
    output, _ = child.communicate()
    # stdout is bytes under python 3; compare byte literals so the membership
    # test works instead of raising TypeError.
    found = False
    for line in output.splitlines():
        if b"_MOD_" in line:
            found = "gfortran"
            break
        elif b"_mp_" in line:
            found = "ifort"
            break
    return found

class Ftype(object):
    """Represents an output data set from python for interacting with a
    fortran shared library. Tracks a c-pointer into Fortran-managed memory so
    that it can be deallocated exactly once via the ftypes_dealloc helpers.
    """
    def __init__(self, pointer, indices, libpath):
        """
        :arg pointer: the c-pointer to the memory address.
        :arg indices: the integer size of each dimension in the pointer array.
        :arg libpath: path to the shared library that allocated the memory.
        """
        self.pointer = pointer
        self.indices = indices
        self.libpath = libpath
        # Whether the Fortran memory behind self.pointer has been released.
        self.deallocated = False

    def clean(self):
        """Deallocates the fortran-managed memory that this ctype references.
        Safe to call multiple times; only the first call releases memory.
        """
        if not self.deallocated:
            # Release/deallocate the pointer in fortran.
            method = self._deallocator()
            if method is not None:
                dealloc = static_symbol("ftypes_dealloc", method, self.libpath, True)
                if dealloc is None:
                    # Leave self.deallocated False so a later call can retry
                    # once the symbol becomes available.
                    return
                arrtype = ndpointer(dtype=int, ndim=1, shape=(len(self.indices),), flags="F")
                dealloc.argtypes = [c_void_p, c_int_p, arrtype]
                # NOTE(review): assumes every entry of self.indices is a
                # ctypes integer (has .value) -- confirm against the callers.
                nindices = require(array([i.value for i in self.indices]), int, "F")
                dealloc(byref(self.pointer), c_int(len(self.indices)), nindices)
            self.deallocated = True

    def _deallocator(self):
        """Returns the name of the subroutine in ftypes_dealloc.f90 that can
        deallocate the array for this Ftype's pointer, or None when the
        pointer's scalar type is not one we know how to free.
        """
        lookup = {
            "c_bool": "logical",
            "c_double": "double",
            "c_double_complex": "complex",
            "c_char": "char",
            "c_int": "int",
            "c_float": "float",
            "c_short": "short",
            "c_long": "long"
        }
        # A ctypes pointer type is named like 'LP_c_int'; strip the pointer
        # prefix to recover the underlying scalar c-type name.
        ctype = type(self.pointer).__name__.replace("LP_", "").lower()
        if ctype in lookup:
            return "dealloc_{0}_{1:d}d".format(lookup[ctype], len(self.indices))
        else:
            return None

class FtypesResult(object):
    """Represents the result from executing a fortran subroutine or function
    using the ctypes interface.
    """
    def __init__(self, module, name, utype):
        """
        :arg module: name of the Fortran module that owns the executable.
        :arg name: the name of the subroutine/function in the *original* code.
        :arg utype: the underlying type of the executable. Either 'subroutine'
          or 'function'. Affects the behavior of calling an instance.
        """
        self.identifier = "{}.{}".format(module, name).lower()
        self.utype = utype
        # Python-typed values of the intent "out"/"inout" parameters.
        self.result = {}
        # Ftype instances with c-pointer information for finalizing the
        # fortran arrays, keyed by output variable name.
        self._finalizers = {}

        # Capture only the finalizer dict (not self): a callback that closed
        # over self would hold a strong reference to the instance, so the
        # weakref could never die and the Fortran memory would leak.
        finalizers = self._finalizers
        def on_die(kref):
            for key in list(finalizers.keys()):
                finalizers[key].clean()
        self._del_ref = weakref.ref(self, on_die)

        if self.identifier in fresults:
            # Cleanup the previous result obtained from this method (this
            # involves reallocating and copying the values to a new array).
            # Change the active result (i.e. with active pointers to
            # fortran-managed memory) to be this new one.
            fresults[self.identifier].cleanup()
        fresults[self.identifier] = self

    def cleanup(self):
        """Cleans up this result so that all its pointers reference memory
        controlled *outside* of the shared library loaded with ctypes.
        """
        # First we *copy* the arrays that we currently have pointers to. This
        # is not the optimal solution; however, because of limitations in
        # ctypes, we don't know anything better at the moment. Scalar results
        # (plain ints/floats) have no .copy() and need no copying.
        for key in self.result:
            value = self.result[key]
            if hasattr(value, "copy"):
                value = value.copy()
                self.result[key] = value
                # Keep the attribute shortcut set by add() in sync.
                setattr(self, key, value)
        # Now deallocate the pointers managed by Fortran in the shared library
        # so that any subsequent calls to the executable that generated this
        # result creates new instances in memory.
        for key in list(self._finalizers.keys()):
            self._finalizers[key].clean()

    def add(self, varname, result, pointer=None):
        """Adds the specified python-typed result and an optional Ftype
        pointer to use when cleaning up this object.

        :arg varname: name of the output variable; also set as an attribute.
        :arg result: a python-typed representation of the result.
        :arg pointer: an instance of Ftype with pointer information for
          deallocating the c-pointer.
        """
        self.result[varname] = result
        setattr(self, varname, result)
        if pointer is not None:
            self._finalizers[varname] = pointer

    def __call__(self, key=None):
        """Returns the value of the result from calling the
        subroutine/function for the given key.
        """
        if self.utype == "function":
            # NOTE(review): this looks up the *literal* key "{}_o"; it only
            # works if the generated wrapper stored the function result under
            # exactly that name. Presumably the intent was
            # "{}_o".format(<result name>) -- confirm against the code
            # templates before changing it.
            return self.result["{}_o"]
        elif key is not None:
            return self.result[key]
        elif len(self.result) == 1:
            return list(self.result.values())[0]
        else:
            raise ValueError("Can't call result without a key for subroutines.")
PypiClean
/Becky-0.7.tar.gz/Becky-0.7/becky_cli/run.py
import argparse
import sys


def add_add_params(parser):
    """Attach the 'add' sub-command arguments: add a backup location or a
    provider/scanner parameter to an existing backup."""
    subparsers = parser.add_subparsers(help='What to add', dest='action_add', required=True)
    location_parser = subparsers.add_parser('location')
    location_parser.add_argument('--path', required=True)
    param_parser = subparsers.add_parser('param')
    param_parser.add_argument('--type', help="provider or scanner", required=True)
    param_parser.add_argument('--key', required=True)
    param_parser.add_argument('--value', required=True)

def add_create_params(parser):
    """Attach the 'create' sub-command arguments (provider/scanner choice
    plus their repeatable key/value parameters)."""
    parser.add_argument('--provider', required=True)
    parser.add_argument('--provider_param', action='append', nargs='*')
    parser.add_argument('--scanner', required=True)
    parser.add_argument('--scanner_param', action='append', nargs='*')

def add_cron_params(parser):
    """Attach the 'cron' sub-command arguments; --enable/--disable toggle the
    cron_enabled flag, which defaults to enabled."""
    parser.add_argument('--name', required=True)
    parser.add_argument('--enable', dest='cron_enabled', action='store_true')
    parser.add_argument('--disable', dest='cron_enabled', action='store_false')
    parser.add_argument('--schedule')
    parser.set_defaults(cron_enabled=True)

def add_show_params(parser):
    """Attach the 'show' sub-command arguments."""
    parser.add_argument('--type', dest='show_type', help="What data to show. info / saves / diffs", required=True)

def add_delete_params(parser):
    """Attach the 'delete' sub-command arguments (what to delete)."""
    subparsers = parser.add_subparsers(help="What to delete", dest='action_delete', required=True)
    subparsers.add_parser('diffs')
    subparsers.add_parser('saves')

def add_files_params(parser):
    """Attach the 'files' sub-command arguments."""
    parser.add_argument('--path', help="Shows any backed up files at the given path.", required=True)
    parser.add_argument('--timestamp', help="Specify the timestamp to use.")

def add_restore_params(parser):
    """Attach the 'restore' sub-command arguments."""
    parser.add_argument('--path', help="The file/folder (recursive) to be restored.", required=True)
    parser.add_argument('--restore_path', help="Location of restore folder.", required=True)
    parser.add_argument('--timestamp', help="Specify the timestamp to use.", required=True)

def main():
    """Parse the command line and dispatch to the matching BackupManager
    action."""
    parser = argparse.ArgumentParser(description='CLI backupper')
    subparsers = parser.add_subparsers(help='Action to take', dest='action', required=True)
    create_parser = subparsers.add_parser('create')
    add_create_params(create_parser)
    add_parser = subparsers.add_parser('add')
    add_add_params(add_parser)
    show_parser = subparsers.add_parser('show')
    add_show_params(show_parser)
    subparsers.add_parser('run')
    delete_parser = subparsers.add_parser('delete')
    add_delete_params(delete_parser)
    files_parser = subparsers.add_parser('files')
    add_files_params(files_parser)
    restore_parser = subparsers.add_parser('restore')
    add_restore_params(restore_parser)
    subparsers.add_parser('list')
    cron_parser = subparsers.add_parser('cron')
    add_cron_params(cron_parser)
    parser.add_argument('--name')
    args = parser.parse_args()
    vargs = vars(args)

    # Imported lazily so that argument parsing (and --help / usage errors)
    # works without loading the backup machinery and its dependencies.
    from becky_cli.becky.backups.backup_manager import BackupManager
    backup_manager = BackupManager()
    if args.action == 'create':
        backup_manager.create(vargs)
    elif args.action == 'add':
        if args.action_add == 'location':
            backup_manager.add_backup_location(vargs)
        elif args.action_add == 'param':
            backup_manager.add_parameter(vargs)
    elif args.action == 'show':
        backup_manager.show_backup(vargs)
    elif args.action == 'run':
        backup_manager.run_backup(vargs)
    elif args.action == 'delete':
        backup_manager.delete(vargs)
    elif args.action == 'files':
        backup_manager.show_files(vargs)
    elif args.action == 'restore':
        backup_manager.restore_files(vargs)
    elif args.action == 'list':
        backup_manager.list_backups(vargs)
    elif args.action == 'cron':
        # An enabled cron entry is meaningless without a schedule.
        if args.cron_enabled:
            if not args.schedule:
                parser.error('cron + enabled requires --schedule.')
        backup_manager.set_cron(vargs)

if __name__ == "__main__":
    main()
PypiClean
/AWAKE_ANALYSIS_TOOLS-0.0.2-py3-none-any.whl/japc_support/AwkCmra.py
import numpy as np
import pyjapc
import os
import sys
# Hard-coded operational install path; the commented alternative is the AFS
# development checkout. TODO(review): make this configurable instead of a
# module-level side effect.
os.environ['AAT'] = '/user/awakeop/AWAKE_ANALYSIS_TOOLS/'
#os.environ['AAT'] = '/afs/cern.ch/user/s/sgess/AWAKE_ANALYSIS_TOOLS/'
sys.path.append(os.environ['AAT']+'analyses/')
import frame_analysis as fa

'''
Class for AWAKE Camera Properties
'''
class AwakeCamera():
    def __init__(self,device,name,system,mode,japc):
        """Build the JAPC property handles for this camera and fetch an
        initial image.

        :param device: JAPC device name of the camera.
        :param name: human-readable camera name (stored, not otherwise used
          in this class).
        :param system: acquisition system; must be 'PXI' or 'BTV'.
        :param mode: timing mode; must be 'EXT' (extraction) or 'LASER'.
        :param japc: an already-configured pyjapc instance used for all
          parameter access and subscriptions.
        """
        self.device = device
        self.name = name
        self.system = system
        self.mode = mode
        self.japc = japc
        self.run_ana = True           # run frame analysis after each acquisition
        self.fit_gauss = False        # forwarded to FrameAna
        self.median_filter = False    # forwarded to FrameAna
        self.fillCamHandles()
        self.initCam()

    def fillCamHandles(self):
        """Resolve the system/mode-specific JAPC property and field names into
        attributes, ending with self.acq_string (image acquisition parameter)
        and, for PXI, self.sys_string (settings parameter).

        NOTE(review): on an unrecognized system/mode this only prints and
        returns, leaving handles unset; __init__ then continues into
        initCam(), which will fail on the missing attributes -- confirm
        whether raising would be safer.
        """
        if self.system == 'PXI':
            self.settings_prop = 'PublishedSettings'
            self.exposure_field = 'exposureTime'
            self.delay_field = 'delayTime'
            self.height_str = 'height'
            self.width_str = 'width'
            self.pixel_str = 'pixelSize'
            self.x_ax_str = ''
            self.y_ax_str = ''
            self.timestamp_str = 'imageTimeStamp'
            if self.mode == 'EXT':
                self.image_prop = 'ExtractionImage'
                self.image_str = 'imageRawData'
                self.timingSelector = 'SPS.USER.AWAKE1'
            elif self.mode == 'LASER':
                self.image_prop = 'CameraImage'
                self.image_str = 'image'
                self.timingSelector = 'SPS.USER.ALL'
            else:
                print('GTFOH')
                return
            self.acq_string = self.device + '/' + self.image_prop
            self.sys_string = self.device + '/' + self.settings_prop
        elif self.system == 'BTV':
            self.image_prop = 'Image'
            self.settings_prop = ''
            self.image_str = 'imageSet'
            self.height_str = 'nbPtsInSet1'
            self.width_str = 'nbPtsInSet2'
            self.pixel_str = ''
            self.x_ax_str = 'imagePositionSet1'
            self.y_ax_str = 'imagePositionSet2'
            self.timestamp_str = ''
            if self.mode == 'EXT':
                self.timingSelector = 'SPS.USER.AWAKE1'
            elif self.mode == 'LASER':
                # Laser-mode BTV devices are addressed under a '.LASER' suffix.
                self.device = self.device + '.LASER'
                self.timingSelector = 'SPS.USER.ALL'
            else:
                print('GTFOH')
                return
            self.acq_string = self.device + '/' + self.image_prop
        else:
            print('GTFOH')
            return

    def initCam(self):
        """Fetch the first image (and, for PXI, the camera settings), then run
        the initial frame analysis if self.run_ana is set."""
        if self.system == 'PXI':
            self.initPXI()
            self.getSystem()
        elif self.system == 'BTV':
            self.initBTV()
        else:
            print('GTFOH')
            return
        if self.run_ana:
            self.analyze()

    def initPXI(self):
        """Read a full PXI acquisition and derive centered x/y axes (in the
        same units as pixelSize) plus the ROI covering the whole frame."""
        camData = self.async_get(self.acq_string)
        if self.mode == 'EXT':
            self.px_sz = camData[self.pixel_str]
        elif self.mode == 'LASER':
            # Factor 5.0 on the laser camera pixel size -- presumably optical
            # magnification/demagnification of that line; TODO confirm.
            self.px_sz = 5.0*camData[self.pixel_str]
        else:
            print('GTFOH')
            return
        self.image = camData[self.image_str]
        self.width = camData[self.width_str]
        self.height = camData[self.height_str]
        # Center both axes on zero.
        x_ax = self.px_sz*np.arange(self.width)
        self.x_ax = x_ax - np.mean(x_ax)
        y_ax = self.px_sz*np.arange(self.height)
        self.y_ax = y_ax - np.mean(y_ax)
        self.roi = [self.x_ax[0],self.x_ax[-1],self.y_ax[0],self.y_ax[-1]]

    def initBTV(self):
        """Read a full BTV acquisition; the image arrives as a flat vector and
        is reshaped to (width, height).

        NOTE(review): reshape order (width, height) and the derived pixel size
        assume the device publishes row-major data with uniform axis spacing
        -- confirm against the BTV device documentation.
        """
        camData = self.async_get(self.acq_string)
        im_vec = camData[self.image_str]
        self.width = camData[self.width_str]
        self.height = camData[self.height_str]
        self.image = np.reshape(im_vec,(self.width,self.height))
        self.x_ax = camData[self.x_ax_str]
        self.y_ax = camData[self.y_ax_str]
        self.px_sz = self.x_ax[1] - self.x_ax[0]
        self.roi = [self.x_ax[0],self.x_ax[-1],self.y_ax[0],self.y_ax[-1]]

    def getSystem(self):
        """Read exposure and delay time from the PXI settings property."""
        sysData = self.async_get(self.sys_string)
        self.exp_time = sysData[self.exposure_field]
        self.del_time = sysData[self.delay_field]

    def updateImage(self):
        """Pull a fresh image (single GET, no subscription) and re-run the
        analysis if enabled."""
        if self.system == 'PXI':
            self.image = self.async_get(self.acq_string+'#'+self.image_str)
        elif self.system == 'BTV':
            im_vec = self.async_get(self.acq_string+'#'+self.image_str)
            self.image = np.reshape(im_vec,(self.width,self.height))
        else:
            print('GTFOH')
            return
        if self.run_ana:
            self.analyze()

    def analyze(self):
        """Run frame analysis on the current image with the configured
        options; results live on self.frame_ana."""
        self.frame_ana = fa.FrameAna(self.image,self.x_ax,self.y_ax,self.roi)
        self.frame_ana.fit_gauss = self.fit_gauss
        self.frame_ana.median_filter = self.median_filter
        self.frame_ana.analyze_frame()

    def async_get(self,param):
        """GET a JAPC parameter using this camera's timing selector."""
        return self.japc.getParam(param,timingSelectorOverride=self.timingSelector)

    def subCallback(self,name,image):
        """Default subscription callback: store the incoming image (reshaping
        BTV vectors) and optionally analyze it."""
        if self.system == 'PXI':
            self.image = image
        elif self.system == 'BTV':
            self.image = np.reshape(image,(self.width,self.height))
        else:
            print('GTFOH')
            return
        if self.run_ana:
            self.analyze()

    def start_sub(self):
        """Subscribe to the image field with the default callback and start
        the JAPC subscription loop."""
        self.japc.subscribeParam(self.acq_string+'#'+self.image_str,self.subCallback)
        self.sub_state = True
        self.japc.startSubscriptions()

    def start_ExtSub(self,extFunc):
        """Subscribe to the image field with a caller-supplied callback
        (same signature as subCallback) and start the subscription loop."""
        self.japc.subscribeParam(self.acq_string+'#'+self.image_str,extFunc)
        self.sub_state = True
        self.japc.startSubscriptions()

    def stop_sub(self):
        """Stop and clear all JAPC subscriptions for this japc instance."""
        self.japc.stopSubscriptions()
        self.japc.clearSubscriptions()
        self.sub_state = False
PypiClean
/CsuPMTD-1.0.27.tar.gz/CsuPMTD-1.0.27/PMTD/maskrcnn_benchmark/modeling/roi_heads/keypoint_head/loss.py
import torch
from torch.nn import functional as F

from PMTD.maskrcnn_benchmark.modeling.matcher import Matcher
from PMTD.maskrcnn_benchmark.modeling.balanced_positive_negative_sampler import (
    BalancedPositiveNegativeSampler,
)
from PMTD.maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
from PMTD.maskrcnn_benchmark.modeling.utils import cat
from PMTD.maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist
from PMTD.maskrcnn_benchmark.structures.keypoint import keypoints_to_heat_map


def project_keypoints_to_heatmap(keypoints, proposals, discretization_size):
    """Discretize keypoints into per-proposal heatmap targets.

    Converts the proposals to xyxy format and delegates to
    keypoints_to_heat_map; returns whatever that helper returns (used as
    (heatmaps, valid) by the caller below).
    """
    proposals = proposals.convert("xyxy")
    return keypoints_to_heat_map(
        keypoints.keypoints, proposals.bbox, discretization_size
    )


def cat_boxlist_with_keypoints(boxlists):
    """Concatenate BoxLists that all carry a "keypoints" field, preserving
    the concatenated keypoints on the result."""
    assert all(boxlist.has_field("keypoints") for boxlist in boxlists)
    kp = [boxlist.get_field("keypoints").keypoints for boxlist in boxlists]
    kp = cat(kp, 0)
    # Concatenate everything except keypoints, then re-attach the merged
    # keypoint tensor as a single field.
    fields = boxlists[0].get_fields()
    fields = [field for field in fields if field != "keypoints"]
    boxlists = [boxlist.copy_with_fields(fields) for boxlist in boxlists]
    boxlists = cat_boxlist(boxlists)
    boxlists.add_field("keypoints", kp)
    return boxlists


def _within_box(points, boxes):
    """Validate which keypoints are contained inside a given box.

    points: NxKx2
    boxes: Nx4
    output: NxK (boolean mask per keypoint)
    """
    x_within = (points[..., 0] >= boxes[:, 0, None]) & (
        points[..., 0] <= boxes[:, 2, None]
    )
    y_within = (points[..., 1] >= boxes[:, 1, None]) & (
        points[..., 1] <= boxes[:, 3, None]
    )
    return x_within & y_within


class KeypointRCNNLossComputation(object):
    """Computes the keypoint-head training loss: matches proposals to ground
    truth, subsamples positives/negatives, and evaluates a cross-entropy over
    the discretized keypoint heatmaps."""

    def __init__(self, proposal_matcher, fg_bg_sampler, discretization_size):
        """
        Arguments:
            proposal_matcher (Matcher)
            fg_bg_sampler (BalancedPositiveNegativeSampler)
            discretization_size (int)
        """
        self.proposal_matcher = proposal_matcher
        self.fg_bg_sampler = fg_bg_sampler
        self.discretization_size = discretization_size

    def match_targets_to_proposals(self, proposal, target):
        """Match each proposal to its best ground-truth box and return the
        matched targets (with a "matched_idxs" field attached)."""
        match_quality_matrix = boxlist_iou(target, proposal)
        matched_idxs = self.proposal_matcher(match_quality_matrix)
        # Keypoint RCNN needs "labels" and "keypoints" fields for creating
        # the targets
        target = target.copy_with_fields(["labels", "keypoints"])
        # get the targets corresponding GT for each proposal
        # NB: need to clamp the indices because we can have a single
        # GT in the image, and matched_idxs can be -2, which goes
        # out of bounds
        matched_targets = target[matched_idxs.clamp(min=0)]
        matched_targets.add_field("matched_idxs", matched_idxs)
        return matched_targets

    def prepare_targets(self, proposals, targets):
        """Build per-image label and keypoint targets for the proposals.

        Labels: 0 for background matches, -1 for proposals whose keypoints
        are all invisible/outside the matched box (ignored by the sampler).
        """
        labels = []
        keypoints = []
        for proposals_per_image, targets_per_image in zip(proposals, targets):
            matched_targets = self.match_targets_to_proposals(
                proposals_per_image, targets_per_image
            )
            matched_idxs = matched_targets.get_field("matched_idxs")

            labels_per_image = matched_targets.get_field("labels")
            labels_per_image = labels_per_image.to(dtype=torch.int64)

            # this can probably be removed, but is left here for clarity
            # and completeness
            # TODO check if this is the right one, as BELOW_THRESHOLD
            neg_inds = matched_idxs == Matcher.BELOW_LOW_THRESHOLD
            labels_per_image[neg_inds] = 0

            keypoints_per_image = matched_targets.get_field("keypoints")
            within_box = _within_box(
                keypoints_per_image.keypoints, matched_targets.bbox
            )
            # A keypoint contributes only if its visibility flag (3rd coord)
            # is positive AND it lies inside the matched box.
            vis_kp = keypoints_per_image.keypoints[..., 2] > 0
            is_visible = (within_box & vis_kp).sum(1) > 0

            labels_per_image[~is_visible] = -1

            labels.append(labels_per_image)
            keypoints.append(keypoints_per_image)

        return labels, keypoints

    def subsample(self, proposals, targets):
        """
        This method performs the positive/negative sampling, and return
        the sampled proposals.
        Note: this function keeps a state.

        Arguments:
            proposals (list[BoxList])
            targets (list[BoxList])
        """
        labels, keypoints = self.prepare_targets(proposals, targets)
        sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)

        proposals = list(proposals)
        # add corresponding label and regression_targets information to the bounding boxes
        for labels_per_image, keypoints_per_image, proposals_per_image in zip(
            labels, keypoints, proposals
        ):
            proposals_per_image.add_field("labels", labels_per_image)
            proposals_per_image.add_field("keypoints", keypoints_per_image)

        # distributed sampled proposals, that were obtained on all feature maps
        # concatenated via the fg_bg_sampler, into individual feature map levels
        # NOTE(review): only the positive indices are kept here; neg_inds_img
        # is unpacked but unused -- presumably intentional since keypoint loss
        # is computed on positives only.
        for img_idx, (pos_inds_img, neg_inds_img) in enumerate(
            zip(sampled_pos_inds, sampled_neg_inds)
        ):
            img_sampled_inds = torch.nonzero(pos_inds_img).squeeze(1)
            proposals_per_image = proposals[img_idx][img_sampled_inds]
            proposals[img_idx] = proposals_per_image

        # State kept for use between subsample() and the loss call.
        self._proposals = proposals
        return proposals

    def __call__(self, proposals, keypoint_logits):
        """Compute the keypoint cross-entropy loss for the given (already
        subsampled) proposals against the predicted logits of shape
        (N, K, H, W)."""
        heatmaps = []
        valid = []
        for proposals_per_image in proposals:
            kp = proposals_per_image.get_field("keypoints")
            heatmaps_per_image, valid_per_image = project_keypoints_to_heatmap(
                kp, proposals_per_image, self.discretization_size
            )
            heatmaps.append(heatmaps_per_image.view(-1))
            valid.append(valid_per_image.view(-1))

        keypoint_targets = cat(heatmaps, dim=0)
        valid = cat(valid, dim=0).to(dtype=torch.uint8)
        valid = torch.nonzero(valid).squeeze(1)

        # torch.mean (in binary_cross_entropy_with_logits) doesn't
        # accept empty tensors, so handle it separately
        if keypoint_targets.numel() == 0 or len(valid) == 0:
            # Return a differentiable zero so backward still works.
            return keypoint_logits.sum() * 0

        N, K, H, W = keypoint_logits.shape
        # One H*W-way classification per keypoint of each proposal.
        keypoint_logits = keypoint_logits.view(N * K, H * W)

        keypoint_loss = F.cross_entropy(keypoint_logits[valid], keypoint_targets[valid])
        return keypoint_loss


def make_roi_keypoint_loss_evaluator(cfg):
    """Factory: build a KeypointRCNNLossComputation from the config's ROI
    matcher thresholds, sampler batch settings and heatmap resolution."""
    matcher = Matcher(
        cfg.MODEL.ROI_HEADS.FG_IOU_THRESHOLD,
        cfg.MODEL.ROI_HEADS.BG_IOU_THRESHOLD,
        allow_low_quality_matches=False,
    )
    fg_bg_sampler = BalancedPositiveNegativeSampler(
        cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE, cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION
    )
    resolution = cfg.MODEL.ROI_KEYPOINT_HEAD.RESOLUTION
    loss_evaluator = KeypointRCNNLossComputation(matcher, fg_bg_sampler, resolution)
    return loss_evaluator
PypiClean
/DACBench-0.2.0.tar.gz/DACBench-0.2.0/dacbench/envs/fast_downward.py
import os import socket import subprocess import time import typing from copy import deepcopy from enum import Enum from os import remove from os.path import join as joinpath import numpy as np from dacbench import AbstractEnv class StateType(Enum): """Class to define numbers for state types""" RAW = 1 DIFF = 2 ABSDIFF = 3 NORMAL = 4 NORMDIFF = 5 NORMABSDIFF = 6 class FastDownwardEnv(AbstractEnv): """ Environment to control Solver Heuristics of FastDownward """ def __init__(self, config): """ Initialize FD Env Parameters ------- config : objdict Environment configuration """ super(FastDownwardEnv, self).__init__(config) self._heuristic_state_features = [ "Average Value", # 'Dead Ends Reliable', "Max Value", "Min Value", "Open List Entries", "Varianz", ] self._general_state_features = [ # 'evaluated_states', 'evaluations', 'expanded_states', # 'generated_ops', # 'generated_states', 'num_variables', # 'registered_states', 'reopened_states', # "cg_num_eff_to_eff", "cg_num_eff_to_pre", "cg_num_pre_to_eff" ] total_state_features = len(config.heuristics) * len( self._heuristic_state_features ) self._use_gsi = config.use_general_state_info if config.use_general_state_info: total_state_features += len(self._general_state_features) self.__skip_transform = [False for _ in range(total_state_features)] if config.use_general_state_info: self.__skip_transform[4] = True # skip num_variables transform self.__skip_transform[7] = True self.__skip_transform[8] = True self.__skip_transform[9] = True self.heuristics = config.heuristics self.host = config.host self._port = config.get("port", 0) if config["parallel"]: self.port = 0 self.fd_seed = config.fd_seed self.control_interval = config.control_interval if config.fd_logs is None: self.logpath_out = os.devnull self.logpath_err = os.devnull else: self.logpath_out = os.path.join(config.fd_logs, "fdout.txt") self.logpath_err = os.path.join(config.fd_logs, "fderr.txt") self.fd_path = config.fd_path self.fd = None if "domain_file" in 
config.keys(): self.domain_file = config["domain_file"] self.socket = None self.conn = None self._prev_state = None self.num_steps = config.num_steps self.__state_type = StateType(config.state_type) self.__norm_vals = [] self._config_dir = config.config_dir self._port_file_id = config.port_file_id self._transformation_func = None # create state transformation function with inputs (current state, previous state, normalization values) if self.__state_type == StateType.DIFF: self._transformation_func = lambda x, y, z, skip: x - y if not skip else x elif self.__state_type == StateType.ABSDIFF: self._transformation_func = ( lambda x, y, z, skip: abs(x - y) if not skip else x ) elif self.__state_type == StateType.NORMAL: self._transformation_func = ( lambda x, y, z, skip: FastDownwardEnv._save_div(x, z) if not skip else x ) elif self.__state_type == StateType.NORMDIFF: self._transformation_func = ( lambda x, y, z, skip: FastDownwardEnv._save_div(x, z) - FastDownwardEnv._save_div(y, z) if not skip else x ) elif self.__state_type == StateType.NORMABSDIFF: self._transformation_func = ( lambda x, y, z, skip: abs( FastDownwardEnv._save_div(x, z) - FastDownwardEnv._save_div(y, z) ) if not skip else x ) self.max_rand_steps = config.max_rand_steps self.__start_time = None self.done = True # Starts as true as the expected behavior is that before normal resets an episode was done. @property def port(self): if self._port == 0: if self.socket is None: raise ValueError( "Automatic port selection enabled. 
Port not know at the moment" ) _, port = self.socket.getsockname() else: port = self._port return port @port.setter def port(self, port): self._port = port @property def argstring(self): # if a socket is bound to 0 it will automatically choose a free port return f"rl_eager(rl([{''.join(f'{h},' for h in self.heuristics)[:-1]}],random_seed={self.fd_seed}),rl_control_interval={self.control_interval},rl_client_port={self.port})" @staticmethod def _save_div(a, b): """ Helper method for safe division Parameters ---------- a : list or np.array values to be divided b : list or np.array values to divide by Returns ------- np.array Division result """ return np.divide(a, b, out=np.zeros_like(a), where=b != 0) def send_msg(self, msg: bytes): """ Send message and prepend the message size Based on comment from SO see [1] [1] https://stackoverflow.com/a/17668009 Parameters ---------- msg : bytes The message as byte """ # Prefix each message with a 4-byte length (network byte order) msg = str.encode("{:>04d}".format(len(msg))) + msg self.conn.sendall(msg) def recv_msg(self): """ Recieve a whole message. The message has to be prepended with its total size Based on comment from SO see [1] Returns ---------- bytes The message as byte """ # Read message length and unpack it into an integer raw_msglen = self.recvall(4) if not raw_msglen: return None msglen = int(raw_msglen.decode()) # Read the message data return self.recvall(msglen) def recvall(self, n: int): """ Given we know the size we want to recieve, we can recieve that amount of bytes. 
Based on comment from SO see [1] Parameters --------- n: int Number of bytes to expect in the data Returns ---------- bytes The message as byte """ # Helper function to recv n bytes or return None if EOF is hit data = b"" while len(data) < n: packet = self.conn.recv(n - len(data)) if not packet: return None data += packet return data def _process_data(self): """ Split received json into state reward and done Returns ---------- np.array, float, bool state, reward, done """ msg = self.recv_msg().decode() # print("----------------------------") # print(msg) # print("=>") msg = msg.replace("-inf", "0") msg = msg.replace("inf", "0") # print(msg) data = eval(msg) r = data["reward"] done = data["done"] del data["reward"] del data["done"] state = [] if self._use_gsi: for feature in self._general_state_features: state.append(data[feature]) for heuristic_id in range(len(self.heuristics)): # process heuristic data for feature in self._heuristic_state_features: state.append(data["%d" % heuristic_id][feature]) if self._prev_state is None: self.__norm_vals = deepcopy(state) self._prev_state = deepcopy(state) if ( self.__state_type != StateType.RAW ): # Transform state to DIFF state or normalize tmp_state = state state = list( map( self._transformation_func, state, self._prev_state, self.__norm_vals, self.__skip_transform, ) ) self._prev_state = tmp_state return np.array(state), r, done def step(self, action: typing.Union[int, typing.List[int]]): """ Environment step Parameters --------- action: typing.Union[int, List[int]] Parameter(s) to apply Returns ---------- np.array, float, bool, bool, dict state, reward, terminated, truncated, info """ self.done = super(FastDownwardEnv, self).step_() if not np.issubdtype( type(action), np.integer ): # check for core int and any numpy-int try: action = action[0] except IndexError as e: print(type(action)) raise e if self.num_steps: msg = ",".join([str(action), str(self.num_steps)]) else: msg = str(action) self.send_msg(str.encode(msg)) s, 
r, terminated = self._process_data() r = max(self.reward_range[0], min(self.reward_range[1], r)) info = {} if terminated: self.done = True self.kill_connection() if self.c_step > self.n_steps: info["needs_reset"] = True self.send_msg(str.encode("END")) self.kill_connection() return s, r, terminated, self.done, info def reset(self, seed=None, options={}): """ Reset environment Returns ---------- np.array State after reset dict Meta-info """ super(FastDownwardEnv, self).reset_(seed) self._prev_state = None self.__start_time = time.time() if not self.done: # This means we interrupt FD before a plan was found # Inform FD about imminent shutdown of the connection self.send_msg(str.encode("END")) self.done = False if self.conn: self.conn.shutdown(2) self.conn.close() self.conn = None if not self.socket: self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self.socket.settimeout(10) self.socket.bind((self.host, self.port)) if self.fd: self.fd.terminate() if self.instance.endswith(".pddl"): command = [ "python3", f"{self.fd_path}", self.domain_file, self.instance, "--search", self.argstring, ] else: command = [ "python3", f"{self.fd_path}", self.instance, "--search", self.argstring, ] with open(self.logpath_out, "a+") as fout, open(self.logpath_err, "a+") as ferr: err_output = subprocess.STDOUT if self.logpath_err == "/dev/null" else ferr self.fd = subprocess.Popen(command, stdout=fout, stderr=err_output) # write down port such that FD can potentially read where to connect to if self._port_file_id: fp = joinpath(self._config_dir, "port_{:d}.txt".format(self._port_file_id)) else: fp = joinpath(self._config_dir, f"port_{self.port}.txt") with open(fp, "w") as portfh: portfh.write(str(self.port)) self.socket.listen() try: self.conn, address = self.socket.accept() except socket.timeout: raise OSError( "Fast downward subprocess not reachable (time out). 
" "Possible solutions:\n" " (1) Did you run './dacbench/envs/rl-plan/fast-downward/build.py' " "in order to build the fd backend?\n" " (2) Try to fix this by setting OPENBLAS_NUM_THREADS=1. " "For more details see https://github.com/automl/DACBench/issues/96" ) s, _, _ = self._process_data() if self.max_rand_steps > 1: for _ in range(self.np_random.randint(1, self.max_rand_steps + 1)): s, _, _, _, _ = self.step(self.action_space.sample()) if self.conn is None: return self.reset() else: s, _, _, _, _ = self.step(0) # hard coded to zero as initial step remove( fp ) # remove the port file such that there is no chance of loading the old port return s, {} def kill_connection(self): """Kill the connection""" if self.conn: self.conn.shutdown(2) self.conn.close() self.conn = None if self.socket: self.socket.shutdown(2) self.socket.close() self.socket = None def close(self): """ Close Env Returns ------- bool Closing confirmation """ if self.socket is None: return True fp = joinpath(self._config_dir, f"port_{self.port}.txt") if os.path.exists(fp): remove(fp) self.kill_connection() return True def render(self, mode: str = "human") -> None: """ Required by gym.Env but not implemented Parameters ------- mode : str Rendering mode """ pass
PypiClean
/GLAM-0.3.7-py3-none-any.whl/glam/matching/fuzzy/_predict.py
from rapidfuzz import fuzz, process import numpy as np import pandas as pd from tqdm import tqdm import gc def lookup_addresses(addresses,linz,speed_performance_balance = 0): weights = { 'unit_value_score' : 1, 'address_number_score' : 80, 'address_number_suffix_score' : 1, 'address_number_high_score' : 1, 'full_road_name_ascii_score' : 100, 'suburb_town_city_score': 50, 'postcode_score' : 80, } return_cols = ['unit_value','address_number','address_number_suffix','address_number_high','full_road_name_ascii','suburb_locality_ascii','town_city_ascii','postcode','shape_X','shape_Y','match_score'] matched_addresses = [] # matched_addresses = Parallel(n_jobs=4,backend = 'multiprocessing')(delayed(matching.lookup_address_rapidfuzz)(x,linz,weights,return_cols,speed_performance_balance) for x in tqdm(addresses)) for i,x in enumerate(tqdm(addresses, smoothing = 0.05, mininterval=1, colour = 'green')): matched_addresses.append(lookup_address_rapidfuzz(x,linz,weights,return_cols,confidence=speed_performance_balance)) if i % 1000 == 0: gc.collect() gc.collect() return matched_addresses def lookup_address_rapidfuzz(addy,linz,weights,return_cols,confidence): """ Takes a parsed address and finds the best match in the linz dataset Inputs: addy: search address in parsed dictionary format linz: pandas df of linz addresses Outputs: dictionary of address components with latitute and longitude """ if addy is None: return None search_area = [] score_cols = [] if 'postcode' in addy: search_area = reduce_search_space(linz, 'postcode_int', int(addy['postcode']), search_area = search_area, matcher = None) if 'first_number' in addy: search_area = reduce_search_space(linz, 'address_number_int', int(addy['first_number']), search_area=search_area, matcher = None) # if exact matching failed, try again with fuzzy matching on postcode if len(search_area) == 0: search_area = [] if 'first_number' in addy: search_area = reduce_search_space(linz, 'address_number_int', int(addy['first_number']), 
search_area=search_area, matcher = None) if 'postcode' in addy: search_area = reduce_search_space(linz, 'postcode', addy['postcode'], search_area = search_area, matcher = fuzz.ratio, confidence=0) score_cols.append('postcode_score') if 'street_name' in addy: search_area = reduce_search_space(linz, 'full_road_name_ascii',addy['street_name'], search_area=search_area, matcher = fuzz.ratio, confidence=confidence) score_cols.append('full_road_name_ascii_score') if 'suburb_town_city' in addy: search_area = reduce_search_space(linz, 'suburb_town_city',addy['suburb_town_city'], search_area=search_area, matcher = fuzz.partial_ratio, confidence = confidence) score_cols.append('suburb_town_city_score') # unit search_area = reduce_search_space(linz, 'unit_value',addy.get('unit',''), search_area=search_area, matcher = fuzz.ratio, confidence = 0) score_cols.append('unit_value_score') # first number suffix search_area = reduce_search_space(linz, 'address_number_suffix',addy.get('first_number_suffix',''), search_area=search_area, matcher = fuzz.ratio, confidence = 0) score_cols.append('address_number_suffix_score') # second number # search_area = reduce_search_space(linz, 'address_number_high',addy.get('second_number',''), search_area=search_area, matcher = fuzz.ratio, confidence = 0) # score_cols.append('address_number_high_score') if len(search_area) == 0: return None return conclude_search(search_area, weights, score_cols, return_cols) def conclude_search(search_area, weights, score_cols, return_cols): search_area['match_score'] = np.sum([weights[col]*search_area[col] for col in score_cols],axis=0)/sum([weights[col] for col in score_cols]) mappings = search_area.loc[search_area['match_score'].idxmax()][return_cols].to_dict() return {k: v for k, v in mappings.items() if v != ''} def reduce_search_space(linz, search_col, search_term, search_area, matcher=fuzz.ratio, confidence = 20): """ helper function for address lookup to iteratively reduce search space in LINZ dataset """ if 
matcher is None: # exact match if len(search_area) == 0: search_area = linz[linz[search_col].values == search_term].copy() else: search_area = search_area[search_area[search_col].values == search_term] else: # fuzzy match if len(search_area) == 0: search_area = linz.copy() res = process.extract(search_term,search_area[search_col].str.upper(), scorer = matcher, score_cutoff=confidence, workers = -1, limit=len(search_area)) search_area = search_area.loc[[x[2] for x in res]] search_area[search_col + '_score'] = [x[1] for x in res] return search_area def get_matches_df(sparse_matrix, A, B, top=100): non_zeros = sparse_matrix.nonzero() sparserows = non_zeros[0] sparsecols = non_zeros[1] if top: nr_matches = top else: nr_matches = sparsecols.size left_side = np.empty([nr_matches], dtype=object) right_side = np.empty([nr_matches], dtype=object) similairity = np.zeros(nr_matches) for index in range(0, nr_matches): left_side[index] = A[sparserows[index]] right_side[index] = B[sparsecols[index]] similairity[index] = sparse_matrix.data[index] return pd.DataFrame({'left_side': left_side, 'right_side': right_side, 'similairity': similairity}) def join_address(addy): parts = '' if 'unit' in addy: parts += addy['unit'] + '/' parts += addy.get('first_number','') parts += addy.get('first_number_suffix','') parts += ' ' + addy.get('street_name','') if 'suburb_town_city' in addy: parts += ', ' + addy.get('suburb_town_city','') if 'postcode' in addy: parts += ', ' + addy.get('postcode','') return parts
PypiClean
/HammerTime-http-0.9.1.tar.gz/HammerTime-http-0.9.1/hammertime/rules/redirects.py
import re from copy import copy from uuid import uuid4 from difflib import SequenceMatcher from urllib.parse import urljoin, urlparse from hammertime.http import Entry from hammertime.ruleset import RejectRequest valid_redirects = (301, 302, 303, 307, 308) class FollowRedirects: def __init__(self, *, max_redirect=15): self.max_redirect = max_redirect self.engine = None def set_engine(self, engine): self.engine = engine def set_child_heuristics(self, heuristics): self.child_heuristics = heuristics async def on_request_successful(self, entry): status_code = entry.response.code if status_code in valid_redirects: entry.result.redirects.append(copy(entry)) await self._follow_redirects(entry) async def _follow_redirects(self, entry): status_code = entry.response.code redirect_count = 0 while status_code in valid_redirects: if redirect_count > self.max_redirect: raise RejectRedirection("Max redirect limit reached") try: url = entry.response.headers["location"] last_url = entry.result.redirects[-1].request.url _entry = await self._perform_request(url, base_url=last_url) entry.result.redirects.append(_entry) entry.response = _entry.response status_code = entry.response.code redirect_count += 1 except KeyError: raise RejectRedirection("Missing location field in header of redirect") async def _perform_request(self, url, base_url): entry = Entry.create(urljoin(base_url, url)) self.engine.stats.requested += 1 entry = await self.engine.perform(entry, self.child_heuristics) self.engine.stats.completed += 1 return entry class RejectCatchAllRedirect: def __init__(self): self.engine = None self.redirects = {} def set_engine(self, engine): self.engine = engine def set_kb(self, kb): kb.redirects = self.redirects def load_kb(self, kb): self.redirects = kb.redirects def set_child_heuristics(self, heuristics): self.child_heuristics = heuristics async def after_headers(self, entry): if entry.response.code in valid_redirects and "location" in entry.response.headers: url = entry.request.url 
path = self._get_url_with_base_path(url) random_url = self._build_random_url(path) redirect_for_request = self._to_absolute_url(url, entry.response.headers["location"]) default_redirect_for_path = await self._get_default_redirect_for_path(path, random_url) # Some catch-all redirects include the current path in the requested location. # Classic examples: # - Adding a slash # - error.php?src=/origin norm_request = self._normalize(redirect_for_request, url) norm_default = self._normalize(default_redirect_for_path, random_url) if norm_request == norm_default: raise RejectRedirection("Catch-all redirect rejected: {} redirected to {}".format( url, default_redirect_for_path)) async def _get_default_redirect_for_path(self, path, random_url): if path in self.redirects: return self.redirects[path] else: _entry = await self.engine.perform_high_priority(Entry.create(random_url), self.child_heuristics) if _entry.response.code in valid_redirects: try: default_redirect = self._to_absolute_url(path, _entry.response.headers["location"]) self._add_redirect_to_kb(path, default_redirect) return default_redirect except KeyError: pass return None def _get_url_with_base_path(self, complete_url): path = urlparse(complete_url).path path_parts = path.split("/")[:-1] path = "/".join(path_parts) + "/" return urljoin(complete_url, path) def _build_random_url(self, base_path): random_path = base_path + str(uuid4()) return random_path def _normalize(self, redirect_url, initial_url): if redirect_url: path = urlparse(initial_url).path return redirect_url.replace(path, '_REQUESTED_') else: return None def _add_redirect_to_kb(self, requested_path, redirect_url): if requested_path not in self.redirects: self.redirects[requested_path] = redirect_url def _to_absolute_url(self, base_url, url): if urlparse(url).netloc == "": return urljoin(base_url, url) return url class RedirectLimiter: def __init__(self, sequence_matching=True): self.sequence_matching = sequence_matching self.digits = 
re.compile(r'\d+') self.not_found = re.compile(r'not[-_]*found', re.I) async def after_headers(self, entry): if entry.response.code not in valid_redirects: return location = entry.response.headers.get("location") if not location: return if "404" in self.digits.findall(location): raise RejectRedirection("Redirection to error page: %s" % location) if self.not_found.search(location): raise RejectRedirection("Redirection to error page: %s" % location) if self.sequence_matching and self._sequences_differ(entry.request.url, location): raise RejectRedirection("Redirection to unrelated path: %s" % location) def _sequences_differ(self, request, redirect): a = urlparse(request).path b = urlparse(redirect).path matcher = SequenceMatcher(isjunk=None, a=a, b=b, autojunk=False) return round(matcher.ratio(), 1) <= 0.8 class RejectRedirection(RejectRequest): pass
PypiClean
/Decision_tree_zrq-0.0.2.tar.gz/Decision_tree_zrq-0.0.2/neupy_core_model_api_src/chiMerge.py
from time import ctime import pymysql import pandas as pd import numpy as np import math import matplotlib as mpl import threading import multiprocessing from sklearn import preprocessing from sklearn.linear_model import LogisticRegression # from jilincode.chiMerge import chiMerge #from zrq import chiMerge from time import ctime from pandas.core.frame import DataFrame def split(Instances): ''''' Split the 4 attibutes, collect the data of the ith attributs, i=0,1,2,3 Return a list like [['0.2', 'Iris-setosa'], ['0.2', 'Iris-setosa'],...]''' log=[] for i in range(len(Instances)): log.append([Instances.iloc[i][0], Instances.iloc[i][1]]) return(log) def count(log): '''''Count the number of the same record Return a list like [['4.3', 'Iris-setosa', 1], ['4.4', 'Iris-setosa', 3],...]''' log_cnt=[] log.sort(key=lambda log:log[0]) i=0 while(i<len(log)): cnt=log.count(log[i])#count the number of the same record record=log[i][:] record.append(cnt) # the return value of append is None log_cnt.append(record) i+=cnt#count the next diferent item return(log_cnt) def build(log_cnt): '''''Build a structure (a list of truples) that ChiMerge algorithm works properly on it return a list like ([0:[6,0]],...) 
含义:变量值为0是,非违规6人,违规0人 ''' log_dic={} for record in log_cnt: if record[0] not in log_dic.keys(): log_dic[record[0]]=[0,0] if record[1]==0: log_dic[record[0]][0]=record[2] elif record[1]==1: log_dic[record[0]][1]=record[2] else: raise TypeError("Data Exception") log_truple=sorted(log_dic.items()) return(log_truple) def collect(Instances): ''''' collect data for discretization ''' log = split(Instances) log_cnt = count(log) log_tuple = build(log_cnt) return (log_tuple) def combine(a,b): ''''' a=('4.4', [3, 1, 0]), b=('4.5', [1, 0, 2]) combine(a,b)=('4.4', [4, 1, 2]) ''' c=a[:] # c[0]=a[0] for i in range(len(a[1])): c[1][i]+=b[1][i] return(c) def chi2(A): ''''' Compute the Chi-Square value ''' '''计算两个区间的卡方值''' m=len(A); k=len(A[0]) R=[] '''第i个区间的实例数''' for i in range(m): sum=0 for j in range(k): sum+=A[i][j] R.append(sum) C=[] '''第j个类的实例数''' for j in range(k): sum=0 for i in range(m): sum+=A[i][j] C.append(sum) N=0 '''总的实例数''' for ele in C: N+=ele res=0 for i in range(m): for j in range(k): Eij=R[i]*C[j]/N if Eij!=0: res=res+(A[i][j]-Eij)**2/Eij return res def ChiMerge(log_tuple,max_interval): ''''' ChiMerge algorithm ''' ''''' Return split points ''' num_interval=len(log_tuple) while(num_interval>max_interval): num_pair=num_interval-1 chi_values=[] ''' 计算相邻区间的卡方值''' for i in range(num_pair): arr=[log_tuple[i][1],log_tuple[i+1][1]] chi_values.append(chi2(arr)) min_chi=min(chi_values) # get the minimum chi value for i in range(num_pair-1,-1,-1): # treat from the last one if chi_values[i]==min_chi: log_tuple[i]=combine(log_tuple[i],log_tuple[i+1]) # combine the two adjacent intervals log_tuple[i+1]='Merged' while('Merged' in log_tuple): # remove the merged record log_tuple.remove('Merged') num_interval=len(log_tuple) split_points = [record[0] for record in log_tuple] return(split_points) def chiMerge(df, max_group): # print('Strat:' + ctime()) log_touple = collect(df) print(log_touple) split_point = ChiMerge(log_touple,max_group) # print('End:' + ctime()) return 
split_point
PypiClean
/COMPAS-1.17.5.tar.gz/COMPAS-1.17.5/src/compas/artists/volmeshartist.py
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division

from abc import abstractmethod

from compas.colors import Color

from .artist import Artist
from .colordict import ColorDict


class VolMeshArtist(Artist):
    """Artist for drawing volmesh data structures.

    Parameters
    ----------
    volmesh : :class:`~compas.datastructures.VolMesh`
        A COMPAS volmesh.

    Attributes
    ----------
    volmesh : :class:`~compas.datastructures.VolMesh`
        The COMPAS volmesh associated with the artist.
    vertices : list[int]
        The list of vertices to draw.
        Default is a list of all vertices of the volmesh.
    edges : list[tuple[int, int]]
        The list of edges to draw.
        Default is a list of all edges of the volmesh.
    faces : list[int]
        The list of faces to draw.
        Default is a list of all faces of the volmesh.
    cells : list[int]
        The list of cells to draw.
        Default is a list of all cells of the volmesh.
    vertex_xyz : dict[int, list[float]]
        The view coordinates of the vertices.
        By default, the actual vertex coordinates are used.
    vertex_color : dict[int, :class:`~compas.colors.Color`]
        Mapping between vertices and colors.
        Missing vertices get the default vertex color: :attr:`default_vertexcolor`.
    edge_color : dict[tuple[int, int], :class:`~compas.colors.Color`]
        Mapping between edges and colors.
        Missing edges get the default edge color: :attr:`default_edgecolor`.
    face_color : dict[int, :class:`~compas.colors.Color`]
        Mapping between faces and colors.
        Missing faces get the default face color: :attr:`default_facecolor`.
    cell_color : dict[int, :class:`~compas.colors.Color`]
        Mapping between cells and colors.
        Missing cells get the default cell color: :attr:`default_cellcolor`.
    vertex_text : dict[int, str]
        Mapping between vertices and text labels.
    edge_text : dict[tuple[int, int], str]
        Mapping between edges and text labels.
    face_text : dict[int, str]
        Mapping between faces and text labels.
    cell_text : dict[int, str]
        Mapping between cells and text labels.

    Class Attributes
    ----------------
    default_vertexcolor : :class:`~compas.colors.Color`
        The default color of the vertices of the mesh that don't have a specified color.
    default_edgecolor : :class:`~compas.colors.Color`
        The default color of the edges of the mesh that don't have a specified color.
    default_facecolor : :class:`~compas.colors.Color`
        The default color of the faces of the mesh that don't have a specified color.
    default_cellcolor : :class:`~compas.colors.Color`
        The default color of the cells of the mesh that don't have a specified color.

    """

    color = Color.from_hex("#0092D2").lightened(50)

    default_vertexcolor = Color.from_hex("#0092D2")
    default_edgecolor = Color.from_hex("#0092D2")
    default_facecolor = Color.from_hex("#0092D2").lightened(50)
    default_cellcolor = Color.from_hex("#0092D2").lightened(50)

    # Descriptors that normalize color input (single color or per-element
    # dict) against the class-level defaults above.
    vertex_color = ColorDict()
    edge_color = ColorDict()
    face_color = ColorDict()
    cell_color = ColorDict()

    def __init__(
        self,
        volmesh,
        vertices=None,
        edges=None,
        faces=None,
        cells=None,
        vertexcolor=None,
        edgecolor=None,
        facecolor=None,
        cellcolor=None,
        **kwargs
    ):
        # NOTE(review): **kwargs is accepted but not forwarded to the base
        # class here -- confirm against Artist.__init__ whether that is
        # intentional.
        super(VolMeshArtist, self).__init__()

        # Backing fields for the lazily-computed properties below.
        self._default_vertexcolor = None
        self._default_edgecolor = None
        self._default_facecolor = None
        self._default_cellcolor = None

        self._volmesh = None
        self._vertices = None
        self._edges = None
        self._faces = None
        self._cells = None
        self._vertex_xyz = None
        self._vertex_color = None
        self._edge_color = None
        self._face_color = None
        self._cell_color = None
        self._vertex_text = None
        self._edge_text = None
        self._face_text = None
        self._cell_text = None

        # Handles to scene objects created by concrete (CAD-specific) artists.
        self._vertexcollection = None
        self._edgecollection = None
        self._facecollection = None
        self._cellcollection = None
        self._vertexnormalcollection = None
        self._facenormalcollection = None
        self._vertexlabelcollection = None
        self._edgelabelcollection = None
        self._facelabelcollection = None
        self._celllabelcollection = None

        self.volmesh = volmesh
        self.vertices = vertices
        self.edges = edges
        self.faces = faces
        self.cells = cells
        self.vertex_color = vertexcolor
        self.edge_color = edgecolor
        self.face_color = facecolor
        self.cell_color = cellcolor

    @property
    def volmesh(self):
        return self._volmesh

    @volmesh.setter
    def volmesh(self, volmesh):
        self._volmesh = volmesh
        # invalidate the cached view coordinates of the previous volmesh
        self._vertex_xyz = None

    @property
    def vertices(self):
        # default lazily to all vertices of the volmesh
        if self._vertices is None:
            self._vertices = list(self.volmesh.vertices())
        return self._vertices

    @vertices.setter
    def vertices(self, vertices):
        self._vertices = vertices

    @property
    def edges(self):
        # default lazily to all edges of the volmesh
        if self._edges is None:
            self._edges = list(self.volmesh.edges())
        return self._edges

    @edges.setter
    def edges(self, edges):
        self._edges = edges

    @property
    def faces(self):
        # default lazily to all faces of the volmesh
        if self._faces is None:
            self._faces = list(self.volmesh.faces())
        return self._faces

    @faces.setter
    def faces(self, faces):
        self._faces = faces

    @property
    def cells(self):
        # default lazily to all cells of the volmesh
        if self._cells is None:
            self._cells = list(self.volmesh.cells())
        return self._cells

    @cells.setter
    def cells(self, cells):
        self._cells = cells

    @property
    def vertex_xyz(self):
        # default lazily to the actual vertex coordinates of the volmesh
        if not self._vertex_xyz:
            self._vertex_xyz = {
                vertex: self.volmesh.vertex_attributes(vertex, "xyz") for vertex in self.volmesh.vertices()
            }
        return self._vertex_xyz

    @vertex_xyz.setter
    def vertex_xyz(self, vertex_xyz):
        self._vertex_xyz = vertex_xyz

    @property
    def vertex_text(self):
        # default lazily to labelling every vertex with its identifier
        if not self._vertex_text:
            self._vertex_text = {vertex: str(vertex) for vertex in self.volmesh.vertices()}
        return self._vertex_text

    @vertex_text.setter
    def vertex_text(self, text):
        # accepts the keywords "key"/"index" or an explicit mapping;
        # any other value is silently ignored
        if text == "key":
            self._vertex_text = {vertex: str(vertex) for vertex in self.volmesh.vertices()}
        elif text == "index":
            self._vertex_text = {vertex: str(index) for index, vertex in enumerate(self.volmesh.vertices())}
        elif isinstance(text, dict):
            self._vertex_text = text

    @property
    def edge_text(self):
        # default lazily to labelling every edge with "u-v"
        if not self._edge_text:
            self._edge_text = {edge: "{}-{}".format(*edge) for edge in self.volmesh.edges()}
        return self._edge_text

    @edge_text.setter
    def edge_text(self, text):
        # accepts the keywords "key"/"index" or an explicit mapping;
        # any other value is silently ignored
        if text == "key":
            self._edge_text = {edge: "{}-{}".format(*edge) for edge in self.volmesh.edges()}
        elif text == "index":
            self._edge_text = {edge: str(index) for index, edge in enumerate(self.volmesh.edges())}
        elif isinstance(text, dict):
            self._edge_text = text

    @property
    def face_text(self):
        # default lazily to labelling every face with its identifier
        if not self._face_text:
            self._face_text = {face: str(face) for face in self.volmesh.faces()}
        return self._face_text

    @face_text.setter
    def face_text(self, text):
        # accepts the keywords "key"/"index" or an explicit mapping;
        # any other value is silently ignored
        if text == "key":
            self._face_text = {face: str(face) for face in self.volmesh.faces()}
        elif text == "index":
            self._face_text = {face: str(index) for index, face in enumerate(self.volmesh.faces())}
        elif isinstance(text, dict):
            self._face_text = text

    @property
    def cell_text(self):
        # default lazily to labelling every cell with its identifier
        if not self._cell_text:
            self._cell_text = {cell: str(cell) for cell in self.volmesh.cells()}
        return self._cell_text

    @cell_text.setter
    def cell_text(self, text):
        # accepts the keywords "key"/"index" or an explicit mapping;
        # any other value is silently ignored
        if text == "key":
            self._cell_text = {cell: str(cell) for cell in self.volmesh.cells()}
        elif text == "index":
            self._cell_text = {cell: str(index) for index, cell in enumerate(self.volmesh.cells())}
        elif isinstance(text, dict):
            self._cell_text = text

    @abstractmethod
    def draw_vertices(self, vertices=None, color=None, text=None):
        """Draw the vertices of the mesh.

        Parameters
        ----------
        vertices : list[int], optional
            The vertices to include in the drawing.
            Default is all vertices.
        color : tuple[float, float, float] | :class:`~compas.colors.Color` | dict[int, tuple[float, float, float] | :class:`~compas.colors.Color`], optional
            The color of the vertices,
            as either a single color to be applied to all vertices,
            or a color dict, mapping specific vertices to specific colors.
        text : dict[int, str], optional
            The text labels for the vertices
            as a text dict, mapping specific vertices to specific text labels.

        Returns
        -------
        list
            The identifiers of the objects representing the vertices in the visualization context.

        """
        raise NotImplementedError

    @abstractmethod
    def draw_edges(self, edges=None, color=None, text=None):
        """Draw the edges of the mesh.

        Parameters
        ----------
        edges : list[tuple[int, int]], optional
            The edges to include in the drawing.
            Default is all edges.
        color : tuple[float, float, float] | :class:`~compas.colors.Color` | dict[tuple[int, int], tuple[float, float, float] | :class:`~compas.colors.Color`], optional
            The color of the edges,
            as either a single color to be applied to all edges,
            or a color dict, mapping specific edges to specific colors.
        text : dict[tuple[int, int], str], optional
            The text labels for the edges
            as a text dict, mapping specific edges to specific text labels.

        Returns
        -------
        list
            The identifiers of the objects representing the edges in the visualization context.

        """
        raise NotImplementedError

    @abstractmethod
    def draw_faces(self, faces=None, color=None, text=None):
        """Draw the faces of the mesh.

        Parameters
        ----------
        faces : list[int], optional
            The faces to include in the drawing.
            Default is all faces.
        color : tuple[float, float, float] | :class:`~compas.colors.Color` | dict[int, tuple[float, float, float] | :class:`~compas.colors.Color`], optional
            The color of the faces,
            as either a single color to be applied to all faces,
            or a color dict, mapping specific faces to specific colors.
        text : dict[int, str], optional
            The text labels for the faces
            as a text dict, mapping specific faces to specific text labels.

        Returns
        -------
        list
            The identifiers of the objects representing the faces in the visualization context.

        """
        raise NotImplementedError

    @abstractmethod
    def draw_cells(self, cells=None, color=None, text=None):
        """Draw the cells of the mesh.

        Parameters
        ----------
        cells : list[int], optional
            The cells to include in the drawing.
            Default is all cells.
        color : tuple[float, float, float] | :class:`~compas.colors.Color` | dict[int, tuple[float, float, float] | :class:`~compas.colors.Color`], optional
            The color of the cells,
            as either a single color to be applied to all cells,
            or a color dict, mapping specific cells to specific colors.
        text : dict[int, str], optional
            The text labels for the cells
            as a text dict, mapping specific cells to specific text labels.

        Returns
        -------
        list
            The identifiers of the objects representing the cells in the visualization context.

        """
        raise NotImplementedError

    @abstractmethod
    def clear_vertices(self):
        """Clear the vertices of the mesh.

        Returns
        -------
        None

        """
        raise NotImplementedError

    @abstractmethod
    def clear_edges(self):
        """Clear the edges of the mesh.

        Returns
        -------
        None

        """
        raise NotImplementedError

    @abstractmethod
    def clear_faces(self):
        """Clear the faces of the mesh.

        Returns
        -------
        None

        """
        raise NotImplementedError

    @abstractmethod
    def clear_cells(self):
        """Clear the cells of the mesh.

        Returns
        -------
        None

        """
        raise NotImplementedError
PypiClean
/GailBot-0.2a0-py3-none-any.whl/gailbot/core/engines/exception.py
"""Exception types and error-message constants for the GailBot STT engines."""

# Fix: the dataclass import previously sat in the middle of the file,
# between the exception classes and the ERROR constants (PEP 8 requires
# imports at the top).  Redundant no-op `__init__(self, *args)` overrides
# that only delegated to super() have been removed -- that is already the
# default Exception behavior.  All error strings are unchanged.
from dataclasses import dataclass


class ConnectionError(Exception):
    """Raised when the STT backend cannot be reached.

    NOTE: deliberately shadows the builtin ``ConnectionError``; the name is
    part of this module's public API and is kept for backward compatibility.
    """

    def __str__(self) -> str:
        return "ERROR 404: STT Connection Error"


class TranscriptionError(Exception):
    """Raised when a transcription run fails; carries the underlying detail."""

    def __init__(self, error: str = None) -> None:
        super().__init__(error)
        # original error detail, echoed in __str__
        self.error = error

    def __str__(self) -> str:
        return f"ERROR 500: Transcription error: {self.error}"


class APIKeyError(Exception):
    """Raised when the STT API key is missing or rejected."""

    def __str__(self) -> str:
        return "ERROR 508: API key error"


class AudioFileError(Exception):
    """Raised when an input file is not a valid audio file."""

    def __str__(self) -> str:
        return "ERROR 510: Not a valid audio file"


class ModelCreateError(Exception):
    """Raised when creating an STT model fails."""

    def __str__(self) -> str:
        return "ERROR 511: Model creation error"


class WatsonMethodExecutionError(Exception):
    """Raised when a Watson SDK method call fails."""

    def __str__(self) -> str:
        return "ERROR 512: Watson method execution error"


class OutPutError(Exception):
    """Raised when writing transcription output fails."""

    def __str__(self) -> str:
        return "ERROR 520: Error writing output"


class GetUttResultError(Exception):
    """Raised when utterance results cannot be retrieved."""

    def __str__(self) -> str:
        return "ERROR 521: Failed to get utterance result"


@dataclass
class ERROR:
    """Error-message constants shared by the engine implementations."""

    CONNECTION_ERROR = "ERROR 404: No internet connection"
    GOOGLE_TRANSCRIPTION_FAILED = "ERROR 501: Google STT transcription failed"
    WATSON_TRANSCRIPTION_FAILED = "ERROR 502: Watson STT transcription failed"
    WHISPER_TRANSCRIPTION_FAILED = "ERROR 503: Whisper STT transcription failed"
    AUDIO_COMPRESSION_FAILED = (
        "ERROR 505: Failed to compress large audio file to opus format"
    )
    CHILD_PROCESS_STOPPED = "ERROR 531: Child process stopped"
    CHILD_PROCESS_ERROR = "ERROR 532: Child process error"
    CHILD_PROCESS_NOT_FOUND = "ERROR 533: Child process not found"
PypiClean
/NetTraRec-0.1.12-py3-none-any.whl/NetRecorder/NetTraRecServer.py
import os
from argparse import ArgumentParser, RawTextHelpFormatter

from NetRecorder.gear_for_nr import tell_the_datetime


def record_starter_server():
    # CLI entry point: parse the recorder options, then launch NetTraRec.py
    # (located next to this module) as a detached background process via
    # `nohup ... &`, redirecting its output to a timestamped .out file.
    dp = ' 这是一个查看或者返回服务器流量信息的工具,以服务的方式启动,默认使用推 redis 的方式,单位为 bytes/m\n' \
         ' https://github.com/ga1008/net_tracfic_recorder'
    da = ""
    parser = ArgumentParser(description=dp, formatter_class=RawTextHelpFormatter, add_help=True)
    parser.add_argument("-n", "--net_devices", type=str, dest="net_devices", default='eth0,enp2s0',
                        help=f'{da}指定网络设备,默认 eth0。多个值使用英文逗号 "," 隔开\n')
    parser.add_argument("-u", "--unit", type=str, dest="unit", default='bytes',
                        help=f'{da}指定流量单位,auto/bytes/kb/mb/gb,默认bytes\n')
    parser.add_argument("-rf", "--refresh_rate", type=str, dest="refresh_rate", default='m',
                        help=f'{da}统计频率,h/m/s (时/分/秒),默认m\n')
    # NOTE(review): the default here is 'y' but the help text claims the
    # default is 'n' -- confirm which is intended.
    parser.add_argument("-pr", "--push_redis", type=str, dest="push_redis", default='y', nargs='?',
                        help=f'{da}y/n。将结果推入指定的redis,默认n。如果设置了此参数,则接下来需要提供目标redis的信息\n')
    parser.add_argument("-kp", "--key_params", type=str, dest="key_params", default='local',
                        help=f'{da}关键参数提供方式,input/local/now,\n'
                             f'input是随后输入,\n'
                             f'local是在本地redis的"NetRec_key_params"内寻找,\n'
                             'now是后面直接跟上>>>>参数字典,例如:now>>>>{"host": "127.0.0.1", "port": ..., "db": ...},'
                             'now方式仅限测试\n'
                             f'默认input\n')
    args = parser.parse_args()
    unit = args.unit
    refresh_rate = args.refresh_rate.lower()
    push_to_redis = args.push_redis
    key_params_mode = args.key_params
    try:
        # "~" is left unexpanded here; the shell started by os.popen expands
        # it in both the interpreter path and the redirection target.
        log_dir = "~/"
        file_name = "NetTraRec.py"
        args_str = f"-u {unit} -rf {refresh_rate} -pr {push_to_redis} -kp {key_params_mode} -ps"
        # NOTE(review): the interpreter path ~/miniconda/bin/python is
        # hard-coded -- this fails on hosts with a different Python layout.
        run = f"~/miniconda/bin/python {os.path.join(os.path.abspath(os.path.dirname(__file__)), file_name)} {args_str}"
        out_log_path = os.path.join(log_dir, f"{tell_the_datetime(compact_mode=True)}.out")
        run = f"nohup {run} > {out_log_path} &"
        # fire-and-forget: the read() returns immediately because the child
        # is backgrounded by the trailing '&'
        res = os.popen(run).read()
    except KeyboardInterrupt:
        print('\n 退出 \n')
    except Exception as E:
        print(f"Error: {E}")
PypiClean
/BibReview-0.2.4.tar.bz2/BibReview-0.2.4/command_line.py
# BibReview
# Copyright (C) 2012 Jean-Baptiste LAMY (jibalamy at free . fr)
# BibReview is developped by Jean-Baptiste LAMY, at LIM&BIO,
# UFR SMBH, Université Paris 13, Sorbonne Paris Cité.

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

# NOTE(review): this module is Python 2 code (print statements, u""
# literals, inspect.getargspec); it cannot run under Python 3 as-is.

import sys, os, os.path, inspect, shlex

import bibreview, bibreview.globdef as globdef, bibreview.model as model, bibreview.parse_bibreview as parse_bibreview, bibreview.parse_pubmed, bibreview.parse_bibtex, bibreview.parse_reflib, bibreview.export_bibtex

# Stack of currently-open bibliography bases; the last element is the
# "current" base that commands operate on by default.
BASES = []


def format_arg(arg):
    # Quote an argument for display in the generated "Command line:" comment;
    # option flags (--x) are left bare, values are double-quoted with
    # embedded quotes escaped.
    if arg.startswith(u"--"):
        return arg
    return '"%s"' % arg.replace('"', '\\"')


def run(command):
    # Interpret a command line (string or already-split argv list):
    # built-in options (--version, --help, --save, --save-as) are handled
    # here; any name found in model.COMMAND_LINE_FUNCS is dispatched with
    # the following arguments; anything else is treated as a bibliography
    # file to open.  Unless a non-GUI option ran, a GTK window is opened
    # for each loaded base.  Returns the current base, if any.
    global BASES
    if isinstance(command, list):
        argv = command
    else:
        argv = shlex.split(command)
    i = 0
    gui = 1  # cleared whenever a non-interactive option is executed
    while i < len(argv):
        arg = argv[i]
        if arg == "--version":
            gui = 0
            print "BibReview version %s" % globdef.VERSION
        elif arg == "--help":
            gui = 0
            print _("__help__")
        elif arg == "--save-as":
            gui = 0
            i += 1
            # serialize the current base and write it to the given filename
            s = BASES.pop().__xml__().encode("utf8")
            open(argv[i], "w").write(s)
        elif arg == "--save":
            gui = 0
            s = BASES[-1].__xml__().encode("utf8")
            # NOTE(review): `filename or None` passes None to open() when the
            # base has no filename, which raises TypeError -- presumably this
            # was meant to be an explicit error; confirm intended behavior.
            open(BASES.pop().filename or None, "w").write(s)
        elif arg in model.COMMAND_LINE_FUNCS:
            # Dispatch to a registered command: consume as many argv items as
            # the function has parameters; parameters named "base*" are
            # resolved to Base objects ("CURRENT" meaning the current base).
            arg_names = inspect.getargspec(model.COMMAND_LINE_FUNCS[arg])[0]
            args = argv[i+1 : i+len(arg_names)+1]
            for j in range(len(arg_names)):
                if arg_names[j].startswith("base"):
                    if args[j] == "CURRENT":
                        args[j] = BASES[-1]
                    else:
                        args[j] = parse_bibreview.parse_bibreview_file(args[j])
            r = model.COMMAND_LINE_FUNCS[arg](*args)
            if isinstance(r, model.Base):
                # record the full command line in the resulting base and make
                # it the only open base
                r.comment = _(u"Command line:\n") + argv[0] + u" " + u" ".join(map(format_arg, argv[1:]))
                BASES = [r]
            elif r is None:
                gui = 0
            i += len(arg_names)
        else:
            # bare argument: open it as a bibreview file
            BASES.append(parse_bibreview.parse_bibreview_file(arg))
        i += 1
    if gui:
        # GUI deps are imported lazily so pure command-line use works headless
        import gtk, bibreview.gui
        if not BASES:
            BASES.append(model.Base())
        for base in BASES:
            bibreview.gui.MainWindow(base).show_all()
        gtk.main()
    if BASES:
        return BASES[-1]
PypiClean
/Bugzilla-ETL-0.3.13353.zip/Bugzilla-ETL-0.3.13353/bzETL/util/files.py
import codecs
from datetime import datetime
import io
import os
import shutil

from .struct import listwrap, nvl
from .cnv import CNV


class File(object):
    # Thin wrapper around a filesystem path with convenience read/write
    # helpers.  Paths are stored with "/" separators regardless of OS.
    # NOTE(review): this is Python 2 code (`except Exception, e` syntax);
    # it cannot run under Python 3 as-is.

    def __init__(self, filename, buffering=2 ** 14):
        if filename == None:
            from .logs import Log
            Log.error("File must be given a filename")
        #USE UNIX STANDARD
        self._filename = "/".join(filename.split(os.sep))
        self.buffering = buffering

    @property
    def filename(self):
        # path converted back to the native OS separator
        return self._filename.replace("/", os.sep)

    @property
    def abspath(self):
        return os.path.abspath(self._filename)

    def backup_name(self, timestamp=None):
        """
        RETURN A FILENAME THAT CAN SERVE AS A BACKUP FOR THIS FILE
        """
        # the timestamp suffix is inserted before the extension, or appended
        # when there is no extension (or the only "." is in a directory name)
        suffix = CNV.datetime2string(nvl(timestamp, datetime.now()), "%Y%m%d_%H%M%S")
        parts = self._filename.split(".")
        if len(parts) == 1:
            output = self._filename + "." + suffix
        elif len(parts) > 1 and parts[-2][-1] == "/":
            output = self._filename + "." + suffix
        else:
            parts.insert(-1, suffix)
            output = ".".join(parts)
        return output

    def read(self, encoding="utf-8"):
        # decoded text read of the whole file
        with codecs.open(self._filename, "r", encoding=encoding) as file:
            return file.read()

    def read_ascii(self):
        # raw (undecoded) read; creates the parent directory if missing
        if not self.parent.exists:
            self.parent.create()
        with open(self._filename, "r") as file:
            return file.read()

    def write_ascii(self, content):
        if not self.parent.exists:
            self.parent.create()
        with open(self._filename, "w") as file:
            file.write(content)

    def write(self, data):
        # write one or many chunks (listwrap normalizes scalars to lists)
        if not self.parent.exists:
            self.parent.create()
        with open(self._filename, "w") as file:
            for d in listwrap(data):
                file.write(d)

    def __iter__(self):
        #NOT SURE HOW TO MAXIMIZE FILE READ SPEED
        #http://stackoverflow.com/questions/8009882/how-to-read-large-file-line-by-line-in-python
        #http://effbot.org/zone/wide-finder.htm
        # generator keeps the file open only while being consumed
        def output():
            with io.open(self._filename, "rb") as f:
                for line in f:
                    yield line.decode("utf-8")
        return output()

    def append(self, content):
        if not self.parent.exists:
            self.parent.create()
        with open(self._filename, "a") as output_file:
            output_file.write(content)

    def add(self, content):
        # alias for append
        return self.append(content)

    def extend(self, content):
        # append many chunks in one open
        if not self.parent.exists:
            self.parent.create()
        with open(self._filename, "a") as output_file:
            for c in content:
                output_file.write(c)

    def delete(self):
        # remove file or directory tree; a missing path is treated as success
        try:
            if os.path.isdir(self._filename):
                shutil.rmtree(self._filename)
            elif os.path.isfile(self._filename):
                os.remove(self._filename)
            return self
        except Exception, e:
            # Windows-specific "not found" message is swallowed deliberately
            if e.strerror == "The system cannot find the path specified":
                return
            from .logs import Log
            Log.error("Could not remove file", e)

    def backup(self):
        # NOTE(review): this method is visibly incomplete -- the File object
        # is assigned to a local and never used, nothing is copied, there is
        # no branch for names with an extension, and "%H%i%s" is not a valid
        # Python strftime format (looks like PHP format codes).
        names = self._filename.split("/")[-1].split(".")
        if len(names) == 1:
            backup = File(self._filename + ".backup " + datetime.utcnow().strftime("%Y%m%d %H%i%s"))

    def create(self):
        # create this path as a directory (including parents)
        try:
            os.makedirs(self._filename)
        except Exception, e:
            from .logs import Log
            Log.error("Could not make directory {{dir_name}}", {"dir_name": self._filename}, e)

    @property
    def parent(self):
        return File("/".join(self._filename.split("/")[:-1]))

    @property
    def exists(self):
        # "" and "." denote the current directory, which always exists
        if self._filename in ["", "."]:
            return True
        try:
            return os.path.exists(self._filename)
        except Exception, e:
            return False
PypiClean
/Electrum-CHI-3.3.8.tar.gz/Electrum-CHI-3.3.8/packages/setuptools/_vendor/packaging/requirements.py
# NOTE(review): this is the `packaging.requirements` module vendored inside
# setuptools; prefer updating it by re-vendoring upstream rather than by
# local edits.
from __future__ import absolute_import, division, print_function

import string
import re

from setuptools.extern.pyparsing import stringStart, stringEnd, originalTextFor, ParseException
from setuptools.extern.pyparsing import ZeroOrMore, Word, Optional, Regex, Combine
from setuptools.extern.pyparsing import Literal as L  # noqa
from setuptools.extern.six.moves.urllib import parse as urlparse

from .markers import MARKER_EXPR, Marker
from .specifiers import LegacySpecifier, Specifier, SpecifierSet


class InvalidRequirement(ValueError):
    """
    An invalid requirement was found, users should refer to PEP 508.
    """


# --- pyparsing grammar for PEP 508 requirement strings -------------------

ALPHANUM = Word(string.ascii_letters + string.digits)

LBRACKET = L("[").suppress()
RBRACKET = L("]").suppress()
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
COMMA = L(",").suppress()
SEMICOLON = L(";").suppress()
AT = L("@").suppress()

PUNCTUATION = Word("-_.")
IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))

NAME = IDENTIFIER("name")
EXTRA = IDENTIFIER

URI = Regex(r'[^ ]+')("url")
URL = (AT + URI)

EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")

# version specifiers: PEP 440 syntax or the legacy (setuptools) syntax
VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)

VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
VERSION_MANY = Combine(VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE),
                       joinString=",", adjacent=False)("_raw_spec")
_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY))
_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or '')

VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
VERSION_SPEC.setParseAction(lambda s, l, t: t[1])

# environment markers appear after a ";" and are parsed into Marker objects
MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
MARKER_EXPR.setParseAction(
    lambda s, l, t: Marker(s[t._original_start:t._original_end])
)
MARKER_SEPERATOR = SEMICOLON
MARKER = MARKER_SEPERATOR + MARKER_EXPR

VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
URL_AND_MARKER = URL + Optional(MARKER)

NAMED_REQUIREMENT = \
    NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)

REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd


class Requirement(object):
    """Parse a requirement.

    Parse a given requirement string into its parts, such as name, specifier,
    URL, and extras. Raises InvalidRequirement on a badly-formed requirement
    string.
    """

    # TODO: Can we test whether something is contained within a requirement?
    #       If so how do we do that? Do we need to test against the _name_ of
    #       the thing as well as the version? What about the markers?
    # TODO: Can we normalize the name and extra name?

    def __init__(self, requirement_string):
        try:
            req = REQUIREMENT.parseString(requirement_string)
        except ParseException as e:
            raise InvalidRequirement(
                "Invalid requirement, parse error at \"{0!r}\"".format(
                    requirement_string[e.loc:e.loc + 8]))

        self.name = req.name
        if req.url:
            # a URL requirement must carry both a scheme and a location
            parsed_url = urlparse.urlparse(req.url)
            if not (parsed_url.scheme and parsed_url.netloc) or (
                    not parsed_url.scheme and not parsed_url.netloc):
                raise InvalidRequirement("Invalid URL given")
            self.url = req.url
        else:
            self.url = None
        self.extras = set(req.extras.asList() if req.extras else [])
        self.specifier = SpecifierSet(req.specifier)
        self.marker = req.marker if req.marker else None

    def __str__(self):
        # reassemble a canonical PEP 508 string from the parsed parts
        parts = [self.name]

        if self.extras:
            parts.append("[{0}]".format(",".join(sorted(self.extras))))

        if self.specifier:
            parts.append(str(self.specifier))

        if self.url:
            parts.append("@ {0}".format(self.url))

        if self.marker:
            parts.append("; {0}".format(self.marker))

        return "".join(parts)

    def __repr__(self):
        return "<Requirement({0!r})>".format(str(self))
PypiClean
/NREL-rex-0.2.84.tar.gz/NREL-rex-0.2.84/rex/utilities/downscale.py
import numpy as np
import pandas as pd
import logging

from rex.utilities.solar_position import SolarPosition
from rex.utilities.utilities import get_lat_lon_cols, pd_date_range
from nsrdb.all_sky import CLEAR_TYPES
from nsrdb.all_sky.all_sky import all_sky
from nsrdb.utilities.interpolation import temporal_lin, temporal_step

logger = logging.getLogger(__name__)


def make_time_index(year, frequency, set_timezone=True):
    """Make the NSRDB target time index.

    Parameters
    ----------
    year : int
        Year for time index.
    frequency : str
        String in the Pandas frequency format, e.g. '5min'.
    set_timezone : bool
        Flag to set a timezone-aware time index. Will be set to UTC with
        zero offset.

    Returns
    -------
    ti : pd.DatetimeIndex
        Pandas datetime index for a full year at the requested resolution.
    """
    # build [Jan 1 of year, Jan 1 of year+1] then drop the final midnight so
    # the index covers exactly one year
    ti = pd_date_range('1-1-{y}'.format(y=year),
                       '1-1-{y}'.format(y=year + 1),
                       freq=frequency)[:-1]
    if set_timezone:
        ti = ti.tz_localize('UTC')

    return ti


def interp_cld_props(data, ti_native, ti_new,
                     var_list=('cld_reff_dcomp', 'cld_opd_dcomp')):
    """Interpolate missing cloud properties (NOT CLOUD TYPE).

    Parameters
    ----------
    data : dict
        Namespace of variables for input to all_sky. Must include the cloud
        variables in var_list and "cloud_type".
    ti_native : pd.DateTimeIndex
        Native time index of the original NSRDB data.
    ti_new : pd.DateTimeIndex
        Intended downscaled time index.
    var_list : list | tuple
        Cloud variables to downscale.

    Returns
    -------
    data : dict
        Namespace of variables with the cloud variables in var_list
        downscaled to the requested ti_new.
    """
    for var in var_list:
        # make sparse dataframe with new time_index; timesteps not present in
        # the native index become NaN rows
        data[var] = pd.DataFrame(data[var], index=ti_native).reindex(ti_new)

        # find location of bad data
        cld_fill_flag = ((data[var] < 0) | data[var].isnull())

        # replace to-fill values with nan
        data[var].values[cld_fill_flag] = np.nan

        # set clear timesteps cloud props to zero for better transitions
        data[var].values[np.isin(data['cloud_type'], CLEAR_TYPES)] = 0.0

        # interpolate empty values
        data[var] = data[var].interpolate(method='linear', axis=0).values

        logger.debug('Downscaled array for "{}" has shape {} and {} NaN values'
                     .format(var, data[var].shape, np.isnan(data[var]).sum()))

    return data


def downscale_nsrdb(SAM_res, res, frequency='5min',
                    sam_vars=('dhi', 'dni', 'wind_speed', 'air_temperature'),
                    variability_kwargs=None):
    """Downscale the NSRDB resource and return the preloaded SAM_res.

    Parameters
    ----------
    SAM_res : SAMResource
        SAM resource object.
    res : NSRDB
        NSRDB resource handler.
    frequency : str
        String in the Pandas frequency format, e.g. '5min'.
    sam_vars : tuple | list
        Variables to save to SAM resource handler before returning.
    variability_kwargs : Nonetype | dict
        Downscaling kwargs to the NSRDB all sky method call. Should include
        maximum GHI synthetic variability fraction ("var_frac") which will
        be set to 0.05 (5%) if variability_kwargs is None.

    Returns
    -------
    SAM_res : SAMResource
        SAM resource object with downscaled solar resource data loaded.
        Time index and shape are also updated.
    """
    logger.debug('Downscaling NSRDB resource data to "{}".'.format(frequency))

    # variables required for all-sky not including clouds, ti, sza
    var_list = ('aod',
                'surface_pressure',
                'surface_albedo',
                'ssa',
                'asymmetry',
                'alpha',
                'ozone',
                'total_precipitable_water',
                )

    # Indexing variable
    sites_slice = SAM_res.sites_slice

    # get downscaled time_index and update the SAM handler to match
    time_index = make_time_index(res.time_index.year[0], frequency)
    SAM_res._time_index = time_index
    SAM_res._shape = (len(time_index), len(SAM_res.sites))
    logger.debug('Native resource time index has length {}: \n{}'
                 .format(len(res.time_index), res.time_index))
    logger.debug('Target resource time index has length {}: \n{}'
                 .format(len(time_index), time_index))

    # downscale variables into an all-sky input variable namespace
    all_sky_ins = {'time_index': time_index}
    for var in var_list:
        # FIX: the dataset slice was previously read twice per variable
        # (a dead `arr = res[var, :, sites_slice]` immediately overwritten
        # by the temporal_lin call); read it once.
        arr = temporal_lin(res[var, :, sites_slice], res.time_index,
                           time_index)
        all_sky_ins[var] = arr
        logger.debug('Downscaled array for "{}" has shape {} and {} NaN values'
                     .format(var, arr.shape, np.isnan(arr).sum()))

    # calculate downscaled solar zenith angle
    lat_lon_cols = get_lat_lon_cols(res.meta)
    lat_lon = res.meta.loc[SAM_res.sites, lat_lon_cols]\
        .values.astype(np.float32)
    sza = SolarPosition(time_index, lat_lon).zenith
    all_sky_ins['solar_zenith_angle'] = sza
    logger.debug('Downscaled array for "solar_zenith_angle" '
                 'has shape {} and {} NaN values'
                 .format(sza.shape, np.isnan(sza).sum()))

    # get downscaled cloud properties; cloud_type is categorical and is
    # stepped (not linearly interpolated), the continuous cloud properties
    # are gap-filled in interp_cld_props
    all_sky_ins['cloud_type'] = temporal_step(
        res['cloud_type', :, sites_slice], res.time_index, time_index)
    all_sky_ins['cld_opd_dcomp'] = res['cld_opd_dcomp', :, sites_slice]
    all_sky_ins['cld_reff_dcomp'] = res['cld_reff_dcomp', :, sites_slice]
    all_sky_ins = interp_cld_props(all_sky_ins, res.time_index, time_index)

    # add all sky kwargs such as variability
    if variability_kwargs is None:
        variability_kwargs = {'var_frac': 0.05}

    all_sky_ins['variability_kwargs'] = variability_kwargs

    # run all-sky
    logger.debug('Running all-sky for "{}".'.format(SAM_res))
    all_sky_outs = all_sky(**all_sky_ins)

    # set downscaled data to sam resource handler
    for k, v in all_sky_outs.items():
        if k in sam_vars:
            SAM_res[k] = v

    # downscale extra vars needed for SAM but not for all-sky
    for var in sam_vars:
        if var not in SAM_res._res_arrays:
            SAM_res[var] = temporal_lin(res[var, :, sites_slice],
                                        res.time_index, time_index)

    return SAM_res
PypiClean
/DobbyStock-0.1.tar.gz/DobbyStock-0.1/step2- test files/Test_Stock_Main.py
import unittest

from main_package.Stock_main import *


class TestStock(unittest.TestCase):  # test class
    """Unit tests for the Stock class.

    NOTE(review): `Stock` and `random` come from the star import above.
    The value bounds asserted below mirror the ranges this test file itself
    uses in test_size (high: 201-500, low: 50-200, volume: 1-200) -- confirm
    against the Stock implementation.
    """

    @classmethod
    def setUpClass(cls):
        print('setupClass')

    def setUp(self):
        print('Set up')

    def test_high_price(self):
        # fixed fixture round-trips unchanged
        self.high_price_list = [20, 30, 40]
        self.assertEqual(self.high_price_list, [20, 30, 40])

    def test_low_price(self):
        self.low_price_list = [5, 10, 15]
        self.assertEqual(self.low_price_list, [5, 10, 15])

    def test_vol(self):
        self.volume_list = [1, 2, 3]
        self.assertEqual(self.volume_list, [1, 2, 3])

    def test_size(self):
        self.n = 5
        self.high_price_list = [random.randint(201, 500) for i in range(int(self.n))]
        self.low_price_list = [random.randint(50, 200) for i in range(int(self.n))]
        self.volume_list = [random.randint(1, 200) for i in range(int(self.n))]
        # BUG FIX: the lists were previously *called* (self.high_price_list()),
        # which raised TypeError, and the final assertion compared n to the
        # literal 4, which failed unconditionally.
        self.assertEqual(self.n, len(self.high_price_list))
        self.assertEqual(self.n, len(self.low_price_list))
        self.assertEqual(self.n, len(self.volume_list))

    def test_get_high_price(self):
        stock = Stock()
        high_prices = stock.get_high_price()
        self.assertIsInstance(high_prices, list)
        # BUG FIX: the bounds were inverted (max >= 201 and min <= 500 are
        # nearly always true); assert the intended range [201, 500].
        self.assertGreaterEqual(min(high_prices), 201)
        self.assertLessEqual(max(high_prices), 500)

    def test_get_low_price(self):
        stock = Stock()
        low_prices = stock.get_low_price()
        self.assertIsInstance(low_prices, list)
        # BUG FIX: assert the intended range [50, 200]
        self.assertGreaterEqual(min(low_prices), 50)
        self.assertLessEqual(max(low_prices), 200)

    def test_get_volume(self):
        stock = Stock()
        volumes = stock.get_volume()
        self.assertIsInstance(volumes, list)
        # BUG FIX: assert the intended range [1, 200]
        self.assertGreaterEqual(min(volumes), 1)
        self.assertLessEqual(max(volumes), 200)

    def test_get_size(self):
        stock = Stock()
        size = stock.get_size()
        self.assertIsInstance(size, int)
        self.assertGreaterEqual(size, 0)

    def test_str(self):
        stock = Stock()
        stock_str = str(stock)
        self.assertIsInstance(stock_str, str)
        self.assertIn("high price list:", stock_str)
        self.assertIn("low price list:", stock_str)
        self.assertIn("volume list:", stock_str)

    def tearDown(self):
        print('Tear Down')

    @classmethod
    def tearDownClass(cls):
        print('teardownClass')
PypiClean
/APIConnect-2.0.3-py3-none-any.whl/feed/feed.py
from cgitb import Hook  # NOTE(review): unused import; kept to preserve the module's import surface
import json
import logging
import socket
from threading import Thread
from time import sleep
from typing import Any, Callable

from constants.streaming_constants import StreamingConstants

LOGGER = logging.getLogger(__name__)


class Feed:
    """Persistent TCP streaming-feed client.

    Opens a socket to the streaming host in a background thread at
    construction time, lets callers subscribe/unsubscribe request codes with
    per-code callbacks, and dispatches each incoming JSON line to the callback
    registered for its ``streaming_type``.
    """

    def __init__(self, confObj):
        """Store configuration, resolve host/port/app-id, and connect.

        :param confObj: optional config mapping with 'GLOBAL' and 'STREAM'
            sections overriding the built-in defaults.
        """
        self.__conf = confObj
        # NOTE(review): hard-coded bearer token checked into source -- a
        # security smell; it should come from configuration/secret storage.
        AppIdKey = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhcHAiOjAsImZmIjoiVyIsImJkIjoid2ViLXBjIiwibmJmIjoxNjkwNTQxNjQ3LCJzcmMiOiJlbXRtdyIsImF2IjoiMi4wLjMiLCJhcHBpZCI6IjFmZWZjY2Y2YmQzYzllNjFkMWNlYTFlMDY2ZWJlMDg1IiwiaXNzIjoiZW10IiwiZXhwIjoxNjkwNTY5MDAwLCJpYXQiOjE2OTA1NDE5NDd9.Cl5oqCYl4Yx4fiC_oWSsDzIe-8vgkPqWqYAP5XuLtps"
        Host = "ncst.nuvamawealth.com"
        Port = 9443
        self._appID = AppIdKey
        self.__host = Host
        self.__port = Port
        if self.__conf:
            # Config values, when present, override the defaults above.
            if self.__conf['GLOBAL'].get('AppIdKey'):
                self._appID = self.__conf['GLOBAL'].get('AppIdKey')
            if self.__conf['STREAM'].get('HOST'):
                self.__host = self.__conf['STREAM'].get('HOST')
            if self.__conf['STREAM'].get('PORT'):
                self.__port = int(self.__conf['STREAM'].get('PORT'))
        self._sock = None
        self._socket_fs = None  # text-mode file wrapper over the socket
        self.__requestsList = {}  # request code -> {'request': str, 'callback': fn}
        # Establish the connection off the caller's thread.
        t = Thread(target=self.__do_connection)
        t.start()

    def _subscribe(self, request: str, callback: Callable[[str], Any], requestCode: StreamingConstants):
        """Register *callback* under *requestCode* and start streaming *request*."""
        self.__requestsList[requestCode] = {'request': request, 'callback': callback}
        self.__sub(requestCode)

    def _unsubscribe(self, request: str, requestCode: StreamingConstants):
        """Send the unsubscribe *request* (reconnecting if needed) and drop the code."""
        if self.__is_connection_alive():
            self.__send_stream_request(request)
        else:
            self.__do_connection()
            self.__send_stream_request(request)
        self.__requestsList.pop(requestCode, "Key not found")

    def __sub(self, action):
        """Start streaming for one request code, or for 'all' registered codes.

        Reconnects and retries itself once when the connection is down.
        """
        if self.__is_connection_alive():
            if action == 'all':
                for req_code in self.__requestsList.keys():
                    self.__start_streaming(self.__requestsList[req_code]['request'])
                    sleep(0.1)  # brief pacing between resubscriptions
            elif type(action) is StreamingConstants:
                self.__start_streaming(self.__requestsList[action]['request'])
        else:
            self.__do_connection()
            self.__sub(action)

    def __start_streaming(self, sendRequest: str):
        """Send the subscribe request and spawn a reader thread for responses."""
        self.__send_stream_request(sendRequest)
        t_read = Thread(target=self.__read_stream_data)
        t_read.start()

    def __send_stream_request(self, request: str):
        # writelines on a str iterates characters; functionally equivalent
        # to write() here, kept for behavioral parity.
        self._socket_fs.writelines(request)
        self._socket_fs.flush()

    def __read_stream_data(self):
        """Blocking read loop: dispatch each JSON line to its registered callback.

        On a blank read (peer closed), exits the loop and resubscribes
        everything, which re-establishes the connection via __sub.
        """
        while True:
            resp = self._socket_fs.readline()
            if resp:
                LOGGER.debug(f"Response recevied : {resp}")
                try:
                    resp_dict = json.loads(resp)
                    # FIX: 'callback' was only bound inside the dispatch
                    # chain; an unknown streaming_type raised
                    # UnboundLocalError (killing this reader thread) or
                    # silently reused a stale callback from a previous
                    # message. Initialize and guard instead.
                    callback = None
                    streaming_type = resp_dict['response']["streaming_type"]
                    if streaming_type == "quote3":
                        callback = self.__requestsList[StreamingConstants.QUOTE_SREAM_REQ_CODE]['callback']
                    elif streaming_type == "quote":
                        callback = self.__requestsList[StreamingConstants.REDUCED_QUOTE_SREAM_REQ_CODE]['callback']
                    elif streaming_type == "orderFiler":
                        callback = self.__requestsList[StreamingConstants.ORDER_STREAM_REQ_CODE]['callback']
                    elif streaming_type == "news":
                        callback = self.__requestsList[StreamingConstants.LIVENEWS_STREAM_REQ_CODE]['callback']
                    elif streaming_type == "quote2":
                        callback = self.__requestsList[StreamingConstants.DEPTH_STREAM_REQ_CODE]['callback']
                    elif streaming_type == "miniquote":
                        callback = self.__requestsList[StreamingConstants.MINI_QUOTE_STREAM_REQ_CODE]['callback']
                    if callback is not None:
                        callback(resp)
                except json.JSONDecodeError:
                    pass  # non-JSON keep-alive/noise lines are ignored
            else:
                LOGGER.error("Response Blank. Socket Connection seems to be closed. Trying to reconnect...")
                break
        # Connection dropped: resubscribe everything (reconnects via __sub).
        self.__sub(action="all")

    def __is_connection_alive(self) -> bool:
        """Return True when both the socket and its file wrapper exist and are open."""
        alive = False
        status = f"Socket is null : {self._sock is None}, socket file stream is null : {self._socket_fs is None}, "
        if (self._sock is not None) and (self._socket_fs is not None):
            LOGGER.debug(status + f"Socket is closed : {self._sock._closed}, socket file stream is closed : {self._socket_fs.closed}")
            if (not self._sock._closed) and (not self._socket_fs.closed):
                alive = True
        return alive

    def __do_connection(self):
        '''
        Create connection; if it fails inititate retry logic
        '''
        try:
            self.__create_connection()
        except OSError:
            self.__retry_connection()

    def __create_connection(self):
        """Open the TCP socket and wrap it in a read/write text stream.

        Raises OSError when the connect fails.
        """
        self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._sock.settimeout(100)
        self._sock.connect((self.__host, self.__port))  # raises OSError
        self._sock.setblocking(True)
        self._socket_fs = self._sock.makefile('rw')
        LOGGER.info("Connection established with subscriber.")

    def __retry_connection(self):
        """Retry the connection with exponential backoff capped at maxDelay.

        Attempts ~17000 times (roughly 24h at the 5s cap); after the loop is
        exhausted, makes one final attempt and re-raises its OSError.
        """
        times = 17000  # ~17000 for ~24 hours with delay of 5 seconds
        initalDelay = 1  # seconds
        maxDelay = 5  # seconds
        delayFactor = 2.0
        currentDelay = initalDelay
        for currentTry in range(times, 0, -1):
            try:
                self.__create_connection()
            except OSError as e:
                LOGGER.error(f"Error : {e}. Failed to establish connection with the streaming socket. Retrying socket connection... Max tries left {currentTry}")
                sleep(currentDelay)
                currentDelay = currentDelay * delayFactor if currentDelay * delayFactor < maxDelay else maxDelay
            else:
                break
        else:
            # last attempt
            try:
                self.__create_connection()
            except OSError as e:
                LOGGER.error(f"Failed to connect with streaming socket after {times} unsuccessful retry attempts. Error : {e}")
                self._sock.close()
                raise e
PypiClean
/ArgotToo-1.0.tar.gz/ArgotToo-1.0/argotToo.py
import sys
from collections import defaultdict
from math import log
from math import log10
import numpy


## Function to read in the BLAST file provided by the user
def readBlast(blastFilePath):
    """Parse a BLAST tabular file (qseqid \\t sseqid \\t evalue) into a
    per-protein, per-GO-term dictionary of hits [sseqid, evalue, weight],
    with weight = |-log(evalue)|.
    """
    protGoDicBLAST = defaultdict(dict)  # [protein][goTerm] -> list of hits
    sseqList = []
    protToGoDic = defaultdict(list)
    # Pass 1: gather all subject-sequence identifiers.
    for line in open(blastFilePath):
        ssline = line.strip().split('\t')
        sseqId = ssline[1]
        sseqIdIdentifier = sseqId.split('|')[-1].split('_')[0]
        sseqList.append(sseqIdIdentifier)
    sseqSet = set(sseqList)
    print('Matching BLAST to GOA')
    # Pass 2: collect GO terms for every subject hit from the GOA table.
    for line in open('data/goaPfam.tab'):
        ssline = line.strip().split('\t')
        prot = ssline[0]
        if prot in sseqSet:
            for goTerm in ssline[1::]:
                if goTerm.startswith('GO:'):
                    protToGoDic[prot].append(goTerm)
            sseqSet.remove(prot)
    # Pass 3: attach each BLAST hit to every GO term of its subject.
    # FIX: the original re-initialized protGoDicBLAST inside this loop,
    # discarding all previously accumulated hits on every line.
    for line in open(blastFilePath):
        ssline = line.strip().split('\t')
        qseqId = ssline[0]
        sseqId = ssline[1]
        sseqIdIdentifier = sseqId.split('|')[-1].split('_')[0]
        evalue = float(ssline[2])
        goList = protToGoDic[sseqIdIdentifier]
        if evalue == 0.0:
            evalue = 1e250  # floor so log() is defined (|-log| is symmetric)
        weight = abs(-log(evalue))  # Weight = -log(evalue)
        for go in goList:
            if not go == '':
                protGoDicBLAST[qseqId].setdefault(go, []).append([sseqId, evalue, weight])
    return (protGoDicBLAST)


## Function to read in the HMMScan file provided by the user
def readHMMER(hmmerFilePath):
    """Parse an HMMScan tabular file (qseqid \\t domain \\t evalue) into a
    per-protein, per-GO-term dictionary of hits [pfamID, evalue, weight].

    GO terms from pfam2go plus GOA-enriched terms (kept only when at least a
    third of the proteins of a Pfam model carry them) are attached to each hit.
    """
    print('Matching HMMER to GOA')
    pfam2GoDic = readPfam2Go()
    protGoDicHMMER = defaultdict(dict)
    pfamDic = defaultdict(int)
    pfamProtCountDic = {}
    count = 0
    # Pass 1: register every Pfam model appearing in the input.
    for line in open(hmmerFilePath):
        ssline = line.strip().split('\t')
        pfamID = ssline[1].split('.')[0]
        pfamDic[pfamID] = []
        pfamProtCountDic[pfamID] = 0
        count += 1
    # FIX: was a Python-2 print statement (SyntaxError under Python 3).
    print('Gathering the GO terms of all proteins related to a Pfam model. This will take a few minutes')
    # Pass 2: count, per Pfam model, how often each GO term occurs among its
    # proteins (keys "pfamID~~go"), and how many proteins each model has.
    for line in open('data/goaPfam.tab'):
        ssline = line.strip().split('\t')
        goList = []
        for part in ssline[1::]:
            if not part.startswith('GO:'):
                pfamID = part
                if pfamID in pfamDic:
                    for go in goList:
                        pfamDic[pfamID + '~~' + go] += 1
                    pfamProtCountDic[pfamID] += 1
            elif part.startswith('GO:'):
                goList.append(part)
    count = 0
    print(len(pfamDic))
    # Drop enriched GO terms carried by fewer than a third of a model's proteins.
    # FIX: iterate over a snapshot -- popping while iterating .items()
    # raises RuntimeError in Python 3.
    for key, counts in list(pfamDic.items()):
        pfamID = key.split('~~')[0]
        protCount = pfamProtCountDic[pfamID]
        cutoff = protCount / 3
        if counts < cutoff:
            pfamDic.pop(key)
    # Pass 3: attach weighted hits per protein / GO term.
    for line in open(hmmerFilePath):
        ssline = line.strip().split('\t')
        qseqId = ssline[0]
        pfamID = ssline[1].split('.')[0]
        evalue = float(ssline[2])
        # FIX: original clamped with "if evalue < 1e250: evalue = 1e250",
        # which rewrote essentially every e-value and made all weights the
        # same constant. Mirror readBlast: only floor a zero e-value.
        if evalue == 0.0:
            evalue = 1e250
        goList = pfam2GoDic[pfamID]
        if not goList == []:
            weight = abs(-log(evalue))  # Weight = -log(evalue)
            for go in goList:
                protGoDicHMMER[qseqId].setdefault(go, []).append([pfamID, evalue, weight])
            for go in set(pfamDic[pfamID]):
                protCount = pfamProtCountDic[pfamID]
                goCount = pfamDic[pfamID + '~~' + go]
                # Enriched-term weights are scaled by the fraction of the
                # model's proteins carrying the term.
                weight = weight * (goCount / protCount)
                protGoDicHMMER[qseqId].setdefault(go, []).append([pfamID, evalue, weight])
    return (protGoDicHMMER)


## Function to merge all annotations from BLAST and HMMScan
def mergeAnnotations(protGoDicBLAST, protGoDicHMMER):
    """Merge the BLAST and HMMER hit dictionaries into
    [protein][goTerm] -> [totalWeight, blastHits, hmmerHits].
    """
    print('Merging BLAST and HMMER annotations')
    protGoDic = defaultdict(dict)
    # BLAST-led pairs: sum BLAST weights, add HMMER weights when present.
    for prot, values in protGoDicBLAST.items():
        for goTerm in values:
            totalWeight = 0
            hmmerHits = []
            blastHits = protGoDicBLAST[prot][goTerm]
            for sseqId in protGoDicBLAST[prot][goTerm]:
                weight = float(sseqId[-1])
                totalWeight += weight
            if prot in protGoDicHMMER:
                if goTerm in protGoDicHMMER[prot]:
                    for sseqId in protGoDicHMMER[prot][goTerm]:
                        weight = sseqId[-1]
                        totalWeight += weight
                    hmmerHits = protGoDicHMMER[prot][goTerm]
            protGoDic[prot].setdefault(goTerm, []).append(totalWeight)
            protGoDic[prot].setdefault(goTerm, []).append(blastHits)
            protGoDic[prot].setdefault(goTerm, []).append(hmmerHits)
    # HMMER-only pairs.
    for prot, values in protGoDicHMMER.items():
        for goTerm in values:
            if not goTerm in protGoDic[prot]:
                totalWeight = 0
                hmmerHits = protGoDicHMMER[prot][goTerm]
                blastHits = []
                for sseqId in protGoDicHMMER[prot][goTerm]:
                    weight = float(sseqId[-1])
                    totalWeight += weight
                protGoDic[prot].setdefault(goTerm, []).append(totalWeight)
                protGoDic[prot].setdefault(goTerm, []).append(blastHits)
                protGoDic[prot].setdefault(goTerm, []).append(hmmerHits)
    return (protGoDic)


## Function to retrieve the merged weights for all protein, go term pairs, and uppropagate the weights up the GO hierarchy
def getGoWeightDic(protGoDic, parentsDic, rootNodeDic):
    """Propagate each GO term's merged weight to its root node and all parents.

    Returns weightDic[protein][goTerm] -> accumulated weight.
    """
    weightDic = defaultdict(dict)
    for prot, goTerms in protGoDic.items():
        for goTerm in goTerms:
            totalWeight = protGoDic[prot][goTerm][0]
            rootNode = rootNodeDic[goTerm]
            if not rootNode in weightDic[prot]:
                weightDic[prot][rootNode] = totalWeight
            else:
                weightDic[prot][rootNode] += totalWeight
            weightDic[prot][goTerm] = totalWeight
            for parent in parentsDic[goTerm]:
                if not parent in weightDic[prot]:
                    weightDic[prot][parent] = totalWeight
                else:
                    weightDic[prot][parent] += totalWeight
    return (weightDic)


## Function which read the pfam 2 go into a dictionary
def readPfam2Go():
    """Read data/pfam2go into pfamID -> [go, ...]."""
    pfam2GoDic = defaultdict(list)
    for line in open('data/pfam2go'):
        if not line.startswith('!'):
            ssline = line.strip().split(' ')
            pfamID = ssline[1]
            go = ssline[-1]
            pfam2GoDic[pfamID].append(go)
    return (pfam2GoDic)


## Function which retrieves the GO slim, go parents, and go children into dictionaries
def getGoSlim():
    """Read data/goRelations.tab into children, parents, and slim dictionaries."""
    childrenDic = defaultdict(list)
    parentsDic = defaultdict(list)
    goSlimDic = defaultdict(list)
    for line in open('data/goRelations.tab'):
        ssline = line.strip().split('\t')
        child = ssline[0]
        parent = ssline[1]
        if not child == parent:
            childrenDic[parent].append(child)
            parentsDic[child].append(parent)
            goSlimDic[child].append(parent)
            goSlimDic[parent].append(child)
    return (childrenDic, parentsDic, goSlimDic)


## Function to calculate the semantic similarity between two GO terms
def getSemanticSimilarity(goTerm1, goTerm2, parentsDic, rootNodeDic, childrenDic, goCountsDic):
    """Return 2*IC(most-informative common ancestor) / (IC(t1)+IC(t2))."""
    goTerm1Ic = getIcScore(goTerm1, goCountsDic, rootNodeDic, childrenDic)
    goTerm2Ic = getIcScore(goTerm2, goCountsDic, rootNodeDic, childrenDic)
    goTerm1Parents = parentsDic[goTerm1]
    goTerm2Parents = parentsDic[goTerm2]
    commonParents = [val for val in goTerm1Parents if val in goTerm2Parents]
    highestIc = 0
    highestIcTerm = ''
    for parent in commonParents:
        parentIc = getIcScore(parent, goCountsDic, rootNodeDic, childrenDic)
        if parentIc > highestIc:
            highestIc = parentIc
            highestIcTerm = parent
    # A term that is itself an ancestor of the other is also a candidate.
    if goTerm1 in goTerm2Parents:
        if goTerm1Ic > highestIc:
            highestIcTerm = goTerm1
            highestIc = goTerm1Ic
    elif goTerm2 in goTerm1Parents:
        if goTerm2Ic > highestIc:
            highestIcTerm = goTerm2
            highestIc = goTerm2Ic
    # NOTE(review): divides by IC(t1)+IC(t2); raises ZeroDivisionError when
    # both ICs are 0 -- confirm inputs exclude root-only comparisons.
    semanticSim = (2 * highestIc) / (goTerm1Ic + goTerm2Ic)  # Semantic similarity formula
    return (semanticSim)


## Function to create the groups and their weights
def getGroupScores(incDic, parentsDic, rootNodeDic, childrenDic, goCountsDic):
    """Accumulate inc scores into group scores, also crediting parents whose
    semantic similarity to the predicted term is >= 0.7. Returns the grouped
    dict plus the ungrouped ("Nc") dict.
    """
    print('Getting group scores')
    groupScoreDic = defaultdict(dict)
    groupScoreNcDic = defaultdict(dict)
    for prot, values in incDic.items():
        for go in values:
            incScore = incDic[prot][go]
            incScore = float(incScore)
            groupScoreDic[prot].setdefault(go, 0)
            groupScoreDic[prot][go] += incScore
            groupScoreNcDic[prot].setdefault(go, 0)
            groupScoreNcDic[prot][go] += incScore
            goParents = parentsDic[go]
            for goParent in goParents:
                semanticSimilarity = getSemanticSimilarity(go, goParent, parentsDic, rootNodeDic, childrenDic, goCountsDic)
                if semanticSimilarity >= 0.7:  # GO parents are only considered if they have >= 0.7 similarity with the predicted GO term
                    groupScoreDic[prot].setdefault(goParent, 0)
                    groupScoreDic[prot][goParent] += incScore
    return (groupScoreDic, groupScoreNcDic)


## Function to create a dictionary with the root node for each GO term
def getRootNodes():
    """Map each GO term to its ontology root; unknown namespaces default to
    biological_process (GO:0008150)."""
    rootNodeDic = {}
    for line in open('data/nameSpaces.tab'):
        ssline = line.strip().split('\t')
        go = ssline[0]
        rootNode = ssline[1]
        if rootNode == 'biological_process':
            rootNodeDic[go] = 'GO:0008150'
        elif rootNode == 'molecular_function':
            rootNodeDic[go] = 'GO:0003674'
        elif rootNode == 'cellular_component':
            rootNodeDic[go] = 'GO:0005575'
        else:
            rootNodeDic[go] = 'GO:0008150'
    return (rootNodeDic)


## Function to retrieve the internal confidence (inc) scores for each go term in the weight dic, returns a dictionary
def getIncScores(weightDic, rootNodeDic):
    """inc(term) = weight(term) / weight(term's root node) per protein."""
    print('Getting inc scores')
    incScoreDic = defaultdict(dict)
    for prot, goTerms in weightDic.items():
        for goTerm in goTerms:
            if not goTerm in rootNodeDic:
                rootNode = 'GO:0008150'
            else:
                rootNode = rootNodeDic[goTerm]
            goWeight = weightDic[prot][goTerm]
            if rootNode in weightDic[prot]:
                rootGoWeight = weightDic[prot][rootNode]
            else:
                rootGoWeight = goWeight
            incScore = goWeight / rootGoWeight
            incScoreDic[prot][goTerm] = incScore
    return (incScoreDic)


## Function to create a standard deviation of all the group scores
def standardDeviation(weightDic):
    """Standard deviation of the group scores for the terms in weightDic.

    NOTE(review): reads the module-level global groupScoreDic (set in the
    __main__ pipeline below), as the original did.
    """
    scores = []  # FIX: original shadowed the builtin name `list`
    for prot, goTerms in weightDic.items():
        for goTerm in goTerms:
            # FIX: uppropagated terms present in weightDic but absent from
            # groupScoreDic raised KeyError; skip them instead.
            score = groupScoreDic.get(prot, {}).get(goTerm)
            if score is not None:
                scores.append(score)
    stdDev = numpy.std(scores)
    return (stdDev)


## Function to calculate the Z score of a GO term prediction
def getZScore(weightDic, stdDev, rootNodeDic, goSlimDic):
    """z = (w(term) - w(root)/|slim(term)|) / stdDev; 0 on any lookup/zero error."""
    print('Getting Z-Scores')
    zScoreDic = defaultdict(dict)
    for prot, goTerms in weightDic.items():
        for goTerm in goTerms:
            try:
                rootNode = rootNodeDic[goTerm]
                goWeight = weightDic[prot][goTerm]
                rootGoWeight = weightDic[prot][rootNode]
                goSlim = goSlimDic[goTerm]
                zScore = (goWeight - (rootGoWeight / len(goSlim))) / stdDev
                zScoreDic[prot][goTerm] = zScore
            # FIX: was a bare `except:`; catch the specific failures
            # (missing root/slim, empty slim, zero stdDev) instead.
            except (KeyError, ZeroDivisionError):
                zScoreDic[prot][goTerm] = 0
                continue
    return zScoreDic


## Function to retrieve the amount of times a GO term occurs in the GOA
def getGoCounts(rootNodeDic):
    """Read data/goCounts.tab; each term's count is also added to its root's total."""
    goCountsDic = defaultdict(int)
    for line in open('data/goCounts.tab'):
        ssline = line.strip().split('\t')
        go = ssline[0]
        counts = int(ssline[1])
        goCountsDic[go] = counts
        if go in rootNodeDic:
            rootNode = rootNodeDic[go]
            goCountsDic[rootNode] += counts
    return (goCountsDic)


## Function to retrieve the IC score for a given GO term
def getIcScore(goTerm, goCountsDic, rootNodeDic, childrenDic):
    """Information content: -log10((count(term)+sum(count(children))) / count(root))."""
    if goTerm in rootNodeDic:
        rootNode = rootNodeDic[goTerm]
    else:
        rootNode = 'GO:0008150'
    totRootProteins = goCountsDic[rootNode]
    ic = 0
    goCounts = float(goCountsDic[goTerm])
    children = childrenDic[goTerm]
    for child in children:  # GO counts for child GO terms are added up to the total
        goCounts += float(goCountsDic[child])
    ic = -log10(float(goCounts) / float(totRootProteins))
    return (ic)


## Calculates the total score for each GO term
def getTotalScores(protGoDic, groupScoreDic, incScoreDic, goCountDic, goTypeDic, childrenDic, goSlimDic, zScoreDic):
    """Write prot \\t goTerm \\t ic*inc*weight to the output file (sys.argv[3]).

    NOTE(review): a prediction is only written when its z-score is NOT < 300,
    as in the original -- confirm the intended direction of this threshold.
    """
    print('Calculating total scores')
    # FIX: context manager instead of manual open/close; use the goCountDic /
    # goTypeDic parameters (the call site passes goCountsDic and rootNodeDic)
    # instead of reaching for module globals; dropped the unused
    # groupScore/incGroupScore locals.
    with open(sys.argv[3], 'w') as outfile:
        for prot, values in protGoDic.items():
            for goTerm in values:
                goWeight = protGoDic[prot][goTerm][0]
                incScore = incScoreDic[prot][goTerm]
                icScore = getIcScore(goTerm, goCountDic, goTypeDic, childrenDic)
                if not zScoreDic[prot][goTerm] < 300:
                    outfile.write(prot + '\t' + goTerm + '\t' + str(icScore * incScore * goWeight) + '\n')


# FIX: the pipeline previously ran at import time; guard it so the module is
# importable (behavior when executed as a script is unchanged).
if __name__ == '__main__':
    print('ArgotToo usage: <BLAST file (qseqid \\t sseqid \\t evalue)> <HMMER file (qseqid \\t domain \\t evalue)> <output file>')
    if not len(sys.argv) == 4:
        print("Not enough commands passed to ArgotToo")
        sys.exit()
    childrenDic, parentsDic, goSlimDic = getGoSlim()
    rootNodeDic = getRootNodes()
    goCountsDic = getGoCounts(rootNodeDic)
    protGoDicBLAST = readBlast(sys.argv[1])
    protGoDicHMMER = readHMMER(sys.argv[2])
    protGoDic = mergeAnnotations(protGoDicBLAST, protGoDicHMMER)
    weightDic = getGoWeightDic(protGoDic, parentsDic, rootNodeDic)
    incScoreDic = getIncScores(weightDic, rootNodeDic)
    groupScoreDic, groupScoreNcDic = getGroupScores(incScoreDic, parentsDic, rootNodeDic, childrenDic, goCountsDic)
    stdDev = standardDeviation(weightDic)
    zScoreDic = getZScore(weightDic, stdDev, rootNodeDic, goSlimDic)
    totalScore = getTotalScores(protGoDic, groupScoreNcDic, incScoreDic, goCountsDic, rootNodeDic, childrenDic, goSlimDic, zScoreDic)
PypiClean
/IERNA-1.0.0-py3-none-any.whl/era/View/edit_template_top_level.py
from tkinter import DISABLED, NORMAL, filedialog, messagebox
import tkinter
import customtkinter
from era.Database.Model.emailTemplateModel import EmailTemplateModel
from era.Database.Repository.databaseRepository import GetDBRepositorySingletion
from era.Logger.logger import Logger
from era.Utility.ConfigUtility import GetConfigSingletion
from era.Utility.StringUtilityCTK import GetStringSingletionCTK
import shutil


class EditTemplateTopLevel(customtkinter.CTkToplevel):
    """Top-level dialog for creating, editing, or deleting email templates.

    show() blocks until the window closes and returns which operation was
    performed (one of the private codes below, -1 when nothing happened).
    """

    # Operation codes returned by show().
    __None = -1
    __AddTemplate = 0
    __EditTemplate = 1
    __DeleteTemplate = 2
    # Currently selected template model (edit/delete flows only).
    __selectedTemplate = None

    def __init__(self, xPos, yPos, *args, **kwargs):
        """Build the dialog at (xPos, yPos) with the three mode buttons."""
        super().__init__(*args, **kwargs)
        self.logger = Logger()
        self.config_obj = GetConfigSingletion()
        self.stringVar = GetStringSingletionCTK()
        self.database = GetDBRepositorySingletion()
        self.geometry(f'+{xPos}+{yPos}')
        self.geometry("800x900")
        self.operationType = self.__None
        # FIX: __ClearTemplate reads self.template, but the option menu is
        # only built in the edit/delete flows; without this initializer the
        # add-flow reset button raised AttributeError.
        self.template = None
        customtkinter.CTkFrame.rowconfigure(self, 0)
        customtkinter.CTkFrame.columnconfigure(self, 0, weight=1)
        customtkinter.CTkFrame.columnconfigure(self, 1, weight=1)
        customtkinter.CTkFrame.columnconfigure(self, 2, weight=1)
        self.addNewTemplate = customtkinter.CTkButton(self, text=self.stringVar.createTemplate.get(), command=self.__AddNewTemplate)
        self.addNewTemplate.grid(row=0, column=0, pady=10, padx=10, sticky="nsew")
        self.editTemplate = customtkinter.CTkButton(self, text=self.stringVar.editTemplate.get(), command=self.__EditExistTemplate)
        self.editTemplate.grid(row=0, column=1, pady=10, padx=10, sticky="nsew")
        self.deleteTemplate = customtkinter.CTkButton(self, text=self.stringVar.deleteTemplate.get(), command=self.__DeleteExistTemplate)
        self.deleteTemplate.grid(row=0, column=2, pady=10, padx=10, sticky="nsew")
        # Make the dialog modal.
        self.grab_set()

    def show(self):
        """Block until the window is closed; return the operation code."""
        self.wait_window()
        return self.operationType

    def __AddNewTemplate(self):
        """Swap the mode buttons for the 'create template' form."""
        self.addNewTemplate.grid_forget()
        self.editTemplate.grid_forget()
        self.deleteTemplate.grid_forget()
        customtkinter.CTkFrame.rowconfigure(self, 0)
        customtkinter.CTkFrame.rowconfigure(self, 1)
        customtkinter.CTkFrame.rowconfigure(self, 2)
        customtkinter.CTkFrame.rowconfigure(self, 3)
        customtkinter.CTkFrame.rowconfigure(self, 4)
        customtkinter.CTkFrame.rowconfigure(self, 5, weight=10)
        customtkinter.CTkFrame.rowconfigure(self, 6)
        customtkinter.CTkFrame.columnconfigure(self, 0, weight=1)
        customtkinter.CTkFrame.columnconfigure(self, 1, weight=1)
        customtkinter.CTkFrame.columnconfigure(self, 2, weight=1)
        # Template name.
        self.inputTemplateLable = customtkinter.CTkLabel(self, anchor="w", text=self.stringVar.templateName.get())
        self.inputTemplateLable.grid(row=1, column=0, padx=10, pady=10, sticky="ew")
        self.inputNameVar = customtkinter.StringVar(self, value='')
        self.inputName = customtkinter.CTkEntry(self, font=("Arial", 15), width=300, textvariable=self.inputNameVar)
        self.inputName.grid(row=1, column=1, columnspan=2, sticky='nwse', padx=10, pady=10)
        # Template file path (read-only entry + file picker).
        self.inputPathLable = customtkinter.CTkLabel(self, anchor="w", text=self.stringVar.templatePath.get())
        self.inputPathLable.grid(row=2, column=0, padx=10, pady=10, sticky="ew")
        self.inputPath = customtkinter.StringVar(self, value='')
        self.inputPathTextField = customtkinter.CTkEntry(self, font=("Arial", 15), width=300, textvariable=self.inputPath, state=DISABLED)
        self.inputPathTextField.grid(row=2, column=1, sticky='nwse', padx=10, pady=10)
        self.setInputPathButton = customtkinter.CTkButton(self, text=self.stringVar.selectTemplate.get(), command=self.__selectInputPutDirectory)
        self.setInputPathButton.grid(row=2, column=2, sticky='nsew', padx=10, pady=10)
        # Email subject.
        self.emailSubjectLable = customtkinter.CTkLabel(self, anchor="w", text=self.stringVar.emailSubject.get())
        self.emailSubjectLable.grid(row=3, column=0, padx=10, pady=10, sticky="ew")
        self.inputSubjectVar = customtkinter.StringVar(self, value='')
        self.inputEmailSubject = customtkinter.CTkEntry(self, font=("Arial", 15), width=300, textvariable=self.inputSubjectVar)
        self.inputEmailSubject.grid(row=3, column=1, columnspan=2, sticky='nwse', padx=10, pady=10)
        # Flags: does the template require a data file / an attachment?
        self.containDataVar = customtkinter.StringVar(self, "Yes")
        self.containDataCheckbox = customtkinter.CTkCheckBox(self, text=self.stringVar.requireData.get(), variable=self.containDataVar, onvalue="Yes", offvalue="No")
        self.containDataCheckbox.grid(row=4, column=0, sticky='nsew', padx=10, pady=10)
        self.containAttachmentVar = customtkinter.StringVar(self, "Yes")
        self.containAttachmentCheckbox = customtkinter.CTkCheckBox(self, text=self.stringVar.requireAttachment.get(), variable=self.containAttachmentVar, onvalue="Yes", offvalue="No")
        self.containAttachmentCheckbox.grid(row=4, column=1, sticky='nsew', padx=10, pady=10)
        # Read-only preview of the template body.
        self.emailPreview = customtkinter.CTkTextbox(self)
        self.emailPreview.configure(state=DISABLED)
        self.emailPreview.grid(row=5, column=0, columnspan=3, sticky="nsew", padx=10, pady=10)
        self.saveToDb = customtkinter.CTkButton(self, text=self.stringVar.save.get(), command=self.__SaveTemplate)
        self.saveToDb.grid(row=6, column=1, sticky='nsew', padx=10, pady=10)
        self.clearEmail = customtkinter.CTkButton(master=self, text=self.stringVar.resetEmail.get(), command=self.__ClearTemplate, fg_color="transparent", border_width=2, text_color=("gray10", "#DCE4EE"))
        self.clearEmail.grid(row=6, column=2, pady=10, padx=10, sticky="nsew")

    def __EditExistTemplate(self):
        """Swap the mode buttons for the 'edit template' form (adds a picker row)."""
        self.addNewTemplate.grid_forget()
        self.editTemplate.grid_forget()
        self.deleteTemplate.grid_forget()
        customtkinter.CTkFrame.rowconfigure(self, 0)
        customtkinter.CTkFrame.rowconfigure(self, 1)
        customtkinter.CTkFrame.rowconfigure(self, 2)
        customtkinter.CTkFrame.rowconfigure(self, 3)
        customtkinter.CTkFrame.rowconfigure(self, 4)
        customtkinter.CTkFrame.rowconfigure(self, 5)
        customtkinter.CTkFrame.rowconfigure(self, 6, weight=10)
        customtkinter.CTkFrame.rowconfigure(self, 7)
        customtkinter.CTkFrame.columnconfigure(self, 0, weight=1)
        customtkinter.CTkFrame.columnconfigure(self, 1, weight=1)
        customtkinter.CTkFrame.columnconfigure(self, 2, weight=1)
        # Template picker (row 1), then the same form as the add flow,
        # shifted down one row.
        self.template = customtkinter.CTkOptionMenu(self, values=self.database.GetAllEmailTemplateName(), command=self._OnSelectEmailTemplate, dynamic_resizing=False)
        self.template.grid(row=1, column=0, columnspan=3, pady=10, padx=10, sticky="nsew")
        self.inputTemplateLable = customtkinter.CTkLabel(self, anchor="w", text=self.stringVar.templateName.get())
        self.inputTemplateLable.grid(row=2, column=0, padx=10, pady=10, sticky="ew")
        self.inputNameVar = customtkinter.StringVar(self, value='')
        self.inputName = customtkinter.CTkEntry(self, font=("Arial", 15), width=300, textvariable=self.inputNameVar)
        self.inputName.grid(row=2, column=1, columnspan=2, sticky='nwse', padx=10, pady=10)
        self.inputPathLable = customtkinter.CTkLabel(self, anchor="w", text=self.stringVar.templatePath.get())
        self.inputPathLable.grid(row=3, column=0, padx=10, pady=10, sticky="ew")
        self.inputPath = customtkinter.StringVar(self, value='')
        self.inputPathTextField = customtkinter.CTkEntry(self, font=("Arial", 15), width=300, textvariable=self.inputPath, state=DISABLED)
        self.inputPathTextField.grid(row=3, column=1, sticky='nwse', padx=10, pady=10)
        self.setInputPathButton = customtkinter.CTkButton(self, text=self.stringVar.selectTemplate.get(), command=self.__selectInputPutDirectory)
        self.setInputPathButton.grid(row=3, column=2, sticky='nsew', padx=10, pady=10)
        self.emailSubjectLable = customtkinter.CTkLabel(self, anchor="w", text=self.stringVar.emailSubject.get())
        self.emailSubjectLable.grid(row=4, column=0, padx=10, pady=10, sticky="ew")
        self.inputSubjectVar = customtkinter.StringVar(self, value='')
        self.inputEmailSubject = customtkinter.CTkEntry(self, font=("Arial", 15), width=300, textvariable=self.inputSubjectVar)
        self.inputEmailSubject.grid(row=4, column=1, columnspan=2, sticky='nwse', padx=10, pady=10)
        self.containDataVar = customtkinter.StringVar(self, "Yes")
        self.containDataCheckbox = customtkinter.CTkCheckBox(self, text=self.stringVar.requireData.get(), variable=self.containDataVar, onvalue="Yes", offvalue="No")
        self.containDataCheckbox.grid(row=5, column=0, sticky='nsew', padx=10, pady=10)
        self.containAttachmentVar = customtkinter.StringVar(self, "Yes")
        self.containAttachmentCheckbox = customtkinter.CTkCheckBox(self, text=self.stringVar.requireAttachment.get(), variable=self.containAttachmentVar, onvalue="Yes", offvalue="No")
        self.containAttachmentCheckbox.grid(row=5, column=1, sticky='nsew', padx=10, pady=10)
        self.emailPreview = customtkinter.CTkTextbox(self)
        self.emailPreview.configure(state=DISABLED)
        self.emailPreview.grid(row=6, column=0, columnspan=3, sticky="nsew", padx=10, pady=10)
        self.saveToDb = customtkinter.CTkButton(self, text=self.stringVar.save.get(), command=self._OnUpdateEmailTemplate)
        self.saveToDb.grid(row=7, column=1, sticky='nsew', padx=10, pady=10)
        self.clearEmail = customtkinter.CTkButton(master=self, text=self.stringVar.resetEmail.get(), command=self.__ClearTemplate, fg_color="transparent", border_width=2, text_color=("gray10", "#DCE4EE"))
        self.clearEmail.grid(row=7, column=2, pady=10, padx=10, sticky="nsew")

    def __DeleteExistTemplate(self):
        """Reuse the edit form, but wire the save button to delete instead."""
        self.__EditExistTemplate()
        self.saveToDb.configure(command=self._OnDeleteEmailTemplate,)

    def __selectInputPutDirectory(self):
        """Let the user pick a template file and load it into the preview."""
        path = filedialog.askopenfilename()
        if path == '':
            return
        else:
            self.inputPath.set(path)
            # FIX: use a context manager so the handle is closed even when
            # reading/decoding fails (original open/close pair leaked on error).
            with open(path, encoding="utf-8") as file:
                content = file.read()
            self.emailPreview.configure(state=NORMAL)
            self.emailPreview.delete("0.0", "end")
            self.emailPreview.insert("0.0", content)
            self.emailPreview.configure(state=DISABLED)
            return

    def __SaveTemplate(self):
        """Validate the form, copy the template file into the resource folder,
        and persist a new EmailTemplateModel."""
        if self.inputNameVar.get() == None or self.inputNameVar.get() == "":
            messagebox.showerror(self.stringVar.createTemplate.get(), self.stringVar.errorMissingName.get())
            return
        elif self.inputPath.get() == None or self.inputPath.get() == "":
            messagebox.showerror(self.stringVar.createTemplate.get(), self.stringVar.errorMissingPath.get())
            return
        elif self.inputSubjectVar.get() == None or self.inputSubjectVar.get() == "":
            messagebox.showerror(self.stringVar.createTemplate.get(), self.stringVar.errorMissingSubject.get())
            return
        copyResultPath = shutil.copy(self.inputPath.get(), self.config_obj.ReadConfig('resource_path', 'resourcepath'))
        email = EmailTemplateModel(
            path=copyResultPath,
            name=self.inputNameVar.get(),
            containData=1 if self.containDataCheckbox.get() == "Yes" else 0,
            containAttachment=1 if self.containAttachmentCheckbox.get() == "Yes" else 0,
            subject=self.inputSubjectVar.get()
        )
        if self.database.SaveEmailTemplate(emailTemplateModel=email):
            messagebox.showinfo(self.stringVar.createTemplate.get(), self.stringVar.createTemplateSuccess.get())
            self.operationType = self.__AddTemplate
            self.destroy()
            return
        else:
            messagebox.showerror(self.stringVar.createTemplate.get(), self.stringVar.createTemplateFailed.get())
            return

    def __ClearTemplate(self):
        """Reset every form field; in edit mode also reset the picker."""
        self.inputNameVar.set("")
        self.inputPath.set("")
        self.inputSubjectVar.set("")
        self.containDataVar.set("No")
        self.containAttachmentVar.set("No")
        # self.template is None in the add flow (see __init__).
        if self.template != None:
            self.template.set(self.stringVar.defaultTemplate.get())

    def _OnSelectEmailTemplate(self, choice):
        """Populate the form from the template the user picked in the menu."""
        if choice == self.stringVar.defaultTemplate.get():
            pass
        else:
            self.__selectedTemplate = self.database.GetEmailTemplateByName(choice)
            if self.__selectedTemplate != None:
                self.inputNameVar.set(self.__selectedTemplate.GetName())
                self.inputPath.set(self.__selectedTemplate.GetPath())
                self.inputSubjectVar.set(self.__selectedTemplate.GetSubject())
                self.containDataVar.set("Yes" if self.__selectedTemplate.GetContainData() == 1 else "No")
                self.containAttachmentVar.set("Yes" if self.__selectedTemplate.GetContainAttachment() == 1 else "No")
                # FIX: context manager instead of manual open/close.
                with open(self.inputPath.get(), encoding="utf-8") as file:
                    content = file.read()
                self.emailPreview.configure(state=NORMAL)
                self.emailPreview.delete("0.0", "end")
                self.emailPreview.insert("0.0", content)
                self.emailPreview.configure(state=DISABLED)

    def _OnUpdateEmailTemplate(self):
        """Write the edited fields back to the selected template record."""
        if self.__selectedTemplate == None:
            return
        self.__selectedTemplate.SetName(self.inputNameVar.get())
        self.__selectedTemplate.SetPath(self.inputPath.get())
        self.__selectedTemplate.SetSubject(self.inputSubjectVar.get())
        self.__selectedTemplate.SetContainData(1 if self.containDataVar.get() == "Yes" else 0)
        self.__selectedTemplate.SetContainAttachment(1 if self.containAttachmentVar.get() == "Yes" else 0)
        if self.database.UpdateEmailTemplate(self.__selectedTemplate):
            messagebox.showinfo(self.stringVar.createTemplate.get(), self.stringVar.updateTempalteSuccess.get())
            self.operationType = self.__EditTemplate
            shutil.copy(self.inputPath.get(), self.config_obj.ReadConfig('resource_path', 'resourcepath'))
            self.destroy()
        else:
            messagebox.showerror(self.stringVar.createTemplate.get(), self.stringVar.updateTempalteFailed.get())
        return

    def _OnDeleteEmailTemplate(self):
        """Delete the selected template record from the database."""
        if self.__selectedTemplate == None:
            return
        if self.database.DeleteEmailTemplate(self.__selectedTemplate.GetIdx()):
            messagebox.showinfo(self.stringVar.createTemplate.get(), self.stringVar.deleteTempalteSuccess.get())
            self.operationType = self.__EditTemplate
            self.destroy()
        else:
            messagebox.showerror(self.stringVar.createTemplate.get(), self.stringVar.deleteTempalteFailed.get())
        return
PypiClean
/Muntjac-1.1.2.tar.gz/Muntjac-1.1.2/muntjac/event/action_manager.py
from muntjac.event import action
from muntjac.terminal.key_mapper import KeyMapper
from muntjac.event.shortcut_action import ShortcutAction


class ActionManager(action.IHandler, action.INotifier):  # action.IContainer
    """Collects actions registered directly on this manager plus actions
    provided by delegated action handlers, and paints them to the client.

    Notes: Empties the keymapper for each repaint to avoid leaks; can cause
    problems in the future if the client assumes key don't change.
    (if lazyloading, one must not cache results)
    """

    def __init__(self, viewer=None):
        #: Set of actions registered directly on this manager (lazily created)
        self.ownActions = None

        #: Set of delegated action handlers (lazily created)
        self.actionHandlers = None

        #: Action mapper, rebuilt from scratch on every paint
        self.actionMapper = None

        # Whether the client currently knows about any actions; used to force
        # one extra (empty) paint after the last action is removed.
        self._clientHasActions = False

        self.viewer = viewer

    def requestRepaint(self):
        """Ask the associated viewer (if any) to repaint."""
        if self.viewer is not None:
            self.viewer.requestRepaint()

    def setViewer(self, viewer):
        """Attach this manager to a new viewer, detaching from the old one."""
        if viewer == self.viewer:
            return

        if self.viewer is not None:
            self.viewer.removeActionHandler(self)

        self.requestRepaint()  # this goes to the old viewer

        if viewer is not None:
            viewer.addActionHandler(self)

        self.viewer = viewer

        self.requestRepaint()  # this goes to the new viewer

    def addAction(self, action):
        """Register an action; repaints only if the action was not present.

        Bug fix: the original tested ``if self.ownActions.add(action):`` —
        a literal translation of Java's ``Set.add`` which returns a boolean.
        Python's ``set.add`` always returns None, so the repaint was never
        requested. Membership must be tested explicitly.
        """
        if self.ownActions is None:
            self.ownActions = set()

        if action not in self.ownActions:
            self.ownActions.add(action)
            self.requestRepaint()

    def removeAction(self, action):
        """Unregister an action; repaints only if the action was present.

        Bug fix: the original tested ``if self.ownActions.remove(action):``.
        ``set.remove`` returns None (so the repaint was never requested) and
        raises KeyError for an unknown action; check membership first so
        removing an unknown action is a harmless no-op.
        """
        if self.ownActions is not None:
            if action in self.ownActions:
                self.ownActions.remove(action)
                self.requestRepaint()

    def addActionHandler(self, actionHandler):
        """Register a delegated action handler (ignoring self and None)."""
        if actionHandler == self:
            # don't add the actionHandler to itself
            return

        if actionHandler is not None:
            if self.actionHandlers is None:
                self.actionHandlers = set()

            if actionHandler not in self.actionHandlers:
                self.actionHandlers.add(actionHandler)
                self.requestRepaint()

    def removeActionHandler(self, actionHandler):
        """Unregister a handler; drops the (lazily created) set when empty.

        Bug fix: the original tested ``if self.actionHandlers.remove(...):``
        — ``set.remove`` returns None, so neither the repaint nor the
        empty-set cleanup below ever executed.
        """
        if (self.actionHandlers is not None
                and actionHandler in self.actionHandlers):
            self.actionHandlers.remove(actionHandler)
            self.requestRepaint()

            if len(self.actionHandlers) == 0:
                self.actionHandlers = None

    def removeAllActionHandlers(self):
        """Drop every delegated handler at once."""
        if self.actionHandlers is not None:
            self.actionHandlers = None
            self.requestRepaint()

    def paintActions(self, actionTarget, paintTarget):
        """Paint the union of own and handler-provided actions to the client.

        The key mapper is recreated on every paint (see class note).
        """
        self.actionMapper = None

        actions = set()

        if self.actionHandlers is not None:
            for handler in self.actionHandlers:
                ac = handler.getActions(actionTarget, self.viewer)
                if ac is not None:
                    for a in ac:
                        actions.add(a)

        if self.ownActions is not None:
            actions = actions.union(self.ownActions)

        # Must repaint whenever there are actions OR if all actions have
        # been removed but still exist on client side
        if (len(actions) > 0) or self._clientHasActions:
            self.actionMapper = KeyMapper()

            paintTarget.addVariable(self.viewer, "action", "")
            paintTarget.startTag("actions")

            for a in actions:
                paintTarget.startTag("action")

                akey = self.actionMapper.key(a)
                paintTarget.addAttribute("key", akey)

                if a.getCaption() is not None:
                    paintTarget.addAttribute("caption", a.getCaption())

                if a.getIcon() is not None:
                    paintTarget.addAttribute("icon", a.getIcon())

                if isinstance(a, ShortcutAction):
                    sa = a
                    paintTarget.addAttribute("kc", sa.getKeyCode())

                    modifiers = sa.getModifiers()
                    if modifiers is not None:
                        # Modifier key codes are serialised as strings.
                        smodifiers = [str(m) for m in modifiers]
                        paintTarget.addAttribute("mk", smodifiers)

                paintTarget.endTag("action")

            paintTarget.endTag("actions")

        # Update flag for next repaint so we know if we need to paint empty
        # actions or not (must send actions is client had actions before and
        # all actions were removed).
        self._clientHasActions = len(actions) > 0

    def handleActions(self, variables, sender):
        """Dispatch the action the client selected, if the variables carry one."""
        if 'action' in variables and self.actionMapper is not None:
            key = variables.get('action')
            a = self.actionMapper.get(key)
            target = variables.get('actiontarget')
            if a is not None:
                self.handleAction(a, sender, target)

    def getActions(self, target, sender):
        """Return the union of own and handler-provided actions as a list."""
        actions = set()

        if self.ownActions is not None:
            for a in self.ownActions:
                actions.add(a)

        if self.actionHandlers is not None:
            for h in self.actionHandlers:
                as_ = h.getActions(target, sender)
                if as_ is not None:
                    for a in as_:
                        actions.add(a)

        return list(actions)

    def handleAction(self, a, sender, target):
        """Forward *a* to every handler, then to the action itself if it is
        an own action implementing action.IListener."""
        if self.actionHandlers is not None:
            # Iterate over a snapshot so a handler may unregister itself
            # (mutating the set) while the action is being dispatched.
            arry = list(self.actionHandlers)
            for handler in arry:
                handler.handleAction(a, sender, target)

        if ((self.ownActions is not None) and (a in self.ownActions)
                and isinstance(a, action.IListener)):
            a.handleAction(sender, target)
PypiClean
/ETLT-0.9.6.tar.gz/ETLT-0.9.6/etlt/dimension/RegularDimension.py
import abc


class RegularDimension(metaclass=abc.ABCMeta):
    """
    Abstract parent class for translating natural key to a technical key of a
    regular dimension.
    """

    # ------------------------------------------------------------------
    def __init__(self):
        """
        Object constructor.
        """
        # Cache of natural key -> technical key (None marks an invalid
        # natural key), so each natural key is translated at most once.
        # :type: dict[T, int|None]
        self._map = {}

        # Pre-load lookup data into the cache.
        self.pre_load_data()

    # ------------------------------------------------------------------
    def get_id(self, natural_key, enhancement=None):
        """
        Returns the technical ID for a natural key or None if the given
        natural key is not valid.

        :param T natural_key: The natural key.
        :param T enhancement: Enhancement data of the dimension row.

        :rtype: int|None
        """
        try:
            # Fast path: this natural key has been translated before.
            return self._map[natural_key]
        except KeyError:
            pass

        # Unknown natural key: delegate the translation to the stored
        # procedure, bracketed by the pre-/post-call hooks.
        self.pre_call_stored_procedure()
        succeeded = False
        try:
            technical_id = self.call_stored_procedure(natural_key, enhancement)
            succeeded = True
        finally:
            self.post_call_stored_procedure(succeeded)

        # Remember the translation for subsequent calls.
        self._map[natural_key] = technical_id

        return technical_id

    # ------------------------------------------------------------------
    @abc.abstractmethod
    def call_stored_procedure(self, natural_key, enhancement):
        """
        Calls a stored procedure for getting the technical key of a natural
        key. Returns the technical ID or None if the given natural key is
        not valid.

        :param T natural_key: The natural key.
        :param T enhancement: Enhancement data of the dimension row.

        :rtype: int|None
        """
        raise NotImplementedError()

    # ------------------------------------------------------------------
    def pre_load_data(self):
        """
        Can be overridden to pre-load lookup data from a dimension table.

        :rtype: None
        """
        pass

    # ------------------------------------------------------------------
    def pre_call_stored_procedure(self):
        """
        Invoked before the stored procedure is called. In a concurrent
        environment override this method to acquire a lock on the dimension
        or dimension hierarchy.

        :rtype: None
        """
        pass

    # ------------------------------------------------------------------
    def post_call_stored_procedure(self, success):
        """
        Invoked after the stored procedure has been called. In a concurrent
        environment override this method to release the lock and to commit
        or roll back the transaction.

        :param bool success: True: the stored procedure is executed
                             successfully. False: an exception has occurred.

        :rtype: None
        """
        pass

# ----------------------------------------------------------------------
PypiClean
/Fattoush-0.4.0.tar.gz/Fattoush-0.4.0/src/fattoush/config/config_group.py
import copy import json import multiprocessing import urlparse from os import environ, path from jsonschema import validate from fattoush import namespace, util from fattoush.config import config, deprecated from fattoush.runner.parsing import parse_args class FattoushConfigGroup(object): schema = { "$schema": "http://json-schema.org/draft-04/schema#", "description": "schema for a single file fattoush config", "type": "object", "required": ["browsers"], "properties": { "description": {"$ref": "#/definitions/comment"}, "server": { "anyOf": [ {"$ref": "#/definitions/saucelabs"}, {"$ref": "#/definitions/local"} ] }, "browsers": { "anyOf": [ {"$ref": "#/definitions/browsers"}, {"$ref": "#/definitions/browsers-pre-0.4"} ] } }, "definitions": { "comment": { "description": "Not used anywhere, just a comment, has no validation, " "so you can store anything, intended to allow strings, " "and lists of strings, in order to facilitate multi-line " "comments, but could take an object with all sorts " "of meta-data... Please don't abuse." }, "saucelabs": { "description": "Specification of how to connect to saucelabs", "properties": { "description": {"$ref": "#/definitions/comment"}, "url": { "description": "The initial URL to load when the test begins", "type": "string" }, "user": { "description": "The user name used to invoke Sauce OnDemand", "type": "string" }, "key": { "description": "The access key for the user used to invoke " "Sauce OnDemand", "type": "string" } }, "required": ["user", "key"], "additionalProperties": False }, "local": { "description": "Specification of how to connect to a selenium server on " "your local network. 
Defaults to 127.0.0.1:4444", "properties": { "description": {"$ref": "#/definitions/comment"}, "host": { "description": "The hostname of the Selenium server", "type": "string" }, "port": { "description": "The port of the Selenium server", "type": "string" }, "url": { "description": "The initial URL to load when the test begins", "type": "string" } }, "additionalProperties": False }, "browsers-pre-0.4": { "type": "array", "minItems": 1, "items": {"$ref": "#/definitions/capabilities"}, "uniqueItems": True, }, "browsers": { "description": "Specification of the browsers to ask webdriver to open", "type": "object", "properties": { "capabilities": { "description": "Keyed lookup of desired capabilities", "type": "object", "additionalProperties": { "$ref": "#/definitions/capabilities", }, }, "options": { "description": "Keyed lookup of browser options", "type": "object", "additionalProperties": { "$ref": "#/definitions/options", }, }, "selection": { "type": "array", "items": {"$ref": "#/definitions/browser"}, }, }, "required": ["selection"], }, "capabilities": { "description": "Specification of the desired capabilities to " "give webdriver", "type": "object", "properties": { "description": {"$ref": "#/definitions/comment"}, "platform": {"type": "string"}, "os": {"type": "string"}, "browser": { "type": "string", "enum": list(config.FattoushConfig.desired.keys()), }, "url": { "type": "string", "description": "Contains the operating system, version and " "browser name of the selected browser, in a " "format designed for use by the Selenium " "Client Factory" }, "browser-version": {"type": "string"} }, "required": ["browser"] }, "options": { "description": "Specification of the browser options to give webdriver", "type": "array", "items": {"type": "string"}, }, "browser": { "description": "Specification of a browsers to ask webdriver to open", "type": "object", "properties": { "description": {"$ref": "#/definitions/comment"}, "capabilities": { "anyOf": [ {"$ref": 
"#/definitions/capabilities"}, { "type": "string", "description": "Key within capabilities", }, ] }, "options": { "anyOf": [ {"$ref": "#/definitions/options"}, { "type": "string", "description": "Key within options", }, ] }, }, }, } } @staticmethod def config_from_env(): """ Joyfully SauceConnect presents completely different environmental variables based on whether you are running against one saucelabs session or several. This function will return a list of different session configurations whatever the case - empty if there are none, a list with only one item if there is only one, or a list of multiple items if there are many. It's almost as if this would have been the sensible way for them to do it too. The only part which is different in the singular case is the missing 'os' key. in the multiple case the json string is documented to be of the format as follows: [ { "platform":"LINUX", "os":"Linux", "browser":"firefox", "url":"sauce-ondemand:?os=Linux& browser=firefox& browser-version=16", "browser-version":"16" }, { "platform":"VISTA", "os":"Windows 2008", "browser":"iexploreproxy", "url":"sauce-ondemand:?os=Windows 2008& browser=iexploreproxy& browser-version=9", "browser-version":"9" } ] """ try: json_data = environ.get("SAUCE_ONDEMAND_BROWSERS") browsers = json.loads(json_data) except (ValueError, TypeError): url = environ.get("SELENIUM_DRIVER") try: query = urlparse.urlparse(url).query parsed = urlparse.parse_qs(query) except AttributeError: parsed = {} browsers = [ { "platform": environ.get("SELENIUM_PLATFORM"), "browser": environ.get("SELENIUM_BROWSER"), "url": url, "browser-version": environ.get("SELENIUM_VERSION") } ] if "os" in parsed: browsers[0]["os"] = parsed["os"] return { "server": { "host": environ.get("SELENIUM_HOST"), "port": environ.get("SELENIUM_PORT"), "url": environ.get("SELENIUM_URL"), "user": environ.get("SAUCE_USER_NAME"), "key": environ.get("SAUCE_API_KEY") }, "browsers": browsers } @staticmethod def config_from_file(absolute_file_path): 
""" Supports reading config a single json file. """ return json.load(open(absolute_file_path)) @property def xrange(self): return xrange(len(self.configs["browsers"])) @classmethod def from_cli_args(cls): import sys options = parse_args(sys.argv[1:]) return cls(options) def __init__(self, options): """ Takes the options that are passed into the runner and creates a config object that can be referred to throughout fattoush. :type options: Namespace """ if options.print_schema: print(json.dumps(self.schema, indent=2, sort_keys=True)) exit(0) elif options.print_config: file_name = path.join(path.dirname(__file__), 'example_config.json') with open(file_name) as example: print example.read() exit(0) self._raw_options = options self.parallel = options.parallel if options.config is None: self.configs = self.config_from_env() else: self.configs = self.config_from_file(options.config) validate(self.configs, self.schema) self._convert_legacy_config() xunit_filename = ('lettucetests.xml' if options.enable_xunit and options.xunit_file is None else options.xunit_file) self.lettuce_options = { 'base_path': options.base_path, 'scenarios': options.scenarios, 'verbosity': options.verbosity, 'random': options.random, 'enable_xunit': options.enable_xunit, 'xunit_filename': xunit_filename, 'failfast': options.failfast, 'auto_pdb': options.auto_pdb, 'tags': ([tag.strip('@') for tag in options.tags] if options.tags else None) } def _convert_legacy_config(self): if isinstance(self.configs['browsers'], list): # Before version 0.4 all capabilities went # straight in the browser objects, since 0.4 this # has become nested to also include options to set # on the remote executor. The pre-0.4 format shall # be deprecated from 0.5 onwards. 
with deprecated.FromVersion('0.5', 'Legacy browser config format'): browsers = self.configs["browsers"] self.configs["browsers"] = { 'selection': [ {'capabilities': browser} for browser in browsers ] } @property def to_dict(self): """ The returned dictionary gives a shallow copy of the data required to create a FeatureConfig. """ return { 'lettuce_options': self.lettuce_options.copy(), 'config': self.configs.copy() } def run(self): if not self.configs: raise IndexError('There are no webdriver configs against ' 'which to run lettuce.') runner = ( self._run_parallel if self.parallel == 'webdriver' else self._run_series ) return sum( result.scenarios_ran - result.scenarios_passed for result in runner() if result.features_passed < result.features_ran ) def _run_series(self): """ Runs lettuce against each browser configuration one at a time :type self: fattoush.config.FattoushConfigGroup """ return list( util.try_map( _run_kwargs, self._iter_browser_kwargs(), ) ) def _run_parallel(self): """ Runs lettuce against all the browser configurations at the same time in different processes. :type self: fattoush.config.FattoushConfigGroup """ multiprocessing.Pool().map( _run_kwargs, self._iter_browser_kwargs(), ) def _iter_browser_kwargs(self): browsers = self.configs["browsers"] kwargs = { 'server': self.configs.get("server", {}), 'lettuce': self.lettuce_options, } for (index, browser) in enumerate(browsers['selection']): kwargs['index'] = index caps = browser.setdefault('capabilities', {}) if not isinstance(caps, dict): browser['capabilities'] = browsers['capabilities'][caps] opts = browser.setdefault('options', []) if not isinstance(opts, list): browser['options'] = browsers['options'][opts] kwargs['browser'] = browser yield kwargs def _run_kwargs(kwargs): """ This would be a starmap in py3... 
""" return run_single(**copy.deepcopy(kwargs)) def run_single(index, browser, server, lettuce): """ :type index: int :type browser: dict :type server: dict :type lettuce: dict """ fattoush_config = config.FattoushConfig( index=index, browser=browser, server=server, lettuce_cfg=lettuce, ) namespace.config = fattoush_config try: result = fattoush_config.run() return result finally: namespace.config = None
PypiClean
/DJModels-0.0.6-py3-none-any.whl/djmodels/utils/dateparse.py
# We're using regular expressions rather than time.strptime because:
# - They provide both validation and parsing.
# - They're more flexible for datetimes.
# - The date/datetime/time constructors produce friendlier error messages.
import datetime
import re

from djmodels.utils.timezone import get_fixed_timezone, utc

# ISO-style date: YYYY-M(M)-D(D); anchored at the end so trailing junk fails.
date_re = re.compile(
    r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})$'
)

# H(H):M(M)[:S(S)[.ffffff]] — at most 6 fractional digits are captured,
# any further digits are consumed and discarded.
time_re = re.compile(
    r'(?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
    r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
)

# Date and time separated by 'T' or a space, with an optional UTC offset
# ('Z', '+HH', '+HHMM' or '+HH:MM').
datetime_re = re.compile(
    r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})'
    r'[T ](?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
    r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
    r'(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$'
)

# Django's own duration format: '[DD] [HH:]MM:SS[.ffffff]'. The lookahead
# after the hours group keeps 'MM:SS' from being misread as 'HH:MM'.
standard_duration_re = re.compile(
    r'^'
    r'(?:(?P<days>-?\d+) (days?, )?)?'
    r'((?:(?P<hours>-?\d+):)(?=\d+:\d+))?'
    r'(?:(?P<minutes>-?\d+):)?'
    r'(?P<seconds>-?\d+)'
    r'(?:\.(?P<microseconds>\d{1,6})\d{0,6})?'
    r'$'
)

# Support the sections of ISO 8601 date representation that are accepted by
# timedelta
iso8601_duration_re = re.compile(
    r'^(?P<sign>[-+]?)'
    r'P'
    r'(?:(?P<days>\d+(.\d+)?)D)?'
    r'(?:T'
    r'(?:(?P<hours>\d+(.\d+)?)H)?'
    r'(?:(?P<minutes>\d+(.\d+)?)M)?'
    r'(?:(?P<seconds>\d+(.\d+)?)S)?'
    r')?'
    r'$'
)

# Support PostgreSQL's day-time interval format, e.g. "3 days 04:05:06". The
# year-month and mixed intervals cannot be converted to a timedelta and thus
# aren't accepted.
postgres_interval_re = re.compile(
    r'^'
    r'(?:(?P<days>-?\d+) (days? ?))?'
    r'(?:(?P<sign>[-+])?'
    r'(?P<hours>\d+):'
    r'(?P<minutes>\d\d):'
    r'(?P<seconds>\d\d)'
    r'(?:\.(?P<microseconds>\d{1,6}))?'
    r')?$'
)


def parse_date(value):
    """Parse a string and return a datetime.date.

    Raise ValueError if the input is well formatted but not a valid date.
    Return None if the input isn't well formatted.
    """
    match = date_re.match(value)
    if match:
        kw = {k: int(v) for k, v in match.groupdict().items()}
        return datetime.date(**kw)


def parse_time(value):
    """Parse a string and return a datetime.time.

    This function doesn't support time zone offsets.

    Raise ValueError if the input is well formatted but not a valid time.
    Return None if the input isn't well formatted, in particular if it
    contains an offset.
    """
    match = time_re.match(value)
    if match:
        kw = match.groupdict()
        # Right-pad the fractional part to 6 digits so e.g. '.5' is read
        # as 500000 microseconds, not 5.
        kw['microsecond'] = kw['microsecond'] and kw['microsecond'].ljust(6, '0')
        kw = {k: int(v) for k, v in kw.items() if v is not None}
        return datetime.time(**kw)


def parse_datetime(value):
    """Parse a string and return a datetime.datetime.

    This function supports time zone offsets. When the input contains one,
    the output uses a timezone with a fixed offset from UTC.

    Raise ValueError if the input is well formatted but not a valid datetime.
    Return None if the input isn't well formatted.
    """
    match = datetime_re.match(value)
    if match:
        kw = match.groupdict()
        # Same fractional-second padding as parse_time().
        kw['microsecond'] = kw['microsecond'] and kw['microsecond'].ljust(6, '0')
        tzinfo = kw.pop('tzinfo')
        if tzinfo == 'Z':
            tzinfo = utc
        elif tzinfo is not None:
            # The minutes part is only present when the offset is longer
            # than '+HH' (i.e. '+HHMM' or '+HH:MM').
            offset_mins = int(tzinfo[-2:]) if len(tzinfo) > 3 else 0
            offset = 60 * int(tzinfo[1:3]) + offset_mins
            if tzinfo[0] == '-':
                offset = -offset
            tzinfo = get_fixed_timezone(offset)
        kw = {k: int(v) for k, v in kw.items() if v is not None}
        kw['tzinfo'] = tzinfo
        return datetime.datetime(**kw)


def parse_duration(value):
    """Parse a duration string and return a datetime.timedelta.

    The preferred format for durations in Django is '%d %H:%M:%S.%f'.

    Also supports ISO 8601 representation and PostgreSQL's day-time interval
    format.
    """
    match = (
        standard_duration_re.match(value) or
        iso8601_duration_re.match(value) or
        postgres_interval_re.match(value)
    )
    if match:
        kw = match.groupdict()
        # Days are pulled out and added separately; the optional ISO 8601 /
        # PostgreSQL sign only applies to the remaining time components.
        days = datetime.timedelta(float(kw.pop('days', 0) or 0))
        sign = -1 if kw.pop('sign', '+') == '-' else 1
        if kw.get('microseconds'):
            kw['microseconds'] = kw['microseconds'].ljust(6, '0')
        # A negative seconds component must drag its fraction negative too,
        # e.g. '-1.5' means -(1s + 500000us), not -1s + 500000us.
        if kw.get('seconds') and kw.get('microseconds') and kw['seconds'].startswith('-'):
            kw['microseconds'] = '-' + kw['microseconds']
        kw = {k: float(v) for k, v in kw.items() if v is not None}
        return days + sign * datetime.timedelta(**kw)
PypiClean
/Flask-XML-RPC-Re-0.1.4.tar.gz/Flask-XML-RPC-Re-0.1.4/flask_xmlrpcre/xmlrpcre.py
# Flask extension that exposes an XML-RPC endpoint via the stdlib
# SimpleXMLRPCDispatcher, plus helpers for testing XML-RPC views.
from flask import request, current_app

import sys

# Python 2/3 compatibility: the dispatcher and client marshalling live in
# different modules on each major version.
if sys.version_info[0] == 2:
    from SimpleXMLRPCServer import SimpleXMLRPCDispatcher as Dispatcher
    import xmlrpclib
    string_types = basestring
else:
    from xmlrpc.server import SimpleXMLRPCDispatcher as Dispatcher
    import xmlrpc.client as xmlrpclib
    string_types = str

Fault = xmlrpclib.Fault


class XMLRPCHandler(Dispatcher):
    """
    This is the basic XML-RPC handler class. To use it, you create it::

        handler = XMLRPCHandler('api')

    Then, you can register functions with the :meth:`register` method::

        @handler.register
        def spam():
            pass

    :meth:`register` is just an alias for :meth:`register_function`, so you
    can use that too. You can also register an instance using the
    :meth:`register_instance` method, and any methods on said instance will
    be exposed if they do not start with an ``_``.

    Then, you connect it to a :class:`~flask.Flask` instance or a Flask
    module with the :meth:`connect` method, like this::

        handler.connect(app, '/')

    :param endpoint_name: The name to use as an endpoint when connected to
                          an app or module. If not specified here, you
                          specify when you call :meth:`connect`.
    :param instance: The instance to register and expose the methods of.
    :param introspection: Whether to register the introspection functions,
                          like :obj:`system.listMethods`. (It will by
                          default.)
    :param multicall: Whether to register the :obj:`system.multicall`
                      function. (It won't by default.)
    """

    def __init__(self, endpoint_name=None, instance=None, introspection=True,
                 multicall=False):
        # The dispatcher only grew the (allow_none, encoding) constructor
        # arguments in Python 2.5.
        if sys.version_info[:2] < (2, 5):
            Dispatcher.__init__(self)
        else:
            Dispatcher.__init__(self, True, 'utf-8')
        self.endpoint_name = endpoint_name
        if introspection:
            self.register_introspection_functions()
        if multicall:
            self.register_multicall_functions()
        if instance:
            self.register_instance(instance)

    def register(self, *args, **kwargs):
        """
        An alias for :meth:`register_function`.
        """
        return self.register_function(*args, **kwargs)

    def register_function(self, function, name=None):
        """
        This will register the given function. There are two ways to use it.

        As a plain old method, with or without a name::

            handler.register_function(spam)
            handler.register_function(spam, 'spam')

        As a decorator, also with or without a name::

            @handler.register_function
            def spam():
                pass

            @handler.register_function('spam')
            def spam():
                pass

        It's shorter and easier to use :meth:`register`, however, as it does
        the exact same thing.

        :param function: The function to register. (In the named decorator
                         form, this is the function's name.)
        :param name: The name to use, except in the named decorator form. If
                     not given, the function's :obj:`__name__` attribute
                     will be used.
        """
        # Called with a string: act as a named decorator factory.
        if isinstance(function, string_types):
            return lambda fn: self.register_function(fn, function)
        return Dispatcher.register_function(self, function, name)

    def register_instance(self, instance, allow_dotted_names=False):
        """
        This registers any kind of object. If the requested method hasn't
        been registered by :meth:`register_function`, it will be checked
        against the instance. You can only have one instance at a time,
        however.

        If :obj:`allow_dotted_names` is True, the name will be split on the
        dots and the object will be traveled down recursively. However, this
        is a **HUGE SECURITY LOOPHOLE**, as while private methods (starting
        with ``_``) will not be exposed, it's still possible that someone
        could get access to your globals and do very bad things. So don't do
        it unless you have a very good reason.

        :param instance: The instance to register.
        :param allow_dotted_names: Whether to resolve dots in method names.
                                   You probably shouldn't.
        """
        # Yes, it's just a wrapper. I know. This way the docs are consistent.
        Dispatcher.register_instance(self, instance, allow_dotted_names)

    def connect(self, app_module, route, endpoint_name=None):
        """
        Connects the handler to an app or module. You have to provide the
        app and the URL route to use. The route can't contain any variable
        parts, because there is no way to get them to the method. ::

            handler.connect(app, '/api')

        :param app_module: The app or module to connect the handler to.
        :param route: The URL route to use for the handler.
        :param endpoint_name: The name to use when connecting the endpoint.
        """
        if endpoint_name is None:
            endpoint_name = self.endpoint_name
        if endpoint_name is None:    # still
            raise RuntimeError("No endpoint name given!")
        app_module.add_url_rule(route, endpoint_name, self.handle_request,
                                methods=['POST'])

    def handle_request(self):
        """
        This is the actual request handler that is routed by :meth:`connect`.
        It takes the request data, dispatches the method, and sends it back
        to the client.
        """
        response_data = self._marshaled_dispatch(request.data)
        return current_app.response_class(response_data,
                                          content_type='text/xml')

    def namespace(self, prefix):
        """
        This returns a :class:`XMLRPCNamespace` object, which has
        :meth:`~XMLRPCNamespace.register` and
        :meth:`~XMLRPCNamespace.register_function` methods. These forward
        directly to the :meth:`register_function` method of the parent they
        were created from, but they will prepend the given prefix, plus a
        dot, to the name registered. For example::

            blog = handler.namespace('blog')

            @blog.register
            def new_post(whatever):
                pass

        would make :obj:`new_post` available as :obj:`blog.new_post`.

        :param prefix: The name to prefix the methods with.
        """
        return XMLRPCNamespace(self, prefix)


class XMLRPCNamespace(object):
    """
    This is a simple proxy that can register methods, and passes them on to
    the :class:`XMLRPCHandler` that created it with a given name added as a
    prefix (with a dot). For more nesting, you can create namespaces from
    namespaces with the :meth:`namespace` method.

    :parameter handler: The handler to pass the methods to.
    :parameter prefix: The prefix to give to the assigned methods. A dot
                       will be appended.
    """

    def __init__(self, handler, prefix):
        self.handler = handler
        self.prefix = prefix

    def register_function(self, function, name=None):
        """
        Registers a function. Use is the same as with the
        :meth:`XMLRPCHandler.register_function` method.

        :param function: The function to register. (In the named decorator
                         form, this is the function's name.)
        :param name: The name to use, except in the named decorator form. If
                     not given, the function's :obj:`__name__` attribute
                     will be used.
        """
        if isinstance(function, string_types):
            return lambda fn: self.register_function(fn, function)
        if name is None:
            name = function.__name__
        new_name = self.prefix + '.' + name
        self.handler.register_function(function, new_name)

    def register(self, *args, **kwargs):
        """
        An alias for :meth:`register_function`. As with
        :meth:`XMLRPCHandler.register`, it's shorter and easier to type.
        """
        return self.register_function(*args, **kwargs)

    def namespace(self, name):
        """
        Returns another namespace for the same handler, with the given name
        postfixed to the current namespace's prefix. For example, ::

            handler.namespace('foo').namespace('bar')

        gives the same result as::

            handler.namespace('foo.bar')

        :param prefix: The name to prefix the methods with.
        """
        return XMLRPCNamespace(self.handler, self.prefix + '.' + name)


# NOTE(review): the keyword-only `allow_none` syntax below is Python 3 only,
# although the module still imports the Python 2 dispatcher above — py2
# support appears vestigial; confirm before relying on it.
def dump_method_call(method, *params, allow_none=False):
    """
    This marshals the given method and parameters into a proper XML-RPC
    method call. It's very useful for testing.

    :param method: The name of the method to call.
    :param params: The parameters to pass to the method.
    :param allow_none: Allow to marshal None values.
    """
    return xmlrpclib.dumps(params, methodname=method, allow_none=allow_none)


def load_method_response(response):
    """
    This returns the actual value returned from an XML-RPC response. If it's
    a :obj:`Fault` instance, it will return the fault instead of the value.
    This is also useful for testing.

    :param response: The marshaled XML-RPC method response or fault.
    """
    try:
        return xmlrpclib.loads(response)[0][0]
    except Fault:
        # sys.exc_info() instead of `except Fault as fault` keeps the same
        # code valid on both Python 2 and 3.
        _, fault = sys.exc_info()[:2]
        return fault


def test_xmlrpc_call(client, rpc_path, method, *params, allow_none=False):
    """
    This makes a method call using a Werkzeug :obj:`Client`, such as the one
    returned by :meth:`flask.Flask.test_client`. It constructs the method
    call, makes the request, and then returns the response value or a
    :obj:`Fault`.

    :param client: A :obj:`werkzeug.Client`.
    :param rpc_path: The path to the XML-RPC handler.
    :param method: The method to call.
    :param params: The parameters to pass to the method.
    :param allow_none: Allow to pass None values.
    """
    rv = client.post(
        rpc_path,
        data=dump_method_call(method, *params, allow_none=allow_none),
        content_type='text/xml'
    )
    return load_method_response(rv.data)

test_xmlrpc_call.__test__ = False   # prevents Nose from collecting it


class XMLRPCTester(object):
    """
    This lets you conveniently make method calls using a Werkzeug
    :obj:`Client`, like the one returned by :meth:`flask.Flask.test_client`.
    You create it with the :obj:`Client` and the path to the responder, and
    then you call it with the method and params.

    :param client: A :obj:`werkzeug.Client`.
    :param rpc_path: The path to the XML-RPC handler.
    :param allow_none: Allow to pass None values.
    """
    __test__ = False    # prevents Nose from collecting it

    def __init__(self, client, rpc_path, allow_none=False):
        self.client = client
        self.rpc_path = rpc_path
        self.allow_none = allow_none

    def call(self, method, *params):
        """
        This calls the client's :obj:`post` method with the responder path,
        the marshaled method call, and a content type of ``text/xml``. It
        will return the unmarshaled response or fault.

        You can just call the instance like a function for the same effect.
        These two calls are equivalent::

            tester.call('hello', 'world')
            tester('hello', 'world')

        :param method: The name of the method to call.
        :param params: The parameters to pass to the method.
        """
        return test_xmlrpc_call(self.client, self.rpc_path, method, *params,
                                allow_none=self.allow_none)

    def __call__(self, method, *params):
        return self.call(method, *params)
PypiClean
/MetaCalls-0.0.5-cp310-cp310-manylinux2014_x86_64.whl/metacalls/node_modules/typescript/lib/lib.dom.iterable.d.ts
///////////////////////////// /// Window Iterable APIs ///////////////////////////// interface AudioParam { setValueCurveAtTime(values: Iterable<number>, startTime: number, duration: number): AudioParam; } interface AudioParamMap extends ReadonlyMap<string, AudioParam> { } interface BaseAudioContext { createIIRFilter(feedforward: Iterable<number>, feedback: Iterable<number>): IIRFilterNode; createPeriodicWave(real: Iterable<number>, imag: Iterable<number>, constraints?: PeriodicWaveConstraints): PeriodicWave; } interface CSSRuleList { [Symbol.iterator](): IterableIterator<CSSRule>; } interface CSSStyleDeclaration { [Symbol.iterator](): IterableIterator<string>; } interface Cache { addAll(requests: Iterable<RequestInfo>): Promise<void>; } interface CanvasPath { roundRect(x: number, y: number, w: number, h: number, radii?: number | DOMPointInit | Iterable<number | DOMPointInit>): void; } interface CanvasPathDrawingStyles { setLineDash(segments: Iterable<number>): void; } interface DOMRectList { [Symbol.iterator](): IterableIterator<DOMRect>; } interface DOMStringList { [Symbol.iterator](): IterableIterator<string>; } interface DOMTokenList { [Symbol.iterator](): IterableIterator<string>; entries(): IterableIterator<[number, string]>; keys(): IterableIterator<number>; values(): IterableIterator<string>; } interface DataTransferItemList { [Symbol.iterator](): IterableIterator<DataTransferItem>; } interface EventCounts extends ReadonlyMap<string, number> { } interface FileList { [Symbol.iterator](): IterableIterator<File>; } interface FontFaceSet extends Set<FontFace> { } interface FormData { [Symbol.iterator](): IterableIterator<[string, FormDataEntryValue]>; /** Returns an array of key, value pairs for every entry in the list. */ entries(): IterableIterator<[string, FormDataEntryValue]>; /** Returns a list of keys in the list. */ keys(): IterableIterator<string>; /** Returns a list of values in the list. 
*/ values(): IterableIterator<FormDataEntryValue>; } interface HTMLAllCollection { [Symbol.iterator](): IterableIterator<Element>; } interface HTMLCollectionBase { [Symbol.iterator](): IterableIterator<Element>; } interface HTMLCollectionOf<T extends Element> { [Symbol.iterator](): IterableIterator<T>; } interface HTMLFormElement { [Symbol.iterator](): IterableIterator<Element>; } interface HTMLSelectElement { [Symbol.iterator](): IterableIterator<HTMLOptionElement>; } interface Headers { [Symbol.iterator](): IterableIterator<[string, string]>; /** Returns an iterator allowing to go through all key/value pairs contained in this object. */ entries(): IterableIterator<[string, string]>; /** Returns an iterator allowing to go through all keys of the key/value pairs contained in this object. */ keys(): IterableIterator<string>; /** Returns an iterator allowing to go through all values of the key/value pairs contained in this object. */ values(): IterableIterator<string>; } interface IDBDatabase { /** Returns a new transaction with the given mode ("readonly" or "readwrite") and scope which can be a single object store name or an array of names. */ transaction(storeNames: string | Iterable<string>, mode?: IDBTransactionMode, options?: IDBTransactionOptions): IDBTransaction; } interface IDBObjectStore { /** * Creates a new index in store with the given name, keyPath and options and returns a new IDBIndex. If the keyPath and options define constraints that cannot be satisfied with the data already in store the upgrade transaction will abort with a "ConstraintError" DOMException. * * Throws an "InvalidStateError" DOMException if not called within an upgrade transaction. 
*/ createIndex(name: string, keyPath: string | Iterable<string>, options?: IDBIndexParameters): IDBIndex; } interface MediaKeyStatusMap { [Symbol.iterator](): IterableIterator<[BufferSource, MediaKeyStatus]>; entries(): IterableIterator<[BufferSource, MediaKeyStatus]>; keys(): IterableIterator<BufferSource>; values(): IterableIterator<MediaKeyStatus>; } interface MediaList { [Symbol.iterator](): IterableIterator<string>; } interface MessageEvent<T = any> { /** @deprecated */ initMessageEvent(type: string, bubbles?: boolean, cancelable?: boolean, data?: any, origin?: string, lastEventId?: string, source?: MessageEventSource | null, ports?: Iterable<MessagePort>): void; } interface MimeTypeArray { [Symbol.iterator](): IterableIterator<MimeType>; } interface NamedNodeMap { [Symbol.iterator](): IterableIterator<Attr>; } interface Navigator { /** Available only in secure contexts. */ requestMediaKeySystemAccess(keySystem: string, supportedConfigurations: Iterable<MediaKeySystemConfiguration>): Promise<MediaKeySystemAccess>; vibrate(pattern: Iterable<number>): boolean; } interface NodeList { [Symbol.iterator](): IterableIterator<Node>; /** Returns an array of key, value pairs for every entry in the list. */ entries(): IterableIterator<[number, Node]>; /** Returns an list of keys in the list. */ keys(): IterableIterator<number>; /** Returns an list of values in the list. */ values(): IterableIterator<Node>; } interface NodeListOf<TNode extends Node> { [Symbol.iterator](): IterableIterator<TNode>; /** Returns an array of key, value pairs for every entry in the list. */ entries(): IterableIterator<[number, TNode]>; /** Returns an list of keys in the list. */ keys(): IterableIterator<number>; /** Returns an list of values in the list. 
*/ values(): IterableIterator<TNode>; } interface Plugin { [Symbol.iterator](): IterableIterator<MimeType>; } interface PluginArray { [Symbol.iterator](): IterableIterator<Plugin>; } interface RTCRtpTransceiver { setCodecPreferences(codecs: Iterable<RTCRtpCodecCapability>): void; } interface RTCStatsReport extends ReadonlyMap<string, any> { } interface SVGLengthList { [Symbol.iterator](): IterableIterator<SVGLength>; } interface SVGNumberList { [Symbol.iterator](): IterableIterator<SVGNumber>; } interface SVGPointList { [Symbol.iterator](): IterableIterator<DOMPoint>; } interface SVGStringList { [Symbol.iterator](): IterableIterator<string>; } interface SVGTransformList { [Symbol.iterator](): IterableIterator<SVGTransform>; } interface SourceBufferList { [Symbol.iterator](): IterableIterator<SourceBuffer>; } interface SpeechRecognitionResult { [Symbol.iterator](): IterableIterator<SpeechRecognitionAlternative>; } interface SpeechRecognitionResultList { [Symbol.iterator](): IterableIterator<SpeechRecognitionResult>; } interface StyleSheetList { [Symbol.iterator](): IterableIterator<CSSStyleSheet>; } interface SubtleCrypto { deriveKey(algorithm: AlgorithmIdentifier | EcdhKeyDeriveParams | HkdfParams | Pbkdf2Params, baseKey: CryptoKey, derivedKeyType: AlgorithmIdentifier | AesDerivedKeyParams | HmacImportParams | HkdfParams | Pbkdf2Params, extractable: boolean, keyUsages: Iterable<KeyUsage>): Promise<CryptoKey>; generateKey(algorithm: RsaHashedKeyGenParams | EcKeyGenParams, extractable: boolean, keyUsages: ReadonlyArray<KeyUsage>): Promise<CryptoKeyPair>; generateKey(algorithm: AesKeyGenParams | HmacKeyGenParams | Pbkdf2Params, extractable: boolean, keyUsages: ReadonlyArray<KeyUsage>): Promise<CryptoKey>; generateKey(algorithm: AlgorithmIdentifier, extractable: boolean, keyUsages: Iterable<KeyUsage>): Promise<CryptoKeyPair | CryptoKey>; importKey(format: "jwk", keyData: JsonWebKey, algorithm: AlgorithmIdentifier | RsaHashedImportParams | EcKeyImportParams | 
HmacImportParams | AesKeyAlgorithm, extractable: boolean, keyUsages: ReadonlyArray<KeyUsage>): Promise<CryptoKey>; importKey(format: Exclude<KeyFormat, "jwk">, keyData: BufferSource, algorithm: AlgorithmIdentifier | RsaHashedImportParams | EcKeyImportParams | HmacImportParams | AesKeyAlgorithm, extractable: boolean, keyUsages: Iterable<KeyUsage>): Promise<CryptoKey>; unwrapKey(format: KeyFormat, wrappedKey: BufferSource, unwrappingKey: CryptoKey, unwrapAlgorithm: AlgorithmIdentifier | RsaOaepParams | AesCtrParams | AesCbcParams | AesGcmParams, unwrappedKeyAlgorithm: AlgorithmIdentifier | RsaHashedImportParams | EcKeyImportParams | HmacImportParams | AesKeyAlgorithm, extractable: boolean, keyUsages: Iterable<KeyUsage>): Promise<CryptoKey>; } interface TextTrackCueList { [Symbol.iterator](): IterableIterator<TextTrackCue>; } interface TextTrackList { [Symbol.iterator](): IterableIterator<TextTrack>; } interface TouchList { [Symbol.iterator](): IterableIterator<Touch>; } interface URLSearchParams { [Symbol.iterator](): IterableIterator<[string, string]>; /** Returns an array of key, value pairs for every entry in the search params. */ entries(): IterableIterator<[string, string]>; /** Returns a list of keys in the search params. */ keys(): IterableIterator<string>; /** Returns a list of values in the search params. 
*/ values(): IterableIterator<string>; } interface WEBGL_draw_buffers { drawBuffersWEBGL(buffers: Iterable<GLenum>): void; } interface WEBGL_multi_draw { multiDrawArraysInstancedWEBGL(mode: GLenum, firstsList: Int32Array | Iterable<GLint>, firstsOffset: GLuint, countsList: Int32Array | Iterable<GLsizei>, countsOffset: GLuint, instanceCountsList: Int32Array | Iterable<GLsizei>, instanceCountsOffset: GLuint, drawcount: GLsizei): void; multiDrawArraysWEBGL(mode: GLenum, firstsList: Int32Array | Iterable<GLint>, firstsOffset: GLuint, countsList: Int32Array | Iterable<GLsizei>, countsOffset: GLuint, drawcount: GLsizei): void; multiDrawElementsInstancedWEBGL(mode: GLenum, countsList: Int32Array | Iterable<GLsizei>, countsOffset: GLuint, type: GLenum, offsetsList: Int32Array | Iterable<GLsizei>, offsetsOffset: GLuint, instanceCountsList: Int32Array | Iterable<GLsizei>, instanceCountsOffset: GLuint, drawcount: GLsizei): void; multiDrawElementsWEBGL(mode: GLenum, countsList: Int32Array | Iterable<GLsizei>, countsOffset: GLuint, type: GLenum, offsetsList: Int32Array | Iterable<GLsizei>, offsetsOffset: GLuint, drawcount: GLsizei): void; } interface WebGL2RenderingContextBase { clearBufferfv(buffer: GLenum, drawbuffer: GLint, values: Iterable<GLfloat>, srcOffset?: GLuint): void; clearBufferiv(buffer: GLenum, drawbuffer: GLint, values: Iterable<GLint>, srcOffset?: GLuint): void; clearBufferuiv(buffer: GLenum, drawbuffer: GLint, values: Iterable<GLuint>, srcOffset?: GLuint): void; drawBuffers(buffers: Iterable<GLenum>): void; getActiveUniforms(program: WebGLProgram, uniformIndices: Iterable<GLuint>, pname: GLenum): any; getUniformIndices(program: WebGLProgram, uniformNames: Iterable<string>): Iterable<GLuint> | null; invalidateFramebuffer(target: GLenum, attachments: Iterable<GLenum>): void; invalidateSubFramebuffer(target: GLenum, attachments: Iterable<GLenum>, x: GLint, y: GLint, width: GLsizei, height: GLsizei): void; transformFeedbackVaryings(program: WebGLProgram, varyings: 
Iterable<string>, bufferMode: GLenum): void; uniform1uiv(location: WebGLUniformLocation | null, data: Iterable<GLuint>, srcOffset?: GLuint, srcLength?: GLuint): void; uniform2uiv(location: WebGLUniformLocation | null, data: Iterable<GLuint>, srcOffset?: GLuint, srcLength?: GLuint): void; uniform3uiv(location: WebGLUniformLocation | null, data: Iterable<GLuint>, srcOffset?: GLuint, srcLength?: GLuint): void; uniform4uiv(location: WebGLUniformLocation | null, data: Iterable<GLuint>, srcOffset?: GLuint, srcLength?: GLuint): void; uniformMatrix2x3fv(location: WebGLUniformLocation | null, transpose: GLboolean, data: Iterable<GLfloat>, srcOffset?: GLuint, srcLength?: GLuint): void; uniformMatrix2x4fv(location: WebGLUniformLocation | null, transpose: GLboolean, data: Iterable<GLfloat>, srcOffset?: GLuint, srcLength?: GLuint): void; uniformMatrix3x2fv(location: WebGLUniformLocation | null, transpose: GLboolean, data: Iterable<GLfloat>, srcOffset?: GLuint, srcLength?: GLuint): void; uniformMatrix3x4fv(location: WebGLUniformLocation | null, transpose: GLboolean, data: Iterable<GLfloat>, srcOffset?: GLuint, srcLength?: GLuint): void; uniformMatrix4x2fv(location: WebGLUniformLocation | null, transpose: GLboolean, data: Iterable<GLfloat>, srcOffset?: GLuint, srcLength?: GLuint): void; uniformMatrix4x3fv(location: WebGLUniformLocation | null, transpose: GLboolean, data: Iterable<GLfloat>, srcOffset?: GLuint, srcLength?: GLuint): void; vertexAttribI4iv(index: GLuint, values: Iterable<GLint>): void; vertexAttribI4uiv(index: GLuint, values: Iterable<GLuint>): void; } interface WebGL2RenderingContextOverloads { uniform1fv(location: WebGLUniformLocation | null, data: Iterable<GLfloat>, srcOffset?: GLuint, srcLength?: GLuint): void; uniform1iv(location: WebGLUniformLocation | null, data: Iterable<GLint>, srcOffset?: GLuint, srcLength?: GLuint): void; uniform2fv(location: WebGLUniformLocation | null, data: Iterable<GLfloat>, srcOffset?: GLuint, srcLength?: GLuint): void; 
uniform2iv(location: WebGLUniformLocation | null, data: Iterable<GLint>, srcOffset?: GLuint, srcLength?: GLuint): void; uniform3fv(location: WebGLUniformLocation | null, data: Iterable<GLfloat>, srcOffset?: GLuint, srcLength?: GLuint): void; uniform3iv(location: WebGLUniformLocation | null, data: Iterable<GLint>, srcOffset?: GLuint, srcLength?: GLuint): void; uniform4fv(location: WebGLUniformLocation | null, data: Iterable<GLfloat>, srcOffset?: GLuint, srcLength?: GLuint): void; uniform4iv(location: WebGLUniformLocation | null, data: Iterable<GLint>, srcOffset?: GLuint, srcLength?: GLuint): void; uniformMatrix2fv(location: WebGLUniformLocation | null, transpose: GLboolean, data: Iterable<GLfloat>, srcOffset?: GLuint, srcLength?: GLuint): void; uniformMatrix3fv(location: WebGLUniformLocation | null, transpose: GLboolean, data: Iterable<GLfloat>, srcOffset?: GLuint, srcLength?: GLuint): void; uniformMatrix4fv(location: WebGLUniformLocation | null, transpose: GLboolean, data: Iterable<GLfloat>, srcOffset?: GLuint, srcLength?: GLuint): void; } interface WebGLRenderingContextBase { vertexAttrib1fv(index: GLuint, values: Iterable<GLfloat>): void; vertexAttrib2fv(index: GLuint, values: Iterable<GLfloat>): void; vertexAttrib3fv(index: GLuint, values: Iterable<GLfloat>): void; vertexAttrib4fv(index: GLuint, values: Iterable<GLfloat>): void; } interface WebGLRenderingContextOverloads { uniform1fv(location: WebGLUniformLocation | null, v: Iterable<GLfloat>): void; uniform1iv(location: WebGLUniformLocation | null, v: Iterable<GLint>): void; uniform2fv(location: WebGLUniformLocation | null, v: Iterable<GLfloat>): void; uniform2iv(location: WebGLUniformLocation | null, v: Iterable<GLint>): void; uniform3fv(location: WebGLUniformLocation | null, v: Iterable<GLfloat>): void; uniform3iv(location: WebGLUniformLocation | null, v: Iterable<GLint>): void; uniform4fv(location: WebGLUniformLocation | null, v: Iterable<GLfloat>): void; uniform4iv(location: WebGLUniformLocation | null, v: 
Iterable<GLint>): void; uniformMatrix2fv(location: WebGLUniformLocation | null, transpose: GLboolean, value: Iterable<GLfloat>): void; uniformMatrix3fv(location: WebGLUniformLocation | null, transpose: GLboolean, value: Iterable<GLfloat>): void; uniformMatrix4fv(location: WebGLUniformLocation | null, transpose: GLboolean, value: Iterable<GLfloat>): void; }
PypiClean
/DI_engine-0.4.9-py3-none-any.whl/ding/example/sqil_continuous.py
from ditk import logging
import torch
from ding.model import QAC
from ding.policy import SQILSACPolicy
from ding.envs import BaseEnvManagerV2
from ding.data import DequeBuffer
from ding.config import compile_config
from ding.framework import task
from ding.framework.context import OnlineRLContext
from ding.framework.middleware import OffPolicyLearner, StepCollector, interaction_evaluator, \
    CkptSaver, sqil_data_pusher, termination_checker
from ding.utils import set_pkg_seed
from dizoo.classic_control.pendulum.envs.pendulum_env import PendulumEnv
from dizoo.classic_control.pendulum.config.pendulum_sac_config import main_config as ex_main_config
from dizoo.classic_control.pendulum.config.pendulum_sac_config import create_config as ex_create_config
from dizoo.classic_control.pendulum.config.pendulum_sqil_sac_config import main_config, create_config


def main():
    """Train SQIL-SAC on Pendulum, mixing agent and expert transitions.

    A pretrained expert policy (loaded from ``cfg.policy.collect.model_path``)
    and the learning agent each collect transitions into their own replay
    buffer; the learner samples both buffers with equal weight.
    """
    logging.getLogger().setLevel(logging.INFO)
    cfg = compile_config(main_config, create_cfg=create_config, auto=True)
    expert_cfg = compile_config(ex_main_config, create_cfg=ex_create_config, auto=True)
    # The expert config must have the same `n_sample` as the agent's; mirroring
    # it here avoids having to modify the expert config files themselves.
    expert_cfg.policy.collect.n_sample = cfg.policy.collect.n_sample

    with task.start(async_mode=False, ctx=OnlineRLContext()):

        def pendulum_env_manager(env_num):
            # Build a vectorized manager running `env_num` Pendulum copies.
            return BaseEnvManagerV2(
                env_fn=[lambda: PendulumEnv(cfg.env) for _ in range(env_num)],
                cfg=cfg.env.manager,
            )

        collector_env = pendulum_env_manager(cfg.env.collector_env_num)
        expert_collector_env = pendulum_env_manager(cfg.env.collector_env_num)
        evaluator_env = pendulum_env_manager(cfg.env.evaluator_env_num)

        set_pkg_seed(cfg.seed, use_cuda=cfg.policy.cuda)

        agent_model = QAC(**cfg.policy.model)
        expert_model = QAC(**cfg.policy.model)
        agent_buffer = DequeBuffer(size=cfg.policy.other.replay_buffer.replay_buffer_size)
        expert_buffer = DequeBuffer(size=cfg.policy.other.replay_buffer.replay_buffer_size)
        policy = SQILSACPolicy(cfg.policy, model=agent_model)
        expert_policy = SQILSACPolicy(expert_cfg.policy, model=expert_model)
        # The expert weights are pretrained; load on CPU and let the policy
        # move them to the right device.
        state_dict = torch.load(cfg.policy.collect.model_path, map_location='cpu')
        expert_policy.collect_mode.load_state_dict(state_dict)

        task.use(interaction_evaluator(cfg, policy.eval_mode, evaluator_env))
        # Agent data collector.
        task.use(
            StepCollector(cfg, policy.collect_mode, collector_env,
                          random_collect_size=cfg.policy.random_collect_size)
        )
        task.use(sqil_data_pusher(cfg, agent_buffer, expert=False))
        # Expert data collector.
        task.use(
            StepCollector(
                cfg,
                expert_policy.collect_mode,
                expert_collector_env,
                random_collect_size=cfg.policy.expert_random_collect_size,
            )
        )
        task.use(sqil_data_pusher(cfg, expert_buffer, expert=True))
        # Learn from both buffers with equal sampling weight.
        task.use(OffPolicyLearner(cfg, policy.learn_mode, [(agent_buffer, 0.5), (expert_buffer, 0.5)]))
        task.use(CkptSaver(policy, cfg.exp_name, train_freq=100))
        task.use(termination_checker(max_train_iter=10000))
        task.run()


if __name__ == "__main__":
    main()
PypiClean
/GeoNode-3.2.0-py3-none-any.whl/geonode/static/geonode/js/ol-2.13/lib/OpenLayers/Lang/br.js
* @requires OpenLayers/Lang.js */ /** * Namespace: OpenLayers.Lang["br"] * Dictionary for Brezhoneg. Keys for entries are used in calls to * <OpenLayers.Lang.translate>. Entry bodies are normal strings or * strings formatted for use with <OpenLayers.String.format> calls. */ OpenLayers.Lang["br"] = OpenLayers.Util.applyDefaults({ 'unhandledRequest': "Distro evel reked anveret ${statusText}", 'Permalink': "Peurliamm", 'Overlays': "Gwiskadoù", 'Base Layer': "Gwiskad diazez", 'noFID': "N\'haller ket hizivaat un elfenn ma n\'eus ket a niverenn-anaout (FID) eviti.", 'browserNotSupported': "N\'eo ket skoret an daskor vektorel gant ho merdeer. Setu aze an daskorerioù skoret evit ar poent :\n${renderers}", 'minZoomLevelError': "Ne zleer implijout ar perzh minZoomLevel nemet evit gwiskadoù FixedZoomLevels-descendent. Ar fed ma wiria ar gwiskad WHS-se hag-eñ ez eus eus minZoomLevel zo un aspadenn gozh. Koulskoude n\'omp ket evit e ziverkañ kuit da derriñ arloadoù diazezet war OL a c\'hallfe bezañ stag outañ. Setu perak eo dispredet -- Lamet kuit e vo ar gwiriañ minZoomLevel a-is er stumm 3.0. 
Ober gant an arventennoù bihanañ/brasañ evel deskrivet amañ e plas : http://trac.openlayers.org/wiki/SettingZoomLevels", 'commitSuccess': "Treuzgread WFS : MAT EO ${response}", 'commitFailed': "Treuzgread WFS Transaction: C\'HWITET ${response}", 'googleWarning': "N\'eus ket bet gallet kargañ ar gwiskad Google ent reizh.\x3cbr\x3e\x3cbr\x3eEvit en em zizober eus ar c\'hemenn-mañ, dibabit ur BaseLayer nevez en diuzer gwiskadoù er c\'horn dehoù el laez.\x3cbr\x3e\x3cbr\x3eSur a-walc\'h eo peogwir n\'eo ket bet ensoc\'het levraoueg Google Maps pe neuze ne glot ket an alc\'hwez API gant ho lec\'hienn.\x3cbr\x3e\x3cbr\x3eDiorroerien : Evit reizhañ an dra-se, \x3ca href=\'http://trac.openlayers.org/wiki/Google\' target=\'_blank\'\x3eclick here\x3c/a\x3e", 'getLayerWarning': "N\'haller ket kargañ ar gwiskad ${layerType} ent reizh.\x3cbr\x3e\x3cbr\x3eEvit en em zizober eus ar c\'hemenn-mañ, dibabit ur BaseLayer nevez en diuzer gwiskadoù er c\'horn dehoù el laez.\x3cbr\x3e\x3cbr\x3eSur a-walc\'h eo peogwir n\'eo ket bet ensoc\'het mat al levraoueg ${layerLib}.\x3cbr\x3e\x3cbr\x3eDiorroerien : Evit gouzout penaos reizhañ an dra-se, \x3ca href=\'http://trac.openlayers.org/wiki/${layerLib}\' target=\'_blank\'\x3eclick here\x3c/a\x3e", 'Scale = 1 : ${scaleDenom}': "Skeul = 1 : ${scaleDenom}", 'W': "K", 'E': "R", 'N': "N", 'S': "S", 'reprojectDeprecated': "Emaoc\'h oc\'h implijout an dibarzh \'reproject\' war ar gwiskad ${layerName}. Dispredet eo an dibarzh-mañ : bet eo hag e talveze da ziskwel roadennoù war-c\'horre kartennoù diazez kenwerzhel, un dra hag a c\'haller ober bremañ gant an arc\'hwel dre skor banndres boullek Mercator. Muioc\'h a ditouroù a c\'haller da gaout war http://trac.openlayers.org/wiki/SphericalMercator.", 'methodDeprecated': "Dispredet eo an daore-se ha tennet e vo kuit eus ar stumm 3.0. Grit gant ${newMethod} e plas." });
PypiClean
/KratosSwimmingDEMApplication-9.2.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/KratosMultiphysics/SwimmingDEMApplication/GentleInjectionAndErasureTestFactory.py
import os

# This module encodes two tests, each consisting of a cubic domain of fluid
# with a few particles submerged in it.

# The first test involves a DEM inlet and it checks that the 'gentle' injection
# of particles improves the convergence of the fluid solver. In this case the
# bounding box (which is responsible for deleting particles) is inactive.

# The second test only involves particles initially in the domain (no DEM
# inlet), while the bounding box is active and set to eliminate particles that
# cross a certain height within the fluid domain. Here the improvement of the
# fluid convergence due to gentle elimination of particles is checked.

# In both cases the problem is run twice: once without the 'gentle' technique
# and once with it. It is checked that the number of nonlinear iterations is,
# on average, smaller for the latter case.

# Importing the Kratos Library
import KratosMultiphysics as Kratos
import numpy as np

# Import KratosUnittest
import KratosMultiphysics.KratosUnittest as KratosUnittest
import KratosMultiphysics.SwimmingDEMApplication as SDEM
from KratosMultiphysics.SwimmingDEMApplication.swimming_DEM_analysis import SwimmingDEMAnalysis

# When True, results are printed and a plot of the nonlinear-iteration history
# is saved to disk.
debug_mode = False


class controlledExecutionScope:
    """Context manager that runs its body with ``scope`` as the working directory.

    The previous working directory is restored on exit, even if the body raises.
    """

    def __init__(self, scope):
        self.currentPath = os.getcwd()
        self.scope = scope

    def __enter__(self):
        os.chdir(self.scope)

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Always restore the original working directory (renamed the first
        # parameter, which previously shadowed the builtin `type`).
        os.chdir(self.currentPath)


class GentleInjectionAndErasureTestFactory(KratosUnittest.TestCase):
    """Base test case comparing 'harsh' vs 'gentle' particle injection/erasure.

    Concrete subclasses are expected to provide ``file_parameters_harsh`` and
    ``file_parameters_gentle`` (paths to the two project-parameters files) --
    TODO(review): confirm against the derived test classes.
    """

    def setUp(self):
        with open(self.file_parameters_harsh, 'r') as parameter_file:
            self.parameters_harsh = Kratos.Parameters(parameter_file.read())
        with open(self.file_parameters_gentle, 'r') as parameter_file:
            self.parameters_gentle = Kratos.Parameters(parameter_file.read())

        fluid_solver_settings = self.parameters_harsh['fluid_parameters']['solver_settings']
        self.max_nonlinear_iterations = fluid_solver_settings['maximum_iterations'].GetInt()

        if not debug_mode:
            # Silence all output in normal (non-debug) runs. A tuple replaces
            # the original set literal: it keeps a deterministic order and does
            # not require Kratos.Parameters objects to be hashable.
            for parameters in (self.parameters_harsh, self.parameters_gentle):
                parameters['do_print_results_option'].SetBool(False)
                parameters['fluid_parameters']['output_processes'] = Kratos.Parameters('''{}''')

        # One model per run so the two analyses are fully independent.
        model_harsh = Kratos.Model()
        model_gentle = Kratos.Model()
        self.test_harsh_injection = GentleAnalysis(model_harsh, self.parameters_harsh)
        self.test_gentle_injection = GentleAnalysis(model_gentle, self.parameters_gentle)
        self.test_harsh_injection.name = 'harsh'
        self.test_gentle_injection.name = 'gentle'

    def test_execution(self):
        with controlledExecutionScope(os.path.dirname(os.path.realpath(__file__))):
            self.test_harsh_injection.Run()
            self.test_gentle_injection.Run()

            times = np.array(self.test_harsh_injection.times)
            n_iterations_harsh = np.array(self.test_harsh_injection.n_iterations)
            n_iterations_gentle = np.array(self.test_gentle_injection.n_iterations)

            # Check that the number of iterations needed with gentle injection
            # is, on average, smaller. assertGreater (rather than a bare
            # `assert`) survives `python -O` and reports both values on failure.
            self.assertGreater(sum(n_iterations_harsh), sum(n_iterations_gentle))

            if debug_mode:
                import matplotlib.pyplot as plt
                # Provided by the concrete subclass -- TODO(review): confirm.
                parameter_value, parameter_name = self.GetGentleParameterValueAndName(self.parameters_gentle)
                plt.plot(times, n_iterations_harsh,
                         label='harsh (' + parameter_name + '=' + str(0.0) + ')')
                plt.plot(times, n_iterations_gentle,
                         label='gentle (' + parameter_name + '=' + str(round(parameter_value, 2)) + ')')
                plt.xlabel('time (s)')
                plt.ylabel('nonlinear iterations')
                ax = plt.gca()
                ax.set_ylim([0, self.max_nonlinear_iterations])
                plt.legend()
                plt.savefig('nonlinear_iterations' + type(self).__name__ + '.pdf')
                plt.close()


class GentleAnalysis(SwimmingDEMAnalysis):
    """SwimmingDEMAnalysis that records the nonlinear-iteration count per step."""

    def __init__(self, model, parameters=None):
        # Fix of a mutable default argument: the original signature was
        # `parameters=Kratos.Parameters("{}")`, which shared one Parameters
        # instance across every GentleAnalysis constructed without explicit
        # parameters. Callers passing parameters are unaffected.
        if parameters is None:
            parameters = Kratos.Parameters("{}")
        super().__init__(model, parameters)
        self.n_iterations = []  # nonlinear iterations of each solved step
        self.times = []  # simulation time of each solved step
        self._GetDEMAnalysis().mdpas_folder_path = os.path.join(
            self._GetDEMAnalysis().main_path, 'fluid_convergence_tests/')
        # NOTE(review): this stores the whole Parameters object, not a name
        # string; looks suspicious but is kept as-is to preserve behavior.
        self.problem_name = parameters

    def FinalizeSolutionStep(self):
        # Record the time and the number of nonlinear iterations the fluid
        # solver needed for this step, then delegate to the base class.
        time = self.fluid_model_part.ProcessInfo[Kratos.TIME]
        n_iterations = self.fluid_model_part.ProcessInfo[Kratos.NL_ITERATION_NUMBER]
        self.times.append(time)
        self.n_iterations.append(n_iterations)
        super().FinalizeSolutionStep()
PypiClean
/FlaskCms-0.0.4.tar.gz/FlaskCms-0.0.4/flask_cms/static/js/ckeditor/lang/ku.js
/* Copyright (c) 2003-2013, CKSource - Frederico Knabben. All rights reserved. For licensing, see LICENSE.html or http://ckeditor.com/license */ CKEDITOR.lang['ku']={"dir":"rtl","editor":"سەرنووسەی دەقی بە پیت","common":{"editorHelp":"کلیکی ALT لەگەڵ 0 بکه بۆ یارمەتی","browseServer":"هێنانی ڕاژە","url":"ناونیشانی بەستەر","protocol":"پڕۆتۆکۆڵ","upload":"بارکردن","uploadSubmit":"ناردنی بۆ ڕاژە","image":"وێنە","flash":"فلاش","form":"داڕشتە","checkbox":"خانەی نیشانکردن","radio":"جێگرەوەی دوگمە","textField":"خانەی دەق","textarea":"ڕووبەری دەق","hiddenField":"شاردنەوی خانە","button":"دوگمە","select":"هەڵبژاردەی خانە","imageButton":"دوگمەی وێنە","notSet":"<هیچ دانەدراوە>","id":"ناسنامە","name":"ناو","langDir":"ئاراستەی زمان","langDirLtr":"چەپ بۆ ڕاست (LTR)","langDirRtl":"ڕاست بۆ چەپ (RTL)","langCode":"هێمای زمان","longDescr":"پێناسەی درێژی بەستەر","cssClass":"شێوازی چینی پهڕە","advisoryTitle":"ڕاوێژکاری سەردێڕ","cssStyle":"شێواز","ok":"باشە","cancel":"هەڵوەشاندن","close":"داخستن","preview":"پێشبینین","resize":"گۆڕینی ئەندازە","generalTab":"گشتی","advancedTab":"پەرەسەندوو","validateNumberFailed":"ئەم نرخە ژمارە نیە، تکایە نرخێکی ژمارە بنووسە.","confirmNewPage":"سەرجەم گۆڕانکاریەکان و پێکهاتەکانی ناووەوە لەدەست دەدەی گەر بێتوو پاشکەوتی نەکەی یەکەم جار، تۆ هەر دڵنیایی لەکردنەوەی پەنجەرەکی نوێ؟","confirmCancel":"هەندێك هەڵبژاردە گۆڕدراوە. 
تۆ دڵنیایی لە داخستنی ئەم دیالۆگە؟","options":"هەڵبژاردەکان","target":"ئامانج","targetNew":"پەنجەرەیەکی نوێ (_blank)","targetTop":"لووتکەی پەنجەرە (_top)","targetSelf":"لەهەمان پەنجەرە (_self)","targetParent":"پەنجەرەی باوان (_parent)","langDirLTR":"چەپ بۆ ڕاست (LTR)","langDirRTL":"ڕاست بۆ چەپ (RTL)","styles":"شێواز","cssClasses":"شێوازی چینی پەڕە","width":"پانی","height":"درێژی","align":"ڕێککەرەوە","alignLeft":"چەپ","alignRight":"ڕاست","alignCenter":"ناوەڕاست","alignTop":"سەرەوە","alignMiddle":"ناوەند","alignBottom":"ژێرەوە","invalidValue":"نرخێکی نادرووست.","invalidHeight":"درێژی دەبێت ژمارە بێت.","invalidWidth":"پانی دەبێت ژمارە بێت.","invalidCssLength":"ئەم نرخەی دراوە بۆ خانەی \"%1\" دەبێت ژمارەکی درووست بێت یان بێ ناونیشانی ئامرازی (px, %, in, cm, mm, em, ex, pt, یان pc).","invalidHtmlLength":"ئەم نرخەی دراوە بۆ خانەی \"%1\" دەبێت ژمارەکی درووست بێت یان بێ ناونیشانی ئامرازی HTML (px یان %).","invalidInlineStyle":"دانەی نرخی شێوازی ناوهێڵ دەبێت پێکهاتبێت لەیەك یان زیاتری داڕشتە \"ناو : نرخ\", جیاکردنەوەی بە فاریزە و خاڵ","cssLengthTooltip":"ژمارەیەك بنووسه بۆ نرخی piksel یان ئامرازێکی درووستی CSS (px, %, in, cm, mm, em, ex, pt, یان pc).","unavailable":"%1<span class=\"cke_accessibility\">, ئامادە نیە</span>"},"about":{"copy":"مافی لەبەرگەرتنەوەی &copy; $1. گشتی پارێزراوه.","dlgTitle":"دەربارەی CKEditor","help":"سەیری $1 بکه بۆ یارمەتی.","moreInfo":"بۆ زانیاری زیاتری مۆڵەت, تکایه سەردانی ماڵپەڕەکەمان بکه:","title":"دەربارەی CKEditor","userGuide":"ڕێپیشاندەری CKEditors"},"basicstyles":{"bold":"قەڵەو","italic":"لار","strike":"لێدان","subscript":"ژێرنووس","superscript":"سەرنووس","underline":"ژێرهێڵ"},"bidi":{"ltr":"ئاراستەی نووسە لە چەپ بۆ ڕاست","rtl":"ئاراستەی نووسە لە ڕاست بۆ چەپ"},"blockquote":{"toolbar":"بەربەستکردنی ووتەی وەرگیراو"},"clipboard":{"copy":"لەبەرگرتنەوە","copyError":"پارێزی وێبگەڕەکەت ڕێگەنادات بەسەرنووسەکە لە لکاندنی دەقی خۆکارارنە. 
تکایە لەبری ئەمە ئەم فەرمانە بەکاربهێنە بەداگرتنی کلیلی (Ctrl/Cmd+C).","cut":"بڕین","cutError":"پارێزی وێبگەڕەکەت ڕێگەنادات بە سەرنووسەکە لەبڕینی خۆکارانە. تکایە لەبری ئەمە ئەم فەرمانە بەکاربهێنە بەداگرتنی کلیلی (Ctrl/Cmd+X).","paste":"لکاندن","pasteArea":"ناوچەی لکاندن","pasteMsg":"تکایە بیلکێنە لەناوەوەی ئەم سنوقە لەڕێی تەختەکلیلەکەت بە بەکارهێنانی کلیلی (<STRONG>Ctrl/Cmd+V</STRONG>) دووای کلیکی باشە بکە.","securityMsg":"بەهۆی شێوەپێدانی پارێزی وێبگەڕەکەت، سەرنووسەکه ناتوانێت دەستبگەیەنێت بەهەڵگیراوەکە ڕاستەوخۆ. بۆیه پێویسته دووباره بیلکێنیت لەم پەنجەرەیه.","title":"لکاندن"},"colorbutton":{"auto":"خۆکار","bgColorTitle":"ڕەنگی پاشبنەما","colors":{"000":"ڕەش","800000":"سۆرو ماڕوونی","8B4513":"ماڕوونی","2F4F4F":"سەوزی تاریك","008080":"سەوز و شین","000080":"شینی تۆخ","4B0082":"مۆری تۆخ","696969":"ڕەساسی تۆخ","B22222":"سۆری تۆخ","A52A2A":"قاوەیی","DAA520":"قاوەیی بریسکەدار","006400":"سەوزی تۆخ","40E0D0":"شینی ناتۆخی بریسکەدار","0000CD":"شینی مامناوەند","800080":"پەمبەیی","808080":"ڕەساسی","F00":"سۆر","FF8C00":"نارەنجی تۆخ","FFD700":"زەرد","008000":"سەوز","0FF":"شینی ئاسمانی","00F":"شین","EE82EE":"پەمەیی","A9A9A9":"ڕەساسی ناتۆخ","FFA07A":"نارەنجی ناتۆخ","FFA500":"نارەنجی","FFFF00":"زەرد","00FF00":"سەوز","AFEEEE":"شینی ناتۆخ","ADD8E6":"شینی زۆر ناتۆخ","DDA0DD":"پەمەیی ناتۆخ","D3D3D3":"ڕەساسی بریسکەدار","FFF0F5":"جەرگی زۆر ناتۆخ","FAEBD7":"جەرگی ناتۆخ","FFFFE0":"سپی ناتۆخ","F0FFF0":"هەنگوینی ناتۆخ","F0FFFF":"شینێکی زۆر ناتۆخ","F0F8FF":"شینێکی ئاسمانی زۆر ناتۆخ","E6E6FA":"شیری","FFF":"سپی"},"more":"ڕەنگی زیاتر...","panelTitle":"ڕەنگەکان","textColorTitle":"ڕەنگی دەق"},"colordialog":{"clear":"پاکیکەوە","highlight":"نیشانکردن","options":"هەڵبژاردەی ڕەنگەکان","selected":"ڕەنگی هەڵبژێردراو","title":"هەڵبژاردنی ڕەنگ"},"templates":{"button":"ڕووکار","emptyListMsg":"(هیچ ڕووکارێك دیارینەکراوە)","insertOption":"لە شوێن دانانی ئەم پێکهاتانەی ئێستا","options":"هەڵبژاردەکانی ڕووکار","selectPromptMsg":"ڕووکارێك هەڵبژێره بۆ کردنەوەی له سەرنووسەر:","title":"پێکهاتەی 
ڕووکار"},"contextmenu":{"options":"هەڵبژاردەی لیستەی کلیکی دەستی ڕاست"},"div":{"IdInputLabel":"ناسنامە","advisoryTitleInputLabel":"سەردێڕ","cssClassInputLabel":"شێوازی چینی پەڕه","edit":"چاکسازی Div","inlineStyleInputLabel":"شێوازی ناوهێڵ","langDirLTRLabel":"چەپ بۆ ڕاست (LTR)","langDirLabel":"ئاراستەی زمان","langDirRTLLabel":"ڕاست بۆ چەپ (RTL)","languageCodeInputLabel":"هێمای زمان","remove":"لابردنی Div","styleSelectLabel":"شێواز","title":"دروستکردنی لەخۆگری Div","toolbar":"دروستکردنی لەخۆگری Div"},"toolbar":{"toolbarCollapse":"شاردنەوی هێڵی تووڵامراز","toolbarExpand":"نیشاندانی هێڵی تووڵامراز","toolbarGroups":{"document":"پەڕه","clipboard":"بڕین/پووچکردنەوە","editing":"چاکسازی","forms":"داڕشتە","basicstyles":"شێوازی بنچینەیی","paragraph":"بڕگە","links":"بەستەر","insert":"خستنە ناو","styles":"شێواز","colors":"ڕەنگەکان","tools":"ئامرازەکان"},"toolbars":"تووڵامرازی دەسکاریکەر"},"elementspath":{"eleLabel":"ڕێڕەوی توخمەکان","eleTitle":"%1 توخم"},"find":{"find":"گەڕان","findOptions":"هەڵبژاردەکانی گەڕان","findWhat":"گەڕان بەدووای:","matchCase":"جیاکردنەوه لەنێوان پیتی گەورەو بچووك","matchCyclic":"گەڕان لەهەموو پەڕەکه","matchWord":"تەنەا هەموو وشەکه","notFoundMsg":"هیچ دەقه گەڕانێك نەدۆزراوه.","replace":"لەبریدانان","replaceAll":"لەبریدانانی هەمووی","replaceSuccessMsg":" پێشهاتە(ی) لەبری دانرا. 
%1","replaceWith":"لەبریدانان به:","title":"گەڕان و لەبریدانان"},"fakeobjects":{"anchor":"لەنگەر","flash":"فلاش","hiddenfield":"شاردنەوەی خانه","iframe":"لەچوارچێوە","unknown":"بەرکارێکی نەناسراو"},"flash":{"access":"دەستپێگەیشتنی نووسراو","accessAlways":"هەمیشه","accessNever":"هەرگیز","accessSameDomain":"هەمان دۆمەین","alignAbsBottom":"له ژێرەوه","alignAbsMiddle":"لەناوەند","alignBaseline":"هێڵەبنەڕەت","alignTextTop":"دەق لەسەرەوه","bgcolor":"ڕەنگی پاشبنەما","chkFull":"ڕێپێدان بە پڕی شاشه","chkLoop":"گرێ","chkMenu":"چالاککردنی لیستەی فلاش","chkPlay":"پێکردنی یان لێدانی خۆکار","flashvars":"گۆڕاوەکان بۆ فلاش","hSpace":"بۆشایی ئاسۆیی","properties":"خاسیەتی فلاش","propertiesTab":"خاسیەت","quality":"جۆرایەتی","qualityAutoHigh":"بەرزی خۆکار","qualityAutoLow":"نزمی خۆکار","qualityBest":"باشترین","qualityHigh":"بەرزی","qualityLow":"نزم","qualityMedium":"مامناوەند","scale":"پێوانه","scaleAll":"نیشاندانی هەموو","scaleFit":"بەوردی بگونجێت","scaleNoBorder":"بێ پەراوێز","title":"خاسیەتی فلاش","vSpace":"بۆشایی ئەستونی","validateHSpace":"بۆشایی ئاسۆیی دەبێت ژمارە بێت.","validateSrc":"ناونیشانی بەستەر نابێت خاڵی بێت","validateVSpace":"بۆشایی ئەستونی دەبێت ژماره بێت.","windowMode":"شێوازی پەنجەره","windowModeOpaque":"ناڕوون","windowModeTransparent":"ڕۆشن","windowModeWindow":"پەنجەره"},"font":{"fontSize":{"label":"گەورەیی","voiceLabel":"گەورەیی فۆنت","panelTitle":"گەورەیی فۆنت"},"label":"فۆنت","panelTitle":"ناوی فۆنت","voiceLabel":"فۆنت"},"forms":{"button":{"title":"خاسیەتی دوگمە","text":"(نرخی) دەق","type":"جۆر","typeBtn":"دوگمە","typeSbm":"بنێرە","typeRst":"ڕێکخستنەوە"},"checkboxAndRadio":{"checkboxTitle":"خاسیەتی چووارگۆشی پشکنین","radioTitle":"خاسیەتی جێگرەوەی دوگمە","value":"نرخ","selected":"هەڵبژاردرا"},"form":{"title":"خاسیەتی داڕشتە","menu":"خاسیەتی داڕشتە","action":"کردار","method":"ڕێگە","encoding":"بەکۆدکەر"},"hidden":{"title":"خاسیەتی خانەی شاردراوە","name":"ناو","value":"نرخ"},"select":{"title":"هەڵبژاردەی خاسیەتی خانە","selectInfo":"زانیاری","opAvail":"هەڵبژاردەی 
لەبەردەستدابوون","value":"نرخ","size":"گەورەیی","lines":"هێڵەکان","chkMulti":"ڕێدان بەفره هەڵبژارده","opText":"دەق","opValue":"نرخ","btnAdd":"زیادکردن","btnModify":"گۆڕانکاری","btnUp":"سەرەوه","btnDown":"خوارەوە","btnSetValue":"دابنێ وەك نرخێکی هەڵبژێردراو","btnDelete":"سڕینەوه"},"textarea":{"title":"خاسیەتی ڕووبەری دەق","cols":"ستوونەکان","rows":"ڕیزەکان"},"textfield":{"title":"خاسیەتی خانەی دەق","name":"ناو","value":"نرخ","charWidth":"پانی نووسە","maxChars":"ئەوپەڕی نووسە","type":"جۆر","typeText":"دەق","typePass":"پێپەڕەوشە","typeEmail":"ئیمەیل","typeSearch":"گەڕان","typeTel":"ژمارەی تەلەفۆن","typeUrl":"ناونیشانی بەستەر"}},"format":{"label":"ڕازاندنەوە","panelTitle":"بەشی ڕازاندنەوه","tag_address":"ناونیشان","tag_div":"(DIV)-ی ئاسایی","tag_h1":"سەرنووسەی ١","tag_h2":"سەرنووسەی ٢","tag_h3":"سەرنووسەی ٣","tag_h4":"سەرنووسەی ٤","tag_h5":"سەرنووسەی ٥","tag_h6":"سەرنووسەی ٦","tag_p":"ئاسایی","tag_pre":"شێوازکراو"},"horizontalrule":{"toolbar":"دانانی هێلی ئاسۆیی"},"iframe":{"border":"نیشاندانی لاکێشه بە چوواردەوری چووارچێوە","noUrl":"تکایه ناونیشانی بەستەر بنووسه بۆ چووارچێوه","scrolling":"چالاککردنی هاتووچۆپێکردن","title":"دیالۆگی چووارچێوه","toolbar":"چووارچێوه"},"image":{"alertUrl":"تکایه ناونیشانی بەستەری وێنه بنووسه","alt":"جێگرەوەی دەق","border":"پەراوێز","btnUpload":"ناردنی بۆ ڕاژه","button2Img":"تۆ دەتەوێت دوگمەی وێنەی دیاریکراو بگۆڕیت بۆ وێنەیەکی ئاسایی؟","hSpace":"بۆشایی ئاسۆیی","img2Button":"تۆ دەتەوێت وێنەی دیاریکراو بگۆڕیت بۆ دوگمەی وێنه؟","infoTab":"زانیاری وێنه","linkTab":"بەستەر","lockRatio":"داخستنی ڕێژه","menu":"خاسیەتی وێنه","resetSize":"ڕێکخستنەوەی قەباره","title":"خاسیەتی وێنه","titleButton":"خاسیەتی دوگمەی وێنه","upload":"بارکردن","urlMissing":"سەرچاوەی بەستەری وێنه بزره","vSpace":"بۆشایی ئەستونی","validateBorder":"پەراوێز دەبێت بەتەواوی تەنها ژماره بێت.","validateHSpace":"بۆشایی ئاسۆیی دەبێت بەتەواوی تەنها ژمارە بێت.","validateVSpace":"بۆشایی ئەستونی دەبێت بەتەواوی تەنها ژماره بێت."},"indent":{"indent":"زیادکردنی بۆشایی","outdent":"کەمکردنەوەی 
بۆشایی"},"smiley":{"options":"هەڵبژاردەی زەردەخەنه","title":"دانانی زەردەخەنەیەك","toolbar":"زەردەخەنه"},"justify":{"block":"هاوستوونی","center":"ناوەڕاست","left":"بەهێڵ کردنی چەپ","right":"بەهێڵ کردنی ڕاست"},"link":{"acccessKey":"کلیلی دەستپێگەیشتن","advanced":"پێشکەوتوو","advisoryContentType":"جۆری ناوەڕۆکی ڕاویژکار","advisoryTitle":"ڕاوێژکاری سەردێڕ","anchor":{"toolbar":"دانان/چاکسازی لەنگەر","menu":"چاکسازی لەنگەر","title":"خاسیەتی لەنگەر","name":"ناوی لەنگەر","errorName":"تکایه ناوی لەنگەر بنووسه","remove":"لابردنی لەنگەر"},"anchorId":"بەپێی ناسنامەی توخم","anchorName":"بەپێی ناوی لەنگەر","charset":"بەستەری سەرچاوەی نووسە","cssClasses":"شێوازی چینی پەڕه","emailAddress":"ناونیشانی ئیمەیل","emailBody":"ناوەڕۆکی نامە","emailSubject":"بابەتی نامە","id":"ناسنامە","info":"زانیاری بەستەر","langCode":"هێمای زمان","langDir":"ئاراستەی زمان","langDirLTR":"چەپ بۆ ڕاست (LTR)","langDirRTL":"ڕاست بۆ چەپ (RTL)","menu":"چاکسازی بەستەر","name":"ناو","noAnchors":"(هیچ جۆرێکی لەنگەر ئامادە نیە لەم پەڕەیه)","noEmail":"تکایە ناونیشانی ئیمەیل بنووسە","noUrl":"تکایە ناونیشانی بەستەر بنووسە","other":"<هیتر>","popupDependent":"پێوەبەستراو (Netscape)","popupFeatures":"خاسیەتی پەنجەرەی سەرهەڵدەر","popupFullScreen":"پڕ بەپڕی شاشە (IE)","popupLeft":"جێگای چەپ","popupLocationBar":"هێڵی ناونیشانی بەستەر","popupMenuBar":"هێڵی لیسته","popupResizable":"توانای گۆڕینی قەباره","popupScrollBars":"هێڵی هاتووچۆپێکردن","popupStatusBar":"هێڵی دۆخ","popupToolbar":"هێڵی تووڵامراز","popupTop":"جێگای سەرەوە","rel":"پەیوەندی","selectAnchor":"هەڵبژاردنی لەنگەرێك","styles":"شێواز","tabIndex":"بازدەری تابی ئیندێکس","target":"ئامانج","targetFrame":"<چووارچێوە>","targetFrameName":"ناوی ئامانجی چووارچێوە","targetPopup":"<پەنجەرەی سەرهەڵدەر>","targetPopupName":"ناوی پەنجەرەی سەرهەڵدەر","title":"بەستەر","toAnchor":"بەستەر بۆ لەنگەر له دەق","toEmail":"ئیمەیل","toUrl":"ناونیشانی بەستەر","toolbar":"دانان/ڕێکخستنی بەستەر","type":"جۆری بەستەر","unlink":"لابردنی 
بەستەر","upload":"بارکردن"},"list":{"bulletedlist":"دانان/لابردنی خاڵی لیست","numberedlist":"دانان/لابردنی ژمارەی لیست"},"liststyle":{"armenian":"ئاراستەی ژمارەی ئەرمەنی","bulletedTitle":"خاسیەتی لیستی خاڵی","circle":"بازنه","decimal":"ژمارە (1, 2, 3, وە هیتر.)","decimalLeadingZero":"ژمارە سفڕی لەپێشەوه (01, 02, 03, وە هیتر.)","disc":"پەپکە","georgian":"ئاراستەی ژمارەی جۆڕجی (an, ban, gan, وە هیتر.)","lowerAlpha":"ئەلفابێی بچووك (a, b, c, d, e, وە هیتر.)","lowerGreek":"یۆنانی بچووك (alpha, beta, gamma, وە هیتر.)","lowerRoman":"ژمارەی ڕۆمی بچووك (i, ii, iii, iv, v, وە هیتر.)","none":"هیچ","notset":"<دانەندراوه>","numberedTitle":"خاسیەتی لیستی ژمارەیی","square":"چووراگۆشە","start":"دەستپێکردن","type":"جۆر","upperAlpha":"ئەلفابێی گەوره (A, B, C, D, E, وە هیتر.)","upperRoman":"ژمارەی ڕۆمی گەوره (I, II, III, IV, V, وە هیتر.)","validateStartNumber":"دەستپێکەری لیستی ژمارەیی دەبێت تەنها ژمارە بێت."},"magicline":{"title":"بڕگە لێرە دابنێ"},"maximize":{"maximize":"ئەوپەڕی گەورەیی","minimize":"ئەوپەڕی بچووکی"},"newpage":{"toolbar":"پەڕەیەکی نوێ"},"pagebreak":{"alt":"پشووی پەڕە","toolbar":"دانانی پشووی پەڕە بۆ چاپکردن"},"pastetext":{"button":"لکاندنی وەك دەقی ڕوون","title":"لکاندنی وەك دەقی ڕوون"},"pastefromword":{"confirmCleanup":"ئەم دەقەی بەتەمای بیلکێنی پێدەچێت له word هێنرابێت. 
دەتەوێت پاکی بکەیوه پێش ئەوەی بیلکێنی؟","error":"هیچ ڕێگەیەك نەبوو لەلکاندنی دەقەکه بەهۆی هەڵەیەکی ناوەخۆیی","title":"لکاندنی لەلایەن Word","toolbar":"لکاندنی لەڕێی Word"},"preview":{"preview":"پێشبینین"},"print":{"toolbar":"چاپکردن"},"removeformat":{"toolbar":"لابردنی داڕشتەکە"},"save":{"toolbar":"پاشکەوتکردن"},"selectall":{"toolbar":"دیاریکردنی هەمووی"},"showblocks":{"toolbar":"نیشاندانی بەربەستەکان"},"sourcearea":{"toolbar":"سەرچاوە"},"specialchar":{"options":"هەڵبژاردەی نووسەی تایبەتی","title":"هەڵبژاردنی نووسەی تایبەتی","toolbar":"دانانی نووسەی تایبەتی"},"scayt":{"about":"دهربارهی SCAYT","aboutTab":"دهربارهی","addWord":"زیادکردنی ووشه","allCaps":"پشتگوێخستنی وشانهی پێکهاتووه لهپیتی گهوره","dic_create":"درووستکردن","dic_delete":"سڕینهوه","dic_field_name":"ناوی فهرههنگ","dic_info":"لهبنچینهدا فهرههنگی بهکارهێنهر کۆگاکردن کراوه له شهکرۆکه Cookie, ههرچۆنێك بێت شهکۆرکه سنووردار کراوه له قهباره کۆگاکردن.کاتێك فهرههنگی بهکارهێنهر گهیشته ئهم خاڵهی کهناتوانرێت زیاتر کۆگاکردن بکرێت له شهکرۆکه، ئهوسا فهرههنگهکه پێویسته کۆگابکرێت له ڕاژهکهی ئێمه. بۆ کۆگاکردنی زانیاری تایبهتی فهرههنگهکه له ڕاژهکهی ئێمه, پێویسته ناوێك ههڵبژێریت بۆ فهرههنگهکه. 
گهر تۆ فهرههنگێکی کۆگاکراوت ههیه, تکایه ناوی فهرههنگهکه بنووسه وه کلیکی دوگمهی گهڕاندنهوه بکه.","dic_rename":"گۆڕینی ناو","dic_restore":"گهڕاندنهوه","dictionariesTab":"فهرههنگهکان","disable":"ناچالاککردنی SCAYT","emptyDic":"ناوی فهرههنگ نابێت خاڵی بێت.","enable":"چالاککردنی SCAYT","ignore":"پشتگوێخستن","ignoreAll":"پشتگوێخستنی ههمووی","ignoreDomainNames":"پشتگوێخستنی دۆمهین","langs":"زمانهکان","languagesTab":"زمانهکان","mixedCase":"پشتگوێخستنی وشانهی پێکهاتووه لهپیتی گهورهو بچووك","mixedWithDigits":"پشتگوێخستنی وشانهی پێکهاتووه لهژماره","moreSuggestions":"پێشنیاری زیاتر","opera_title":"پشتیوانی نهکراوه لهلایهن Opera","options":"ههڵبژارده","optionsTab":"ههڵبژارده","title":"پشکنینی نووسه لهکاتی نووسین","toggle":"گۆڕینی SCAYT","noSuggestions":"No suggestion"},"stylescombo":{"label":"شێواز","panelTitle":"شێوازی ڕازاندنەوە","panelTitle1":"شێوازی خشت","panelTitle2":"شێوازی ناوهێڵ","panelTitle3":"شێوازی بەرکار"},"table":{"border":"گەورەیی پەراوێز","caption":"سەردێڕ","cell":{"menu":"خانه","insertBefore":"دانانی خانه لەپێش","insertAfter":"دانانی خانه لەپاش","deleteCell":"سڕینەوەی خانه","merge":"تێکەڵکردنی خانە","mergeRight":"تێکەڵکردنی لەگەڵ ڕاست","mergeDown":"تێکەڵکردنی لەگەڵ خوارەوە","splitHorizontal":"دابەشکردنی خانەی ئاسۆیی","splitVertical":"دابەشکردنی خانەی ئەستونی","title":"خاسیەتی خانه","cellType":"جۆری خانه","rowSpan":"ماوەی نێوان ڕیز","colSpan":"بستی ئەستونی","wordWrap":"پێچانەوەی وشە","hAlign":"ڕیزکردنی ئاسۆیی","vAlign":"ڕیزکردنی ئەستونی","alignBaseline":"هێڵەبنەڕەت","bgColor":"ڕەنگی پاشبنەما","borderColor":"ڕەنگی پەراوێز","data":"داتا","header":"سەرپەڕه","yes":"بەڵێ","no":"نەخێر","invalidWidth":"پانی خانه دەبێت بەتەواوی ژماره بێت.","invalidHeight":"درێژی خانه بەتەواوی دەبێت ژمارە بێت.","invalidRowSpan":"ماوەی نێوان ڕیز بەتەواوی دەبێت ژمارە بێت.","invalidColSpan":"ماوەی نێوان ئەستونی بەتەواوی دەبێت ژمارە بێت.","chooseColor":"هەڵبژێرە"},"cellPad":"بۆشایی ناوپۆش","cellSpace":"بۆشایی خانه","column":{"menu":"ئەستون","insertBefore":"دانانی ئەستون 
لەپێش","insertAfter":"دانانی ئەستوون لەپاش","deleteColumn":"سڕینەوەی ئەستوون"},"columns":"ستوونەکان","deleteTable":"سڕینەوەی خشتە","headers":"سەرپەڕه","headersBoth":"هەردووك","headersColumn":"یەکەم ئەستوون","headersNone":"هیچ","headersRow":"یەکەم ڕیز","invalidBorder":"ژمارەی پەراوێز دەبێت تەنها ژماره بێت.","invalidCellPadding":"ناوپۆشی خانه دەبێت ژمارەکی درووست بێت.","invalidCellSpacing":"بۆشایی خانه دەبێت ژمارەکی درووست بێت.","invalidCols":"ژمارەی ئەستوونی دەبێت گەورەتر بێت لەژمارەی 0.","invalidHeight":"درێژی خشته دهبێت تهنها ژماره بێت.","invalidRows":"ژمارەی ڕیز دەبێت گەورەتر بێت لەژمارەی 0.","invalidWidth":"پانی خشته دەبێت تەنها ژماره بێت.","menu":"خاسیەتی خشتە","row":{"menu":"ڕیز","insertBefore":"دانانی ڕیز لەپێش","insertAfter":"دانانی ڕیز لەپاش","deleteRow":"سڕینەوەی ڕیز"},"rows":"ڕیز","summary":"کورتە","title":"خاسیەتی خشتە","toolbar":"خشتە","widthPc":"لەسەدا","widthPx":"وێنەخاڵ - پیکسل","widthUnit":"پانی یەکە"},"undo":{"redo":"هەڵگەڕاندنەوە","undo":"پووچکردنەوە"},"wsc":{"btnIgnore":"پشتگوێ کردن","btnIgnoreAll":"پشتگوێکردنی ههمووی","btnReplace":"لهبریدانن","btnReplaceAll":"لهبریدانانی ههمووی","btnUndo":"پووچکردنهوه","changeTo":"گۆڕینی بۆ","errorLoading":"ههڵه لههێنانی داخوازینامهی خانهخۆێی ڕاژه: %s.","ieSpellDownload":"پشکنینی ڕێنووس دانهمزراوه. دهتهوێت ئێستا دایبگریت?","manyChanges":"پشکنینی ڕێنووس کۆتای هات: لهسهدا %1 ی وشهکان گۆڕدرا","noChanges":"پشکنینی ڕێنووس کۆتای هات: هیچ وشهیهك نۆگۆڕدرا","noMispell":"پشکنینی ڕێنووس کۆتای هات: هیچ ههڵهیهکی ڕێنووس نهدۆزراوه","noSuggestions":"- هیچ پێشنیارێك -","notAvailable":"ببووره، لهمکاتهدا ڕاژهکه لهبهردهستا نیه.","notInDic":"لهفهرههنگ دانیه","oneChange":"پشکنینی ڕێنووس کۆتای هات: یهك وشه گۆڕدرا","progress":"پشکنینی ڕێنووس لهبهردهوامبوون دایه...","title":"پشکنینی ڕێنووس","toolbar":"پشکنینی ڕێنووس"}};
PypiClean
/GB2260-v2-0.2.1.tar.gz/GB2260-v2-0.2.1/gb2260_v2/data/curated/revision_201508.py
from __future__ import unicode_literals name = '201508' division_schema = { '110000': '北京市', '110101': '东城区', '110102': '西城区', '110105': '朝阳区', '110106': '丰台区', '110107': '石景山区', '110108': '海淀区', '110109': '门头沟区', '110111': '房山区', '110112': '通州区', '110113': '顺义区', '110114': '昌平区', '110115': '大兴区', '110116': '怀柔区', '110117': '平谷区', '110228': '密云县', '110229': '延庆县', '120000': '天津市', '120101': '和平区', '120102': '河东区', '120103': '河西区', '120104': '南开区', '120105': '河北区', '120106': '红桥区', '120110': '东丽区', '120111': '西青区', '120112': '津南区', '120113': '北辰区', '120114': '武清区', '120115': '宝坻区', '120116': '滨海新区', '120221': '宁河县', '120223': '静海县', '120225': '蓟县', '130000': '河北省', '130100': '石家庄市', '130102': '长安区', '130104': '桥西区', '130105': '新华区', '130107': '井陉矿区', '130108': '裕华区', '130109': '藁城区', '130110': '鹿泉区', '130111': '栾城区', '130121': '井陉县', '130123': '正定县', '130125': '行唐县', '130126': '灵寿县', '130127': '高邑县', '130128': '深泽县', '130129': '赞皇县', '130130': '无极县', '130131': '平山县', '130132': '元氏县', '130133': '赵县', '130181': '辛集市', '130183': '晋州市', '130184': '新乐市', '130200': '唐山市', '130202': '路南区', '130203': '路北区', '130204': '古冶区', '130205': '开平区', '130207': '丰南区', '130208': '丰润区', '130209': '曹妃甸区', '130223': '滦县', '130224': '滦南县', '130225': '乐亭县', '130227': '迁西县', '130229': '玉田县', '130281': '遵化市', '130283': '迁安市', '130300': '秦皇岛市', '130302': '海港区', '130303': '山海关区', '130304': '北戴河区', '130321': '青龙满族自治县', '130322': '昌黎县', '130323': '抚宁县', '130324': '卢龙县', '130400': '邯郸市', '130402': '邯山区', '130403': '丛台区', '130404': '复兴区', '130406': '峰峰矿区', '130421': '邯郸县', '130423': '临漳县', '130424': '成安县', '130425': '大名县', '130426': '涉县', '130427': '磁县', '130428': '肥乡县', '130429': '永年县', '130430': '邱县', '130431': '鸡泽县', '130432': '广平县', '130433': '馆陶县', '130434': '魏县', '130435': '曲周县', '130481': '武安市', '130500': '邢台市', '130502': '桥东区', '130503': '桥西区', '130521': '邢台县', '130522': '临城县', '130523': '内丘县', '130524': '柏乡县', '130525': '隆尧县', '130526': '任县', '130527': '南和县', '130528': '宁晋县', '130529': 
'巨鹿县', '130530': '新河县', '130531': '广宗县', '130532': '平乡县', '130533': '威县', '130534': '清河县', '130535': '临西县', '130581': '南宫市', '130582': '沙河市', '130600': '保定市', '130602': '竞秀区', '130604': '南市区', '130606': '莲池区', '130607': '满城区', '130608': '清苑区', '130609': '徐水区', '130623': '涞水县', '130624': '阜平县', '130626': '定兴县', '130627': '唐县', '130628': '高阳县', '130629': '容城县', '130630': '涞源县', '130631': '望都县', '130632': '安新县', '130633': '易县', '130634': '曲阳县', '130635': '蠡县', '130636': '顺平县', '130637': '博野县', '130638': '雄县', '130681': '涿州市', '130682': '定州市', '130683': '安国市', '130684': '高碑店市', '130700': '张家口市', '130702': '桥东区', '130703': '桥西区', '130705': '宣化区', '130706': '下花园区', '130721': '宣化县', '130722': '张北县', '130723': '康保县', '130724': '沽源县', '130725': '尚义县', '130726': '蔚县', '130727': '阳原县', '130728': '怀安县', '130729': '万全县', '130730': '怀来县', '130731': '涿鹿县', '130732': '赤城县', '130733': '崇礼县', '130800': '承德市', '130802': '双桥区', '130803': '双滦区', '130804': '鹰手营子矿区', '130821': '承德县', '130822': '兴隆县', '130823': '平泉县', '130824': '滦平县', '130825': '隆化县', '130826': '丰宁满族自治县', '130827': '宽城满族自治县', '130828': '围场满族蒙古族自治县', '130900': '沧州市', '130902': '新华区', '130903': '运河区', '130921': '沧县', '130922': '青县', '130923': '东光县', '130924': '海兴县', '130925': '盐山县', '130926': '肃宁县', '130927': '南皮县', '130928': '吴桥县', '130929': '献县', '130930': '孟村回族自治县', '130981': '泊头市', '130982': '任丘市', '130983': '黄骅市', '130984': '河间市', '131000': '廊坊市', '131002': '安次区', '131003': '广阳区', '131022': '固安县', '131023': '永清县', '131024': '香河县', '131025': '大城县', '131026': '文安县', '131028': '大厂回族自治县', '131081': '霸州市', '131082': '三河市', '131100': '衡水市', '131102': '桃城区', '131121': '枣强县', '131122': '武邑县', '131123': '武强县', '131124': '饶阳县', '131125': '安平县', '131126': '故城县', '131127': '景县', '131128': '阜城县', '131181': '冀州市', '131182': '深州市', '140000': '山西省', '140100': '太原市', '140105': '小店区', '140106': '迎泽区', '140107': '杏花岭区', '140108': '尖草坪区', '140109': '万柏林区', '140110': '晋源区', '140121': '清徐县', '140122': '阳曲县', '140123': '娄烦县', '140181': 
'古交市', '140200': '大同市', '140202': '城区', '140203': '矿区', '140211': '南郊区', '140212': '新荣区', '140221': '阳高县', '140222': '天镇县', '140223': '广灵县', '140224': '灵丘县', '140225': '浑源县', '140226': '左云县', '140227': '大同县', '140300': '阳泉市', '140302': '城区', '140303': '矿区', '140311': '郊区', '140321': '平定县', '140322': '盂县', '140400': '长治市', '140402': '城区', '140411': '郊区', '140421': '长治县', '140423': '襄垣县', '140424': '屯留县', '140425': '平顺县', '140426': '黎城县', '140427': '壶关县', '140428': '长子县', '140429': '武乡县', '140430': '沁县', '140431': '沁源县', '140481': '潞城市', '140500': '晋城市', '140502': '城区', '140521': '沁水县', '140522': '阳城县', '140524': '陵川县', '140525': '泽州县', '140581': '高平市', '140600': '朔州市', '140602': '朔城区', '140603': '平鲁区', '140621': '山阴县', '140622': '应县', '140623': '右玉县', '140624': '怀仁县', '140700': '晋中市', '140702': '榆次区', '140721': '榆社县', '140722': '左权县', '140723': '和顺县', '140724': '昔阳县', '140725': '寿阳县', '140726': '太谷县', '140727': '祁县', '140728': '平遥县', '140729': '灵石县', '140781': '介休市', '140800': '运城市', '140802': '盐湖区', '140821': '临猗县', '140822': '万荣县', '140823': '闻喜县', '140824': '稷山县', '140825': '新绛县', '140826': '绛县', '140827': '垣曲县', '140828': '夏县', '140829': '平陆县', '140830': '芮城县', '140881': '永济市', '140882': '河津市', '140900': '忻州市', '140902': '忻府区', '140921': '定襄县', '140922': '五台县', '140923': '代县', '140924': '繁峙县', '140925': '宁武县', '140926': '静乐县', '140927': '神池县', '140928': '五寨县', '140929': '岢岚县', '140930': '河曲县', '140931': '保德县', '140932': '偏关县', '140981': '原平市', '141000': '临汾市', '141002': '尧都区', '141021': '曲沃县', '141022': '翼城县', '141023': '襄汾县', '141024': '洪洞县', '141025': '古县', '141026': '安泽县', '141027': '浮山县', '141028': '吉县', '141029': '乡宁县', '141030': '大宁县', '141031': '隰县', '141032': '永和县', '141033': '蒲县', '141034': '汾西县', '141081': '侯马市', '141082': '霍州市', '141100': '吕梁市', '141102': '离石区', '141121': '文水县', '141122': '交城县', '141123': '兴县', '141124': '临县', '141125': '柳林县', '141126': '石楼县', '141127': '岚县', '141128': '方山县', '141129': '中阳县', '141130': '交口县', '141181': '孝义市', 
'141182': '汾阳市', '150000': '内蒙古自治区', '150100': '呼和浩特市', '150102': '新城区', '150103': '回民区', '150104': '玉泉区', '150105': '赛罕区', '150121': '土默特左旗', '150122': '托克托县', '150123': '和林格尔县', '150124': '清水河县', '150125': '武川县', '150200': '包头市', '150202': '东河区', '150203': '昆都仑区', '150204': '青山区', '150205': '石拐区', '150206': '白云鄂博矿区', '150207': '九原区', '150221': '土默特右旗', '150222': '固阳县', '150223': '达尔罕茂明安联合旗', '150300': '乌海市', '150302': '海勃湾区', '150303': '海南区', '150304': '乌达区', '150400': '赤峰市', '150402': '红山区', '150403': '元宝山区', '150404': '松山区', '150421': '阿鲁科尔沁旗', '150422': '巴林左旗', '150423': '巴林右旗', '150424': '林西县', '150425': '克什克腾旗', '150426': '翁牛特旗', '150428': '喀喇沁旗', '150429': '宁城县', '150430': '敖汉旗', '150500': '通辽市', '150502': '科尔沁区', '150521': '科尔沁左翼中旗', '150522': '科尔沁左翼后旗', '150523': '开鲁县', '150524': '库伦旗', '150525': '奈曼旗', '150526': '扎鲁特旗', '150581': '霍林郭勒市', '150600': '鄂尔多斯市', '150602': '东胜区', '150621': '达拉特旗', '150622': '准格尔旗', '150623': '鄂托克前旗', '150624': '鄂托克旗', '150625': '杭锦旗', '150626': '乌审旗', '150627': '伊金霍洛旗', '150700': '呼伦贝尔市', '150702': '海拉尔区', '150703': '扎赉诺尔区', '150721': '阿荣旗', '150722': '莫力达瓦达斡尔族自治旗', '150723': '鄂伦春自治旗', '150724': '鄂温克族自治旗', '150725': '陈巴尔虎旗', '150726': '新巴尔虎左旗', '150727': '新巴尔虎右旗', '150781': '满洲里市', '150782': '牙克石市', '150783': '扎兰屯市', '150784': '额尔古纳市', '150785': '根河市', '150800': '巴彦淖尔市', '150802': '临河区', '150821': '五原县', '150822': '磴口县', '150823': '乌拉特前旗', '150824': '乌拉特中旗', '150825': '乌拉特后旗', '150826': '杭锦后旗', '150900': '乌兰察布市', '150902': '集宁区', '150921': '卓资县', '150922': '化德县', '150923': '商都县', '150924': '兴和县', '150925': '凉城县', '150926': '察哈尔右翼前旗', '150927': '察哈尔右翼中旗', '150928': '察哈尔右翼后旗', '150929': '四子王旗', '150981': '丰镇市', '152200': '兴安盟', '152201': '乌兰浩特市', '152202': '阿尔山市', '152221': '科尔沁右翼前旗', '152222': '科尔沁右翼中旗', '152223': '扎赉特旗', '152224': '突泉县', '152500': '锡林郭勒盟', '152501': '二连浩特市', '152502': '锡林浩特市', '152522': '阿巴嘎旗', '152523': '苏尼特左旗', '152524': '苏尼特右旗', '152525': '东乌珠穆沁旗', '152526': '西乌珠穆沁旗', '152527': '太仆寺旗', '152528': '镶黄旗', 
'152529': '正镶白旗', '152530': '正蓝旗', '152531': '多伦县', '152900': '阿拉善盟', '152921': '阿拉善左旗', '152922': '阿拉善右旗', '152923': '额济纳旗', '210000': '辽宁省', '210100': '沈阳市', '210102': '和平区', '210103': '沈河区', '210104': '大东区', '210105': '皇姑区', '210106': '铁西区', '210111': '苏家屯区', '210112': '东陵区', '210113': '沈北新区', '210114': '于洪区', '210122': '辽中县', '210123': '康平县', '210124': '法库县', '210181': '新民市', '210200': '大连市', '210202': '中山区', '210203': '西岗区', '210204': '沙河口区', '210211': '甘井子区', '210212': '旅顺口区', '210213': '金州区', '210224': '长海县', '210281': '瓦房店市', '210282': '普兰店市', '210283': '庄河市', '210300': '鞍山市', '210302': '铁东区', '210303': '铁西区', '210304': '立山区', '210311': '千山区', '210321': '台安县', '210323': '岫岩满族自治县', '210381': '海城市', '210400': '抚顺市', '210402': '新抚区', '210403': '东洲区', '210404': '望花区', '210411': '顺城区', '210421': '抚顺县', '210422': '新宾满族自治县', '210423': '清原满族自治县', '210500': '本溪市', '210502': '平山区', '210503': '溪湖区', '210504': '明山区', '210505': '南芬区', '210521': '本溪满族自治县', '210522': '桓仁满族自治县', '210600': '丹东市', '210602': '元宝区', '210603': '振兴区', '210604': '振安区', '210624': '宽甸满族自治县', '210681': '东港市', '210682': '凤城市', '210700': '锦州市', '210702': '古塔区', '210703': '凌河区', '210711': '太和区', '210726': '黑山县', '210727': '义县', '210781': '凌海市', '210782': '北镇市', '210800': '营口市', '210802': '站前区', '210803': '西市区', '210804': '鲅鱼圈区', '210811': '老边区', '210881': '盖州市', '210882': '大石桥市', '210900': '阜新市', '210902': '海州区', '210903': '新邱区', '210904': '太平区', '210905': '清河门区', '210911': '细河区', '210921': '阜新蒙古族自治县', '210922': '彰武县', '211000': '辽阳市', '211002': '白塔区', '211003': '文圣区', '211004': '宏伟区', '211005': '弓长岭区', '211011': '太子河区', '211021': '辽阳县', '211081': '灯塔市', '211100': '盘锦市', '211102': '双台子区', '211103': '兴隆台区', '211121': '大洼县', '211122': '盘山县', '211200': '铁岭市', '211202': '银州区', '211204': '清河区', '211221': '铁岭县', '211223': '西丰县', '211224': '昌图县', '211281': '调兵山市', '211282': '开原市', '211300': '朝阳市', '211302': '双塔区', '211303': '龙城区', '211321': '朝阳县', '211322': '建平县', '211324': '喀喇沁左翼蒙古族自治县', '211381': '北票市', 
'211382': '凌源市', '211400': '葫芦岛市', '211402': '连山区', '211403': '龙港区', '211404': '南票区', '211421': '绥中县', '211422': '建昌县', '211481': '兴城市', '220000': '吉林省', '220100': '长春市', '220102': '南关区', '220103': '宽城区', '220104': '朝阳区', '220105': '二道区', '220106': '绿园区', '220112': '双阳区', '220113': '九台区', '220122': '农安县', '220182': '榆树市', '220183': '德惠市', '220200': '吉林市', '220202': '昌邑区', '220203': '龙潭区', '220204': '船营区', '220211': '丰满区', '220221': '永吉县', '220281': '蛟河市', '220282': '桦甸市', '220283': '舒兰市', '220284': '磐石市', '220300': '四平市', '220302': '铁西区', '220303': '铁东区', '220322': '梨树县', '220323': '伊通满族自治县', '220381': '公主岭市', '220382': '双辽市', '220400': '辽源市', '220402': '龙山区', '220403': '西安区', '220421': '东丰县', '220422': '东辽县', '220500': '通化市', '220502': '东昌区', '220503': '二道江区', '220521': '通化县', '220523': '辉南县', '220524': '柳河县', '220581': '梅河口市', '220582': '集安市', '220600': '白山市', '220602': '浑江区', '220605': '江源区', '220621': '抚松县', '220622': '靖宇县', '220623': '长白朝鲜族自治县', '220681': '临江市', '220700': '松原市', '220702': '宁江区', '220721': '前郭尔罗斯蒙古族自治县', '220722': '长岭县', '220723': '乾安县', '220781': '扶余市', '220800': '白城市', '220802': '洮北区', '220821': '镇赉县', '220822': '通榆县', '220881': '洮南市', '220882': '大安市', '222400': '延边朝鲜族自治州', '222401': '延吉市', '222402': '图们市', '222403': '敦化市', '222404': '珲春市', '222405': '龙井市', '222406': '和龙市', '222424': '汪清县', '222426': '安图县', '230000': '黑龙江省', '230100': '哈尔滨市', '230102': '道里区', '230103': '南岗区', '230104': '道外区', '230108': '平房区', '230109': '松北区', '230110': '香坊区', '230111': '呼兰区', '230112': '阿城区', '230113': '双城区', '230123': '依兰县', '230124': '方正县', '230125': '宾县', '230126': '巴彦县', '230127': '木兰县', '230128': '通河县', '230129': '延寿县', '230183': '尚志市', '230184': '五常市', '230200': '齐齐哈尔市', '230202': '龙沙区', '230203': '建华区', '230204': '铁锋区', '230205': '昂昂溪区', '230206': '富拉尔基区', '230207': '碾子山区', '230208': '梅里斯达斡尔族区', '230221': '龙江县', '230223': '依安县', '230224': '泰来县', '230225': '甘南县', '230227': '富裕县', '230229': '克山县', '230230': '克东县', '230231': '拜泉县', '230281': '讷河市', 
'230300': '鸡西市', '230302': '鸡冠区', '230303': '恒山区', '230304': '滴道区', '230305': '梨树区', '230306': '城子河区', '230307': '麻山区', '230321': '鸡东县', '230381': '虎林市', '230382': '密山市', '230400': '鹤岗市', '230402': '向阳区', '230403': '工农区', '230404': '南山区', '230405': '兴安区', '230406': '东山区', '230407': '兴山区', '230421': '萝北县', '230422': '绥滨县', '230500': '双鸭山市', '230502': '尖山区', '230503': '岭东区', '230505': '四方台区', '230506': '宝山区', '230521': '集贤县', '230522': '友谊县', '230523': '宝清县', '230524': '饶河县', '230600': '大庆市', '230602': '萨尔图区', '230603': '龙凤区', '230604': '让胡路区', '230605': '红岗区', '230606': '大同区', '230621': '肇州县', '230622': '肇源县', '230623': '林甸县', '230624': '杜尔伯特蒙古族自治县', '230700': '伊春市', '230702': '伊春区', '230703': '南岔区', '230704': '友好区', '230705': '西林区', '230706': '翠峦区', '230707': '新青区', '230708': '美溪区', '230709': '金山屯区', '230710': '五营区', '230711': '乌马河区', '230712': '汤旺河区', '230713': '带岭区', '230714': '乌伊岭区', '230715': '红星区', '230716': '上甘岭区', '230722': '嘉荫县', '230781': '铁力市', '230800': '佳木斯市', '230803': '向阳区', '230804': '前进区', '230805': '东风区', '230811': '郊区', '230822': '桦南县', '230826': '桦川县', '230828': '汤原县', '230833': '抚远县', '230881': '同江市', '230882': '富锦市', '230900': '七台河市', '230902': '新兴区', '230903': '桃山区', '230904': '茄子河区', '230921': '勃利县', '231000': '牡丹江市', '231002': '东安区', '231003': '阳明区', '231004': '爱民区', '231005': '西安区', '231024': '东宁县', '231025': '林口县', '231081': '绥芬河市', '231083': '海林市', '231084': '宁安市', '231085': '穆棱市', '231100': '黑河市', '231102': '爱辉区', '231121': '嫩江县', '231123': '逊克县', '231124': '孙吴县', '231181': '北安市', '231182': '五大连池市', '231200': '绥化市', '231202': '北林区', '231221': '望奎县', '231222': '兰西县', '231223': '青冈县', '231224': '庆安县', '231225': '明水县', '231226': '绥棱县', '231281': '安达市', '231282': '肇东市', '231283': '海伦市', '232700': '大兴安岭地区', '232721': '呼玛县', '232722': '塔河县', '232723': '漠河县', '310000': '上海市', '310101': '黄浦区', '310104': '徐汇区', '310105': '长宁区', '310106': '静安区', '310107': '普陀区', '310108': '闸北区', '310109': '虹口区', '310110': '杨浦区', '310112': '闵行区', '310113': '宝山区', 
'310114': '嘉定区', '310115': '浦东新区', '310116': '金山区', '310117': '松江区', '310118': '青浦区', '310120': '奉贤区', '310230': '崇明县', '320000': '江苏省', '320100': '南京市', '320102': '玄武区', '320104': '秦淮区', '320105': '建邺区', '320106': '鼓楼区', '320111': '浦口区', '320113': '栖霞区', '320114': '雨花台区', '320115': '江宁区', '320116': '六合区', '320117': '溧水区', '320118': '高淳区', '320200': '无锡市', '320202': '崇安区', '320203': '南长区', '320204': '北塘区', '320205': '锡山区', '320206': '惠山区', '320211': '滨湖区', '320281': '江阴市', '320282': '宜兴市', '320300': '徐州市', '320302': '鼓楼区', '320303': '云龙区', '320305': '贾汪区', '320311': '泉山区', '320312': '铜山区', '320321': '丰县', '320322': '沛县', '320324': '睢宁县', '320381': '新沂市', '320382': '邳州市', '320400': '常州市', '320402': '天宁区', '320404': '钟楼区', '320411': '新北区', '320412': '武进区', '320413': '金坛区', '320481': '溧阳市', '320500': '苏州市', '320505': '虎丘区', '320506': '吴中区', '320507': '相城区', '320508': '姑苏区', '320509': '吴江区', '320581': '常熟市', '320582': '张家港市', '320583': '昆山市', '320585': '太仓市', '320600': '南通市', '320602': '崇川区', '320611': '港闸区', '320612': '通州区', '320621': '海安县', '320623': '如东县', '320681': '启东市', '320682': '如皋市', '320684': '海门市', '320700': '连云港市', '320703': '连云区', '320706': '海州区', '320707': '赣榆区', '320722': '东海县', '320723': '灌云县', '320724': '灌南县', '320800': '淮安市', '320802': '清河区', '320803': '淮安区', '320804': '淮阴区', '320811': '清浦区', '320826': '涟水县', '320829': '洪泽县', '320830': '盱眙县', '320831': '金湖县', '320900': '盐城市', '320902': '亭湖区', '320903': '盐都区', '320904': '大丰区', '320921': '响水县', '320922': '滨海县', '320923': '阜宁县', '320924': '射阳县', '320925': '建湖县', '320981': '东台市', '321000': '扬州市', '321002': '广陵区', '321003': '邗江区', '321012': '江都区', '321023': '宝应县', '321081': '仪征市', '321084': '高邮市', '321100': '镇江市', '321102': '京口区', '321111': '润州区', '321112': '丹徒区', '321181': '丹阳市', '321182': '扬中市', '321183': '句容市', '321200': '泰州市', '321202': '海陵区', '321203': '高港区', '321204': '姜堰区', '321281': '兴化市', '321282': '靖江市', '321283': '泰兴市', '321300': '宿迁市', '321302': '宿城区', '321311': '宿豫区', '321322': '沭阳县', 
'321323': '泗阳县', '321324': '泗洪县', '330000': '浙江省', '330100': '杭州市', '330102': '上城区', '330103': '下城区', '330104': '江干区', '330105': '拱墅区', '330106': '西湖区', '330108': '滨江区', '330109': '萧山区', '330110': '余杭区', '330111': '富阳区', '330122': '桐庐县', '330127': '淳安县', '330182': '建德市', '330185': '临安市', '330200': '宁波市', '330203': '海曙区', '330204': '江东区', '330205': '江北区', '330206': '北仑区', '330211': '镇海区', '330212': '鄞州区', '330225': '象山县', '330226': '宁海县', '330281': '余姚市', '330282': '慈溪市', '330283': '奉化市', '330300': '温州市', '330302': '鹿城区', '330303': '龙湾区', '330304': '瓯海区', '330305': '洞头区', '330324': '永嘉县', '330326': '平阳县', '330327': '苍南县', '330328': '文成县', '330329': '泰顺县', '330381': '瑞安市', '330382': '乐清市', '330400': '嘉兴市', '330402': '南湖区', '330411': '秀洲区', '330421': '嘉善县', '330424': '海盐县', '330481': '海宁市', '330482': '平湖市', '330483': '桐乡市', '330500': '湖州市', '330502': '吴兴区', '330503': '南浔区', '330521': '德清县', '330522': '长兴县', '330523': '安吉县', '330600': '绍兴市', '330602': '越城区', '330603': '柯桥区', '330604': '上虞区', '330624': '新昌县', '330681': '诸暨市', '330683': '嵊州市', '330700': '金华市', '330702': '婺城区', '330703': '金东区', '330723': '武义县', '330726': '浦江县', '330727': '磐安县', '330781': '兰溪市', '330782': '义乌市', '330783': '东阳市', '330784': '永康市', '330800': '衢州市', '330802': '柯城区', '330803': '衢江区', '330822': '常山县', '330824': '开化县', '330825': '龙游县', '330881': '江山市', '330900': '舟山市', '330902': '定海区', '330903': '普陀区', '330921': '岱山县', '330922': '嵊泗县', '331000': '台州市', '331002': '椒江区', '331003': '黄岩区', '331004': '路桥区', '331021': '玉环县', '331022': '三门县', '331023': '天台县', '331024': '仙居县', '331081': '温岭市', '331082': '临海市', '331100': '丽水市', '331102': '莲都区', '331121': '青田县', '331122': '缙云县', '331123': '遂昌县', '331124': '松阳县', '331125': '云和县', '331126': '庆元县', '331127': '景宁畲族自治县', '331181': '龙泉市', '340000': '安徽省', '340100': '合肥市', '340102': '瑶海区', '340103': '庐阳区', '340104': '蜀山区', '340111': '包河区', '340121': '长丰县', '340122': '肥东县', '340123': '肥西县', '340124': '庐江县', '340181': '巢湖市', '340200': '芜湖市', '340202': '镜湖区', 
'340203': '弋江区', '340207': '鸠江区', '340208': '三山区', '340221': '芜湖县', '340222': '繁昌县', '340223': '南陵县', '340225': '无为县', '340300': '蚌埠市', '340302': '龙子湖区', '340303': '蚌山区', '340304': '禹会区', '340311': '淮上区', '340321': '怀远县', '340322': '五河县', '340323': '固镇县', '340400': '淮南市', '340402': '大通区', '340403': '田家庵区', '340404': '谢家集区', '340405': '八公山区', '340406': '潘集区', '340421': '凤台县', '340500': '马鞍山市', '340503': '花山区', '340504': '雨山区', '340506': '博望区', '340521': '当涂县', '340522': '含山县', '340523': '和县', '340600': '淮北市', '340602': '杜集区', '340603': '相山区', '340604': '烈山区', '340621': '濉溪县', '340700': '铜陵市', '340702': '铜官山区', '340703': '狮子山区', '340711': '郊区', '340721': '铜陵县', '340800': '安庆市', '340802': '迎江区', '340803': '大观区', '340811': '宜秀区', '340822': '怀宁县', '340823': '枞阳县', '340824': '潜山县', '340825': '太湖县', '340826': '宿松县', '340827': '望江县', '340828': '岳西县', '340881': '桐城市', '341000': '黄山市', '341002': '屯溪区', '341003': '黄山区', '341004': '徽州区', '341021': '歙县', '341022': '休宁县', '341023': '黟县', '341024': '祁门县', '341100': '滁州市', '341102': '琅琊区', '341103': '南谯区', '341122': '来安县', '341124': '全椒县', '341125': '定远县', '341126': '凤阳县', '341181': '天长市', '341182': '明光市', '341200': '阜阳市', '341202': '颍州区', '341203': '颍东区', '341204': '颍泉区', '341221': '临泉县', '341222': '太和县', '341225': '阜南县', '341226': '颍上县', '341282': '界首市', '341300': '宿州市', '341302': '埇桥区', '341321': '砀山县', '341322': '萧县', '341323': '灵璧县', '341324': '泗县', '341500': '六安市', '341502': '金安区', '341503': '裕安区', '341521': '寿县', '341522': '霍邱县', '341523': '舒城县', '341524': '金寨县', '341525': '霍山县', '341600': '亳州市', '341602': '谯城区', '341621': '涡阳县', '341622': '蒙城县', '341623': '利辛县', '341700': '池州市', '341702': '贵池区', '341721': '东至县', '341722': '石台县', '341723': '青阳县', '341800': '宣城市', '341802': '宣州区', '341821': '郎溪县', '341822': '广德县', '341823': '泾县', '341824': '绩溪县', '341825': '旌德县', '341881': '宁国市', '350000': '福建省', '350100': '福州市', '350102': '鼓楼区', '350103': '台江区', '350104': '仓山区', '350105': '马尾区', '350111': '晋安区', '350121': '闽侯县', '350122': 
'连江县', '350123': '罗源县', '350124': '闽清县', '350125': '永泰县', '350128': '平潭县', '350181': '福清市', '350182': '长乐市', '350200': '厦门市', '350203': '思明区', '350205': '海沧区', '350206': '湖里区', '350211': '集美区', '350212': '同安区', '350213': '翔安区', '350300': '莆田市', '350302': '城厢区', '350303': '涵江区', '350304': '荔城区', '350305': '秀屿区', '350322': '仙游县', '350400': '三明市', '350402': '梅列区', '350403': '三元区', '350421': '明溪县', '350423': '清流县', '350424': '宁化县', '350425': '大田县', '350426': '尤溪县', '350427': '沙县', '350428': '将乐县', '350429': '泰宁县', '350430': '建宁县', '350481': '永安市', '350500': '泉州市', '350502': '鲤城区', '350503': '丰泽区', '350504': '洛江区', '350505': '泉港区', '350521': '惠安县', '350524': '安溪县', '350525': '永春县', '350526': '德化县', '350527': '金门县', '350581': '石狮市', '350582': '晋江市', '350583': '南安市', '350600': '漳州市', '350602': '芗城区', '350603': '龙文区', '350622': '云霄县', '350623': '漳浦县', '350624': '诏安县', '350625': '长泰县', '350626': '东山县', '350627': '南靖县', '350628': '平和县', '350629': '华安县', '350681': '龙海市', '350700': '南平市', '350702': '延平区', '350703': '建阳区', '350721': '顺昌县', '350722': '浦城县', '350723': '光泽县', '350724': '松溪县', '350725': '政和县', '350781': '邵武市', '350782': '武夷山市', '350783': '建瓯市', '350800': '龙岩市', '350802': '新罗区', '350803': '永定区', '350821': '长汀县', '350823': '上杭县', '350824': '武平县', '350825': '连城县', '350881': '漳平市', '350900': '宁德市', '350902': '蕉城区', '350921': '霞浦县', '350922': '古田县', '350923': '屏南县', '350924': '寿宁县', '350925': '周宁县', '350926': '柘荣县', '350981': '福安市', '350982': '福鼎市', '360000': '江西省', '360100': '南昌市', '360102': '东湖区', '360103': '西湖区', '360104': '青云谱区', '360105': '湾里区', '360111': '青山湖区', '360121': '南昌县', '360122': '新建县', '360123': '安义县', '360124': '进贤县', '360200': '景德镇市', '360202': '昌江区', '360203': '珠山区', '360222': '浮梁县', '360281': '乐平市', '360300': '萍乡市', '360302': '安源区', '360313': '湘东区', '360321': '莲花县', '360322': '上栗县', '360323': '芦溪县', '360400': '九江市', '360402': '庐山区', '360403': '浔阳区', '360421': '九江县', '360423': '武宁县', '360424': '修水县', '360425': '永修县', '360426': '德安县', '360427': '星子县', 
'360428': '都昌县', '360429': '湖口县', '360430': '彭泽县', '360481': '瑞昌市', '360482': '共青城市', '360500': '新余市', '360502': '渝水区', '360521': '分宜县', '360600': '鹰潭市', '360602': '月湖区', '360622': '余江县', '360681': '贵溪市', '360700': '赣州市', '360702': '章贡区', '360703': '南康区', '360721': '赣县', '360722': '信丰县', '360723': '大余县', '360724': '上犹县', '360725': '崇义县', '360726': '安远县', '360727': '龙南县', '360728': '定南县', '360729': '全南县', '360730': '宁都县', '360731': '于都县', '360732': '兴国县', '360733': '会昌县', '360734': '寻乌县', '360735': '石城县', '360781': '瑞金市', '360800': '吉安市', '360802': '吉州区', '360803': '青原区', '360821': '吉安县', '360822': '吉水县', '360823': '峡江县', '360824': '新干县', '360825': '永丰县', '360826': '泰和县', '360827': '遂川县', '360828': '万安县', '360829': '安福县', '360830': '永新县', '360881': '井冈山市', '360900': '宜春市', '360902': '袁州区', '360921': '奉新县', '360922': '万载县', '360923': '上高县', '360924': '宜丰县', '360925': '靖安县', '360926': '铜鼓县', '360981': '丰城市', '360982': '樟树市', '360983': '高安市', '361000': '抚州市', '361002': '临川区', '361021': '南城县', '361022': '黎川县', '361023': '南丰县', '361024': '崇仁县', '361025': '乐安县', '361026': '宜黄县', '361027': '金溪县', '361028': '资溪县', '361029': '东乡县', '361030': '广昌县', '361100': '上饶市', '361102': '信州区', '361121': '上饶县', '361103': '广丰区', '361123': '玉山县', '361124': '铅山县', '361125': '横峰县', '361126': '弋阳县', '361127': '余干县', '361128': '鄱阳县', '361129': '万年县', '361130': '婺源县', '361181': '德兴市', '370000': '山东省', '370100': '济南市', '370102': '历下区', '370103': '市中区', '370104': '槐荫区', '370105': '天桥区', '370112': '历城区', '370113': '长清区', '370124': '平阴县', '370125': '济阳县', '370126': '商河县', '370181': '章丘市', '370200': '青岛市', '370202': '市南区', '370203': '市北区', '370211': '黄岛区', '370212': '崂山区', '370213': '李沧区', '370214': '城阳区', '370281': '胶州市', '370282': '即墨市', '370283': '平度市', '370285': '莱西市', '370300': '淄博市', '370302': '淄川区', '370303': '张店区', '370304': '博山区', '370305': '临淄区', '370306': '周村区', '370321': '桓台县', '370322': '高青县', '370323': '沂源县', '370400': '枣庄市', '370402': '市中区', '370403': '薛城区', '370404': '峄城区', '370405': 
'台儿庄区', '370406': '山亭区', '370481': '滕州市', '370500': '东营市', '370502': '东营区', '370503': '河口区', '370521': '垦利县', '370522': '利津县', '370523': '广饶县', '370600': '烟台市', '370602': '芝罘区', '370611': '福山区', '370612': '牟平区', '370613': '莱山区', '370634': '长岛县', '370681': '龙口市', '370682': '莱阳市', '370683': '莱州市', '370684': '蓬莱市', '370685': '招远市', '370686': '栖霞市', '370687': '海阳市', '370700': '潍坊市', '370702': '潍城区', '370703': '寒亭区', '370704': '坊子区', '370705': '奎文区', '370724': '临朐县', '370725': '昌乐县', '370781': '青州市', '370782': '诸城市', '370783': '寿光市', '370784': '安丘市', '370785': '高密市', '370786': '昌邑市', '370800': '济宁市', '370811': '任城区', '370812': '兖州区', '370826': '微山县', '370827': '鱼台县', '370828': '金乡县', '370829': '嘉祥县', '370830': '汶上县', '370831': '泗水县', '370832': '梁山县', '370881': '曲阜市', '370883': '邹城市', '370900': '泰安市', '370902': '泰山区', '370911': '岱岳区', '370921': '宁阳县', '370923': '东平县', '370982': '新泰市', '370983': '肥城市', '371000': '威海市', '371002': '环翠区', '371003': '文登区', '371082': '荣成市', '371083': '乳山市', '371100': '日照市', '371102': '东港区', '371103': '岚山区', '371121': '五莲县', '371122': '莒县', '371200': '莱芜市', '371202': '莱城区', '371203': '钢城区', '371300': '临沂市', '371302': '兰山区', '371311': '罗庄区', '371312': '河东区', '371321': '沂南县', '371322': '郯城县', '371323': '沂水县', '371324': '兰陵县', '371325': '费县', '371326': '平邑县', '371327': '莒南县', '371328': '蒙阴县', '371329': '临沭县', '371400': '德州市', '371402': '德城区', '371403': '陵城区', '371422': '宁津县', '371423': '庆云县', '371424': '临邑县', '371425': '齐河县', '371426': '平原县', '371427': '夏津县', '371428': '武城县', '371481': '乐陵市', '371482': '禹城市', '371500': '聊城市', '371502': '东昌府区', '371521': '阳谷县', '371522': '莘县', '371523': '茌平县', '371524': '东阿县', '371525': '冠县', '371526': '高唐县', '371581': '临清市', '371600': '滨州市', '371602': '滨城区', '371603': '沾化区', '371621': '惠民县', '371622': '阳信县', '371623': '无棣县', '371625': '博兴县', '371626': '邹平县', '371700': '菏泽市', '371702': '牡丹区', '371721': '曹县', '371722': '单县', '371723': '成武县', '371724': '巨野县', '371725': '郓城县', '371726': '鄄城县', '371727': '定陶县', 
'371728': '东明县', '410000': '河南省', '410100': '郑州市', '410102': '中原区', '410103': '二七区', '410104': '管城回族区', '410105': '金水区', '410106': '上街区', '410108': '惠济区', '410122': '中牟县', '410181': '巩义市', '410182': '荥阳市', '410183': '新密市', '410184': '新郑市', '410185': '登封市', '410200': '开封市', '410202': '龙亭区', '410203': '顺河回族区', '410204': '鼓楼区', '410205': '禹王台区', '410212': '祥符区', '410221': '杞县', '410222': '通许县', '410223': '尉氏县', '410225': '兰考县', '410300': '洛阳市', '410302': '老城区', '410303': '西工区', '410304': '瀍河回族区', '410305': '涧西区', '410306': '吉利区', '410311': '洛龙区', '410322': '孟津县', '410323': '新安县', '410324': '栾川县', '410325': '嵩县', '410326': '汝阳县', '410327': '宜阳县', '410328': '洛宁县', '410329': '伊川县', '410381': '偃师市', '410400': '平顶山市', '410402': '新华区', '410403': '卫东区', '410404': '石龙区', '410411': '湛河区', '410421': '宝丰县', '410422': '叶县', '410423': '鲁山县', '410425': '郏县', '410481': '舞钢市', '410482': '汝州市', '410500': '安阳市', '410502': '文峰区', '410503': '北关区', '410505': '殷都区', '410506': '龙安区', '410522': '安阳县', '410523': '汤阴县', '410526': '滑县', '410527': '内黄县', '410581': '林州市', '410600': '鹤壁市', '410602': '鹤山区', '410603': '山城区', '410611': '淇滨区', '410621': '浚县', '410622': '淇县', '410700': '新乡市', '410702': '红旗区', '410703': '卫滨区', '410704': '凤泉区', '410711': '牧野区', '410721': '新乡县', '410724': '获嘉县', '410725': '原阳县', '410726': '延津县', '410727': '封丘县', '410728': '长垣县', '410781': '卫辉市', '410782': '辉县市', '410800': '焦作市', '410802': '解放区', '410803': '中站区', '410804': '马村区', '410811': '山阳区', '410821': '修武县', '410822': '博爱县', '410823': '武陟县', '410825': '温县', '410882': '沁阳市', '410883': '孟州市', '410900': '濮阳市', '410902': '华龙区', '410922': '清丰县', '410923': '南乐县', '410926': '范县', '410927': '台前县', '410928': '濮阳县', '411000': '许昌市', '411002': '魏都区', '411023': '许昌县', '411024': '鄢陵县', '411025': '襄城县', '411081': '禹州市', '411082': '长葛市', '411100': '漯河市', '411102': '源汇区', '411103': '郾城区', '411104': '召陵区', '411121': '舞阳县', '411122': '临颍县', '411200': '三门峡市', '411202': '湖滨区', '411203': '陕州区', '411221': '渑池县', '411224': '卢氏县', '411281': 
'义马市', '411282': '灵宝市', '411300': '南阳市', '411302': '宛城区', '411303': '卧龙区', '411321': '南召县', '411322': '方城县', '411323': '西峡县', '411324': '镇平县', '411325': '内乡县', '411326': '淅川县', '411327': '社旗县', '411328': '唐河县', '411329': '新野县', '411330': '桐柏县', '411381': '邓州市', '411400': '商丘市', '411402': '梁园区', '411403': '睢阳区', '411421': '民权县', '411422': '睢县', '411423': '宁陵县', '411424': '柘城县', '411425': '虞城县', '411426': '夏邑县', '411481': '永城市', '411500': '信阳市', '411502': '浉河区', '411503': '平桥区', '411521': '罗山县', '411522': '光山县', '411523': '新县', '411524': '商城县', '411525': '固始县', '411526': '潢川县', '411527': '淮滨县', '411528': '息县', '411600': '周口市', '411602': '川汇区', '411621': '扶沟县', '411622': '西华县', '411623': '商水县', '411624': '沈丘县', '411625': '郸城县', '411626': '淮阳县', '411627': '太康县', '411628': '鹿邑县', '411681': '项城市', '411700': '驻马店市', '411702': '驿城区', '411721': '西平县', '411722': '上蔡县', '411723': '平舆县', '411724': '正阳县', '411725': '确山县', '411726': '泌阳县', '411727': '汝南县', '411728': '遂平县', '411729': '新蔡县', '419001': '济源市', '420000': '湖北省', '420100': '武汉市', '420102': '江岸区', '420103': '江汉区', '420104': '硚口区', '420105': '汉阳区', '420106': '武昌区', '420107': '青山区', '420111': '洪山区', '420112': '东西湖区', '420113': '汉南区', '420114': '蔡甸区', '420115': '江夏区', '420116': '黄陂区', '420117': '新洲区', '420200': '黄石市', '420202': '黄石港区', '420203': '西塞山区', '420204': '下陆区', '420205': '铁山区', '420222': '阳新县', '420281': '大冶市', '420300': '十堰市', '420302': '茅箭区', '420303': '张湾区', '420304': '郧阳区', '420322': '郧西县', '420323': '竹山县', '420324': '竹溪县', '420325': '房县', '420381': '丹江口市', '420500': '宜昌市', '420502': '西陵区', '420503': '伍家岗区', '420504': '点军区', '420505': '猇亭区', '420506': '夷陵区', '420525': '远安县', '420526': '兴山县', '420527': '秭归县', '420528': '长阳土家族自治县', '420529': '五峰土家族自治县', '420581': '宜都市', '420582': '当阳市', '420583': '枝江市', '420600': '襄阳市', '420602': '襄城区', '420606': '樊城区', '420607': '襄州区', '420624': '南漳县', '420625': '谷城县', '420626': '保康县', '420682': '老河口市', '420683': '枣阳市', '420684': '宜城市', '420700': '鄂州市', '420702': '梁子湖区', 
'420703': '华容区', '420704': '鄂城区', '420800': '荆门市', '420802': '东宝区', '420804': '掇刀区', '420821': '京山县', '420822': '沙洋县', '420881': '钟祥市', '420900': '孝感市', '420902': '孝南区', '420921': '孝昌县', '420922': '大悟县', '420923': '云梦县', '420981': '应城市', '420982': '安陆市', '420984': '汉川市', '421000': '荆州市', '421002': '沙市区', '421003': '荆州区', '421022': '公安县', '421023': '监利县', '421024': '江陵县', '421081': '石首市', '421083': '洪湖市', '421087': '松滋市', '421100': '黄冈市', '421102': '黄州区', '421121': '团风县', '421122': '红安县', '421123': '罗田县', '421124': '英山县', '421125': '浠水县', '421126': '蕲春县', '421127': '黄梅县', '421181': '麻城市', '421182': '武穴市', '421200': '咸宁市', '421202': '咸安区', '421221': '嘉鱼县', '421222': '通城县', '421223': '崇阳县', '421224': '通山县', '421281': '赤壁市', '421300': '随州市', '421303': '曾都区', '421321': '随县', '421381': '广水市', '422800': '恩施土家族苗族自治州', '422801': '恩施市', '422802': '利川市', '422822': '建始县', '422823': '巴东县', '422825': '宣恩县', '422826': '咸丰县', '422827': '来凤县', '422828': '鹤峰县', '429004': '仙桃市', '429005': '潜江市', '429006': '天门市', '429021': '神农架林区', '430000': '湖南省', '430100': '长沙市', '430102': '芙蓉区', '430103': '天心区', '430104': '岳麓区', '430105': '开福区', '430111': '雨花区', '430112': '望城区', '430121': '长沙县', '430124': '宁乡县', '430181': '浏阳市', '430200': '株洲市', '430202': '荷塘区', '430203': '芦淞区', '430204': '石峰区', '430211': '天元区', '430221': '株洲县', '430223': '攸县', '430224': '茶陵县', '430225': '炎陵县', '430281': '醴陵市', '430300': '湘潭市', '430302': '雨湖区', '430304': '岳塘区', '430321': '湘潭县', '430381': '湘乡市', '430382': '韶山市', '430400': '衡阳市', '430405': '珠晖区', '430406': '雁峰区', '430407': '石鼓区', '430408': '蒸湘区', '430412': '南岳区', '430421': '衡阳县', '430422': '衡南县', '430423': '衡山县', '430424': '衡东县', '430426': '祁东县', '430481': '耒阳市', '430482': '常宁市', '430500': '邵阳市', '430502': '双清区', '430503': '大祥区', '430511': '北塔区', '430521': '邵东县', '430522': '新邵县', '430523': '邵阳县', '430524': '隆回县', '430525': '洞口县', '430527': '绥宁县', '430528': '新宁县', '430529': '城步苗族自治县', '430581': '武冈市', '430600': '岳阳市', '430602': '岳阳楼区', '430603': '云溪区', '430611': 
'君山区', '430621': '岳阳县', '430623': '华容县', '430624': '湘阴县', '430626': '平江县', '430681': '汨罗市', '430682': '临湘市', '430700': '常德市', '430702': '武陵区', '430703': '鼎城区', '430721': '安乡县', '430722': '汉寿县', '430723': '澧县', '430724': '临澧县', '430725': '桃源县', '430726': '石门县', '430781': '津市市', '430800': '张家界市', '430802': '永定区', '430811': '武陵源区', '430821': '慈利县', '430822': '桑植县', '430900': '益阳市', '430902': '资阳区', '430903': '赫山区', '430921': '南县', '430922': '桃江县', '430923': '安化县', '430981': '沅江市', '431000': '郴州市', '431002': '北湖区', '431003': '苏仙区', '431021': '桂阳县', '431022': '宜章县', '431023': '永兴县', '431024': '嘉禾县', '431025': '临武县', '431026': '汝城县', '431027': '桂东县', '431028': '安仁县', '431081': '资兴市', '431100': '永州市', '431102': '零陵区', '431103': '冷水滩区', '431121': '祁阳县', '431122': '东安县', '431123': '双牌县', '431124': '道县', '431125': '江永县', '431126': '宁远县', '431127': '蓝山县', '431128': '新田县', '431129': '江华瑶族自治县', '431200': '怀化市', '431202': '鹤城区', '431221': '中方县', '431222': '沅陵县', '431223': '辰溪县', '431224': '溆浦县', '431225': '会同县', '431226': '麻阳苗族自治县', '431227': '新晃侗族自治县', '431228': '芷江侗族自治县', '431229': '靖州苗族侗族自治县', '431230': '通道侗族自治县', '431281': '洪江市', '431300': '娄底市', '431302': '娄星区', '431321': '双峰县', '431322': '新化县', '431381': '冷水江市', '431382': '涟源市', '433100': '湘西土家族苗族自治州', '433101': '吉首市', '433122': '泸溪县', '433123': '凤凰县', '433124': '花垣县', '433125': '保靖县', '433126': '古丈县', '433127': '永顺县', '433130': '龙山县', '440000': '广东省', '440100': '广州市', '440103': '荔湾区', '440104': '越秀区', '440105': '海珠区', '440106': '天河区', '440111': '白云区', '440112': '黄埔区', '440113': '番禺区', '440114': '花都区', '440115': '南沙区', '440117': '从化区', '440118': '增城区', '440200': '韶关市', '440203': '武江区', '440204': '浈江区', '440205': '曲江区', '440222': '始兴县', '440224': '仁化县', '440229': '翁源县', '440232': '乳源瑶族自治县', '440233': '新丰县', '440281': '乐昌市', '440282': '南雄市', '440300': '深圳市', '440303': '罗湖区', '440304': '福田区', '440305': '南山区', '440306': '宝安区', '440307': '龙岗区', '440308': '盐田区', '440400': '珠海市', '440402': '香洲区', '440403': '斗门区', '440404': '金湾区', 
'440500': '汕头市', '440507': '龙湖区', '440511': '金平区', '440512': '濠江区', '440513': '潮阳区', '440514': '潮南区', '440515': '澄海区', '440523': '南澳县', '440600': '佛山市', '440604': '禅城区', '440605': '南海区', '440606': '顺德区', '440607': '三水区', '440608': '高明区', '440700': '江门市', '440703': '蓬江区', '440704': '江海区', '440705': '新会区', '440781': '台山市', '440783': '开平市', '440784': '鹤山市', '440785': '恩平市', '440800': '湛江市', '440802': '赤坎区', '440803': '霞山区', '440804': '坡头区', '440811': '麻章区', '440823': '遂溪县', '440825': '徐闻县', '440881': '廉江市', '440882': '雷州市', '440883': '吴川市', '440900': '茂名市', '440902': '茂南区', '440904': '电白区', '440981': '高州市', '440982': '化州市', '440983': '信宜市', '441200': '肇庆市', '441202': '端州区', '441203': '鼎湖区', '441204': '高要区', '441223': '广宁县', '441224': '怀集县', '441225': '封开县', '441226': '德庆县', '441284': '四会市', '441300': '惠州市', '441302': '惠城区', '441303': '惠阳区', '441322': '博罗县', '441323': '惠东县', '441324': '龙门县', '441400': '梅州市', '441402': '梅江区', '441403': '梅县区', '441422': '大埔县', '441423': '丰顺县', '441424': '五华县', '441426': '平远县', '441427': '蕉岭县', '441481': '兴宁市', '441500': '汕尾市', '441502': '城区', '441521': '海丰县', '441523': '陆河县', '441581': '陆丰市', '441600': '河源市', '441602': '源城区', '441621': '紫金县', '441622': '龙川县', '441623': '连平县', '441624': '和平县', '441625': '东源县', '441700': '阳江市', '441702': '江城区', '441704': '阳东区', '441721': '阳西县', '441781': '阳春市', '441800': '清远市', '441802': '清城区', '441803': '清新区', '441821': '佛冈县', '441823': '阳山县', '441825': '连山壮族瑶族自治县', '441826': '连南瑶族自治县', '441881': '英德市', '441882': '连州市', '441900': '东莞市', '442000': '中山市', '445100': '潮州市', '445102': '湘桥区', '445103': '潮安区', '445122': '饶平县', '445200': '揭阳市', '445202': '榕城区', '445203': '揭东区', '445222': '揭西县', '445224': '惠来县', '445281': '普宁市', '445300': '云浮市', '445302': '云城区', '445303': '云安区', '445321': '新兴县', '445322': '郁南县', '445381': '罗定市', '450000': '广西壮族自治区', '450100': '南宁市', '450102': '兴宁区', '450103': '青秀区', '450105': '江南区', '450107': '西乡塘区', '450108': '良庆区', '450109': '邕宁区', '450110': '武鸣区', '450123': '隆安县', '450124': 
'马山县', '450125': '上林县', '450126': '宾阳县', '450127': '横县', '450200': '柳州市', '450202': '城中区', '450203': '鱼峰区', '450204': '柳南区', '450205': '柳北区', '450221': '柳江县', '450222': '柳城县', '450223': '鹿寨县', '450224': '融安县', '450225': '融水苗族自治县', '450226': '三江侗族自治县', '450300': '桂林市', '450302': '秀峰区', '450303': '叠彩区', '450304': '象山区', '450305': '七星区', '450311': '雁山区', '450312': '临桂区', '450321': '阳朔县', '450323': '灵川县', '450324': '全州县', '450325': '兴安县', '450326': '永福县', '450327': '灌阳县', '450328': '龙胜各族自治县', '450329': '资源县', '450330': '平乐县', '450331': '荔浦县', '450332': '恭城瑶族自治县', '450400': '梧州市', '450403': '万秀区', '450405': '长洲区', '450406': '龙圩区', '450421': '苍梧县', '450422': '藤县', '450423': '蒙山县', '450481': '岑溪市', '450500': '北海市', '450502': '海城区', '450503': '银海区', '450512': '铁山港区', '450521': '合浦县', '450600': '防城港市', '450602': '港口区', '450603': '防城区', '450621': '上思县', '450681': '东兴市', '450700': '钦州市', '450702': '钦南区', '450703': '钦北区', '450721': '灵山县', '450722': '浦北县', '450800': '贵港市', '450802': '港北区', '450803': '港南区', '450804': '覃塘区', '450821': '平南县', '450881': '桂平市', '450900': '玉林市', '450902': '玉州区', '450903': '福绵区', '450921': '容县', '450922': '陆川县', '450923': '博白县', '450924': '兴业县', '450981': '北流市', '451000': '百色市', '451002': '右江区', '451021': '田阳县', '451022': '田东县', '451023': '平果县', '451024': '德保县', '451025': '靖西县', '451026': '那坡县', '451027': '凌云县', '451028': '乐业县', '451029': '田林县', '451030': '西林县', '451031': '隆林各族自治县', '451100': '贺州市', '451102': '八步区', '451121': '昭平县', '451122': '钟山县', '451123': '富川瑶族自治县', '451200': '河池市', '451202': '金城江区', '451221': '南丹县', '451222': '天峨县', '451223': '凤山县', '451224': '东兰县', '451225': '罗城仫佬族自治县', '451226': '环江毛南族自治县', '451227': '巴马瑶族自治县', '451228': '都安瑶族自治县', '451229': '大化瑶族自治县', '451281': '宜州市', '451300': '来宾市', '451302': '兴宾区', '451321': '忻城县', '451322': '象州县', '451323': '武宣县', '451324': '金秀瑶族自治县', '451381': '合山市', '451400': '崇左市', '451402': '江州区', '451421': '扶绥县', '451422': '宁明县', '451423': '龙州县', '451424': '大新县', '451425': '天等县', '451481': '凭祥市', 
'460000': '海南省', '460100': '海口市', '460105': '秀英区', '460106': '龙华区', '460107': '琼山区', '460108': '美兰区', '460200': '三亚市', '460202': '海棠区', '460203': '吉阳区', '460204': '天涯区', '460205': '崖州区', '460300': '三沙市', '460400': '儋州市', '469001': '五指山市', '469002': '琼海市', '469005': '文昌市', '469006': '万宁市', '469007': '东方市', '469021': '定安县', '469022': '屯昌县', '469023': '澄迈县', '469024': '临高县', '469025': '白沙黎族自治县', '469026': '昌江黎族自治县', '469027': '乐东黎族自治县', '469028': '陵水黎族自治县', '469029': '保亭黎族苗族自治县', '469030': '琼中黎族苗族自治县', '500000': '重庆市', '500101': '万州区', '500102': '涪陵区', '500103': '渝中区', '500104': '大渡口区', '500105': '江北区', '500106': '沙坪坝区', '500107': '九龙坡区', '500108': '南岸区', '500109': '北碚区', '500110': '綦江区', '500111': '大足区', '500112': '渝北区', '500113': '巴南区', '500114': '黔江区', '500115': '长寿区', '500116': '江津区', '500117': '合川区', '500118': '永川区', '500119': '南川区', '500120': '璧山区', '500151': '铜梁区', '500152': '潼南区', '500153': '荣昌区', '500228': '梁平县', '500229': '城口县', '500230': '丰都县', '500231': '垫江县', '500232': '武隆县', '500233': '忠县', '500234': '开县', '500235': '云阳县', '500236': '奉节县', '500237': '巫山县', '500238': '巫溪县', '500240': '石柱土家族自治县', '500241': '秀山土家族苗族自治县', '500242': '酉阳土家族苗族自治县', '500243': '彭水苗族土家族自治县', '510000': '四川省', '510100': '成都市', '510104': '锦江区', '510105': '青羊区', '510106': '金牛区', '510107': '武侯区', '510108': '成华区', '510112': '龙泉驿区', '510113': '青白江区', '510114': '新都区', '510115': '温江区', '510121': '金堂县', '510122': '双流县', '510124': '郫县', '510129': '大邑县', '510131': '蒲江县', '510132': '新津县', '510181': '都江堰市', '510182': '彭州市', '510183': '邛崃市', '510184': '崇州市', '510300': '自贡市', '510302': '自流井区', '510303': '贡井区', '510304': '大安区', '510311': '沿滩区', '510321': '荣县', '510322': '富顺县', '510400': '攀枝花市', '510402': '东区', '510403': '西区', '510411': '仁和区', '510421': '米易县', '510422': '盐边县', '510500': '泸州市', '510502': '江阳区', '510503': '纳溪区', '510504': '龙马潭区', '510521': '泸县', '510522': '合江县', '510524': '叙永县', '510525': '古蔺县', '510600': '德阳市', '510603': '旌阳区', '510623': '中江县', '510626': '罗江县', '510681': '广汉市', 
'510682': '什邡市', '510683': '绵竹市', '510700': '绵阳市', '510703': '涪城区', '510704': '游仙区', '510722': '三台县', '510723': '盐亭县', '510724': '安县', '510725': '梓潼县', '510726': '北川羌族自治县', '510727': '平武县', '510781': '江油市', '510800': '广元市', '510802': '利州区', '510811': '昭化区', '510812': '朝天区', '510821': '旺苍县', '510822': '青川县', '510823': '剑阁县', '510824': '苍溪县', '510900': '遂宁市', '510903': '船山区', '510904': '安居区', '510921': '蓬溪县', '510922': '射洪县', '510923': '大英县', '511000': '内江市', '511002': '市中区', '511011': '东兴区', '511024': '威远县', '511025': '资中县', '511028': '隆昌县', '511100': '乐山市', '511102': '市中区', '511111': '沙湾区', '511112': '五通桥区', '511113': '金口河区', '511123': '犍为县', '511124': '井研县', '511126': '夹江县', '511129': '沐川县', '511132': '峨边彝族自治县', '511133': '马边彝族自治县', '511181': '峨眉山市', '511300': '南充市', '511302': '顺庆区', '511303': '高坪区', '511304': '嘉陵区', '511321': '南部县', '511322': '营山县', '511323': '蓬安县', '511324': '仪陇县', '511325': '西充县', '511381': '阆中市', '511400': '眉山市', '511402': '东坡区', '511403': '彭山区', '511421': '仁寿县', '511423': '洪雅县', '511424': '丹棱县', '511425': '青神县', '511500': '宜宾市', '511502': '翠屏区', '511503': '南溪区', '511521': '宜宾县', '511523': '江安县', '511524': '长宁县', '511525': '高县', '511526': '珙县', '511527': '筠连县', '511528': '兴文县', '511529': '屏山县', '511600': '广安市', '511602': '广安区', '511603': '前锋区', '511621': '岳池县', '511622': '武胜县', '511623': '邻水县', '511681': '华蓥市', '511700': '达州市', '511702': '通川区', '511703': '达川区', '511722': '宣汉县', '511723': '开江县', '511724': '大竹县', '511725': '渠县', '511781': '万源市', '511800': '雅安市', '511802': '雨城区', '511803': '名山区', '511822': '荥经县', '511823': '汉源县', '511824': '石棉县', '511825': '天全县', '511826': '芦山县', '511827': '宝兴县', '511900': '巴中市', '511902': '巴州区', '511903': '恩阳区', '511921': '通江县', '511922': '南江县', '511923': '平昌县', '512000': '资阳市', '512002': '雁江区', '512021': '安岳县', '512022': '乐至县', '512081': '简阳市', '513200': '阿坝藏族羌族自治州', '513221': '汶川县', '513222': '理县', '513223': '茂县', '513224': '松潘县', '513225': '九寨沟县', '513226': '金川县', '513227': '小金县', '513228': '黑水县', '513229': 
'马尔康县', '513230': '壤塘县', '513231': '阿坝县', '513232': '若尔盖县', '513233': '红原县', '513300': '甘孜藏族自治州', '513301': '康定市', '513322': '泸定县', '513323': '丹巴县', '513324': '九龙县', '513325': '雅江县', '513326': '道孚县', '513327': '炉霍县', '513328': '甘孜县', '513329': '新龙县', '513330': '德格县', '513331': '白玉县', '513332': '石渠县', '513333': '色达县', '513334': '理塘县', '513335': '巴塘县', '513336': '乡城县', '513337': '稻城县', '513338': '得荣县', '513400': '凉山彝族自治州', '513401': '西昌市', '513422': '木里藏族自治县', '513423': '盐源县', '513424': '德昌县', '513425': '会理县', '513426': '会东县', '513427': '宁南县', '513428': '普格县', '513429': '布拖县', '513430': '金阳县', '513431': '昭觉县', '513432': '喜德县', '513433': '冕宁县', '513434': '越西县', '513435': '甘洛县', '513436': '美姑县', '513437': '雷波县', '520000': '贵州省', '520100': '贵阳市', '520102': '南明区', '520103': '云岩区', '520111': '花溪区', '520112': '乌当区', '520113': '白云区', '520115': '观山湖区', '520121': '开阳县', '520122': '息烽县', '520123': '修文县', '520181': '清镇市', '520200': '六盘水市', '520201': '钟山区', '520203': '六枝特区', '520221': '水城县', '520222': '盘县', '520300': '遵义市', '520302': '红花岗区', '520303': '汇川区', '520321': '遵义县', '520322': '桐梓县', '520323': '绥阳县', '520324': '正安县', '520325': '道真仡佬族苗族自治县', '520326': '务川仡佬族苗族自治县', '520327': '凤冈县', '520328': '湄潭县', '520329': '余庆县', '520330': '习水县', '520381': '赤水市', '520382': '仁怀市', '520400': '安顺市', '520402': '西秀区', '520403': '平坝区', '520422': '普定县', '520423': '镇宁布依族苗族自治县', '520424': '关岭布依族苗族自治县', '520425': '紫云苗族布依族自治县', '520500': '毕节市', '520502': '七星关区', '520521': '大方县', '520522': '黔西县', '520523': '金沙县', '520524': '织金县', '520525': '纳雍县', '520526': '威宁彝族回族苗族自治县', '520527': '赫章县', '520600': '铜仁市', '520602': '碧江区', '520603': '万山区', '520621': '江口县', '520622': '玉屏侗族自治县', '520623': '石阡县', '520624': '思南县', '520625': '印江土家族苗族自治县', '520626': '德江县', '520627': '沿河土家族自治县', '520628': '松桃苗族自治县', '522300': '黔西南布依族苗族自治州', '522301': '兴义市', '522322': '兴仁县', '522323': '普安县', '522324': '晴隆县', '522325': '贞丰县', '522326': '望谟县', '522327': '册亨县', '522328': '安龙县', '522600': '黔东南苗族侗族自治州', '522601': '凯里市', '522622': 
'黄平县', '522623': '施秉县', '522624': '三穗县', '522625': '镇远县', '522626': '岑巩县', '522627': '天柱县', '522628': '锦屏县', '522629': '剑河县', '522630': '台江县', '522631': '黎平县', '522632': '榕江县', '522633': '从江县', '522634': '雷山县', '522635': '麻江县', '522636': '丹寨县', '522700': '黔南布依族苗族自治州', '522701': '都匀市', '522702': '福泉市', '522722': '荔波县', '522723': '贵定县', '522725': '瓮安县', '522726': '独山县', '522727': '平塘县', '522728': '罗甸县', '522729': '长顺县', '522730': '龙里县', '522731': '惠水县', '522732': '三都水族自治县', '530000': '云南省', '530100': '昆明市', '530102': '五华区', '530103': '盘龙区', '530111': '官渡区', '530112': '西山区', '530113': '东川区', '530114': '呈贡区', '530122': '晋宁县', '530124': '富民县', '530125': '宜良县', '530126': '石林彝族自治县', '530127': '嵩明县', '530128': '禄劝彝族苗族自治县', '530129': '寻甸回族彝族自治县', '530181': '安宁市', '530300': '曲靖市', '530302': '麒麟区', '530321': '马龙县', '530322': '陆良县', '530323': '师宗县', '530324': '罗平县', '530325': '富源县', '530326': '会泽县', '530328': '沾益县', '530381': '宣威市', '530400': '玉溪市', '530402': '红塔区', '530421': '江川县', '530422': '澄江县', '530423': '通海县', '530424': '华宁县', '530425': '易门县', '530426': '峨山彝族自治县', '530427': '新平彝族傣族自治县', '530428': '元江哈尼族彝族傣族自治县', '530500': '保山市', '530502': '隆阳区', '530521': '施甸县', '530522': '腾冲县', '530523': '龙陵县', '530524': '昌宁县', '530600': '昭通市', '530602': '昭阳区', '530621': '鲁甸县', '530622': '巧家县', '530623': '盐津县', '530624': '大关县', '530625': '永善县', '530626': '绥江县', '530627': '镇雄县', '530628': '彝良县', '530629': '威信县', '530630': '水富县', '530700': '丽江市', '530702': '古城区', '530721': '玉龙纳西族自治县', '530722': '永胜县', '530723': '华坪县', '530724': '宁蒗彝族自治县', '530800': '普洱市', '530802': '思茅区', '530821': '宁洱哈尼族彝族自治县', '530822': '墨江哈尼族自治县', '530823': '景东彝族自治县', '530824': '景谷傣族彝族自治县', '530825': '镇沅彝族哈尼族拉祜族自治县', '530826': '江城哈尼族彝族自治县', '530827': '孟连傣族拉祜族佤族自治县', '530828': '澜沧拉祜族自治县', '530829': '西盟佤族自治县', '530900': '临沧市', '530902': '临翔区', '530921': '凤庆县', '530922': '云县', '530923': '永德县', '530924': '镇康县', '530925': '双江拉祜族佤族布朗族傣族自治县', '530926': '耿马傣族佤族自治县', '530927': '沧源佤族自治县', '532300': '楚雄彝族自治州', '532301': '楚雄市', 
'532322': '双柏县', '532323': '牟定县', '532324': '南华县', '532325': '姚安县', '532326': '大姚县', '532327': '永仁县', '532328': '元谋县', '532329': '武定县', '532331': '禄丰县', '532500': '红河哈尼族彝族自治州', '532501': '个旧市', '532502': '开远市', '532503': '蒙自市', '532504': '弥勒市', '532523': '屏边苗族自治县', '532524': '建水县', '532525': '石屏县', '532527': '泸西县', '532528': '元阳县', '532529': '红河县', '532530': '金平苗族瑶族傣族自治县', '532531': '绿春县', '532532': '河口瑶族自治县', '532600': '文山壮族苗族自治州', '532601': '文山市', '532622': '砚山县', '532623': '西畴县', '532624': '麻栗坡县', '532625': '马关县', '532626': '丘北县', '532627': '广南县', '532628': '富宁县', '532800': '西双版纳傣族自治州', '532801': '景洪市', '532822': '勐海县', '532823': '勐腊县', '532900': '大理白族自治州', '532901': '大理市', '532922': '漾濞彝族自治县', '532923': '祥云县', '532924': '宾川县', '532925': '弥渡县', '532926': '南涧彝族自治县', '532927': '巍山彝族回族自治县', '532928': '永平县', '532929': '云龙县', '532930': '洱源县', '532931': '剑川县', '532932': '鹤庆县', '533100': '德宏傣族景颇族自治州', '533102': '瑞丽市', '533103': '芒市', '533122': '梁河县', '533123': '盈江县', '533124': '陇川县', '533300': '怒江傈僳族自治州', '533321': '泸水县', '533323': '福贡县', '533324': '贡山独龙族怒族自治县', '533325': '兰坪白族普米族自治县', '533400': '迪庆藏族自治州', '533401': '香格里拉市', '533422': '德钦县', '533423': '维西傈僳族自治县', '540000': '西藏自治区', '540100': '拉萨市', '540102': '城关区', '540121': '林周县', '540122': '当雄县', '540123': '尼木县', '540124': '曲水县', '540125': '堆龙德庆县', '540126': '达孜县', '540127': '墨竹工卡县', '540200': '日喀则市', '540202': '桑珠孜区', '540221': '南木林县', '540222': '江孜县', '540223': '定日县', '540224': '萨迦县', '540225': '拉孜县', '540226': '昂仁县', '540227': '谢通门县', '540228': '白朗县', '540229': '仁布县', '540230': '康马县', '540231': '定结县', '540232': '仲巴县', '540233': '亚东县', '540234': '吉隆县', '540235': '聂拉木县', '540236': '萨嘎县', '540237': '岗巴县', '540300': '昌都市', '540302': '卡若区', '540321': '江达县', '540322': '贡觉县', '540323': '类乌齐县', '540324': '丁青县', '540325': '察雅县', '540326': '八宿县', '540327': '左贡县', '540328': '芒康县', '540329': '洛隆县', '540330': '边坝县', '540400': '林芝市', '540402': '巴宜区', '540421': '工布江达县', '540422': '米林县', '540423': '墨脱县', '540424': '波密县', 
'540425': '察隅县', '540426': '朗县', '542200': '山南地区', '542221': '乃东县', '542222': '扎囊县', '542223': '贡嘎县', '542224': '桑日县', '542225': '琼结县', '542226': '曲松县', '542227': '措美县', '542228': '洛扎县', '542229': '加查县', '542231': '隆子县', '542232': '错那县', '542233': '浪卡子县', '542400': '那曲地区', '542421': '那曲县', '542422': '嘉黎县', '542423': '比如县', '542424': '聂荣县', '542425': '安多县', '542426': '申扎县', '542427': '索县', '542428': '班戈县', '542429': '巴青县', '542430': '尼玛县', '542431': '双湖县', '542500': '阿里地区', '542521': '普兰县', '542522': '札达县', '542523': '噶尔县', '542524': '日土县', '542525': '革吉县', '542526': '改则县', '542527': '措勤县', '610000': '陕西省', '610100': '西安市', '610102': '新城区', '610103': '碑林区', '610104': '莲湖区', '610111': '灞桥区', '610112': '未央区', '610113': '雁塔区', '610114': '阎良区', '610115': '临潼区', '610116': '长安区', '610117': '高陵区', '610122': '蓝田县', '610124': '周至县', '610125': '户县', '610200': '铜川市', '610202': '王益区', '610203': '印台区', '610204': '耀州区', '610222': '宜君县', '610300': '宝鸡市', '610302': '渭滨区', '610303': '金台区', '610304': '陈仓区', '610322': '凤翔县', '610323': '岐山县', '610324': '扶风县', '610326': '眉县', '610327': '陇县', '610328': '千阳县', '610329': '麟游县', '610330': '凤县', '610331': '太白县', '610400': '咸阳市', '610402': '秦都区', '610403': '杨陵区', '610404': '渭城区', '610422': '三原县', '610423': '泾阳县', '610424': '乾县', '610425': '礼泉县', '610426': '永寿县', '610427': '彬县', '610428': '长武县', '610429': '旬邑县', '610430': '淳化县', '610431': '武功县', '610481': '兴平市', '610500': '渭南市', '610502': '临渭区', '610521': '华县', '610522': '潼关县', '610523': '大荔县', '610524': '合阳县', '610525': '澄城县', '610526': '蒲城县', '610527': '白水县', '610528': '富平县', '610581': '韩城市', '610582': '华阴市', '610600': '延安市', '610602': '宝塔区', '610621': '延长县', '610622': '延川县', '610623': '子长县', '610624': '安塞县', '610625': '志丹县', '610626': '吴起县', '610627': '甘泉县', '610628': '富县', '610629': '洛川县', '610630': '宜川县', '610631': '黄龙县', '610632': '黄陵县', '610700': '汉中市', '610702': '汉台区', '610721': '南郑县', '610722': '城固县', '610723': '洋县', '610724': '西乡县', '610725': '勉县', '610726': '宁强县', '610727': '略阳县', 
'610728': '镇巴县', '610729': '留坝县', '610730': '佛坪县', '610800': '榆林市', '610802': '榆阳区', '610821': '神木县', '610822': '府谷县', '610823': '横山县', '610824': '靖边县', '610825': '定边县', '610826': '绥德县', '610827': '米脂县', '610828': '佳县', '610829': '吴堡县', '610830': '清涧县', '610831': '子洲县', '610900': '安康市', '610902': '汉滨区', '610921': '汉阴县', '610922': '石泉县', '610923': '宁陕县', '610924': '紫阳县', '610925': '岚皋县', '610926': '平利县', '610927': '镇坪县', '610928': '旬阳县', '610929': '白河县', '611000': '商洛市', '611002': '商州区', '611021': '洛南县', '611022': '丹凤县', '611023': '商南县', '611024': '山阳县', '611025': '镇安县', '611026': '柞水县', '620000': '甘肃省', '620100': '兰州市', '620102': '城关区', '620103': '七里河区', '620104': '西固区', '620105': '安宁区', '620111': '红古区', '620121': '永登县', '620122': '皋兰县', '620123': '榆中县', '620200': '嘉峪关市', '620300': '金昌市', '620302': '金川区', '620321': '永昌县', '620400': '白银市', '620402': '白银区', '620403': '平川区', '620421': '靖远县', '620422': '会宁县', '620423': '景泰县', '620500': '天水市', '620502': '秦州区', '620503': '麦积区', '620521': '清水县', '620522': '秦安县', '620523': '甘谷县', '620524': '武山县', '620525': '张家川回族自治县', '620600': '武威市', '620602': '凉州区', '620621': '民勤县', '620622': '古浪县', '620623': '天祝藏族自治县', '620700': '张掖市', '620702': '甘州区', '620721': '肃南裕固族自治县', '620722': '民乐县', '620723': '临泽县', '620724': '高台县', '620725': '山丹县', '620800': '平凉市', '620802': '崆峒区', '620821': '泾川县', '620822': '灵台县', '620823': '崇信县', '620824': '华亭县', '620825': '庄浪县', '620826': '静宁县', '620900': '酒泉市', '620902': '肃州区', '620921': '金塔县', '620922': '瓜州县', '620923': '肃北蒙古族自治县', '620924': '阿克塞哈萨克族自治县', '620981': '玉门市', '620982': '敦煌市', '621000': '庆阳市', '621002': '西峰区', '621021': '庆城县', '621022': '环县', '621023': '华池县', '621024': '合水县', '621025': '正宁县', '621026': '宁县', '621027': '镇原县', '621100': '定西市', '621102': '安定区', '621121': '通渭县', '621122': '陇西县', '621123': '渭源县', '621124': '临洮县', '621125': '漳县', '621126': '岷县', '621200': '陇南市', '621202': '武都区', '621221': '成县', '621222': '文县', '621223': '宕昌县', '621224': '康县', '621225': '西和县', '621226': '礼县', 
'621227': '徽县', '621228': '两当县', '622900': '临夏回族自治州', '622901': '临夏市', '622921': '临夏县', '622922': '康乐县', '622923': '永靖县', '622924': '广河县', '622925': '和政县', '622926': '东乡族自治县', '622927': '积石山保安族东乡族撒拉族自治县', '623000': '甘南藏族自治州', '623001': '合作市', '623021': '临潭县', '623022': '卓尼县', '623023': '舟曲县', '623024': '迭部县', '623025': '玛曲县', '623026': '碌曲县', '623027': '夏河县', '630000': '青海省', '630100': '西宁市', '630102': '城东区', '630103': '城中区', '630104': '城西区', '630105': '城北区', '630121': '大通回族土族自治县', '630122': '湟中县', '630123': '湟源县', '630200': '海东市', '630202': '乐都区', '630203': '平安区', '630222': '民和回族土族自治县', '630223': '互助土族自治县', '630224': '化隆回族自治县', '630225': '循化撒拉族自治县', '632200': '海北藏族自治州', '632221': '门源回族自治县', '632222': '祁连县', '632223': '海晏县', '632224': '刚察县', '632300': '黄南藏族自治州', '632321': '同仁县', '632322': '尖扎县', '632323': '泽库县', '632324': '河南蒙古族自治县', '632500': '海南藏族自治州', '632521': '共和县', '632522': '同德县', '632523': '贵德县', '632524': '兴海县', '632525': '贵南县', '632600': '果洛藏族自治州', '632621': '玛沁县', '632622': '班玛县', '632623': '甘德县', '632624': '达日县', '632625': '久治县', '632626': '玛多县', '632700': '玉树藏族自治州', '632701': '玉树市', '632722': '杂多县', '632723': '称多县', '632724': '治多县', '632725': '囊谦县', '632726': '曲麻莱县', '632800': '海西蒙古族藏族自治州', '632801': '格尔木市', '632802': '德令哈市', '632821': '乌兰县', '632822': '都兰县', '632823': '天峻县', '640000': '宁夏回族自治区', '640100': '银川市', '640104': '兴庆区', '640105': '西夏区', '640106': '金凤区', '640121': '永宁县', '640122': '贺兰县', '640181': '灵武市', '640200': '石嘴山市', '640202': '大武口区', '640205': '惠农区', '640221': '平罗县', '640300': '吴忠市', '640302': '利通区', '640303': '红寺堡区', '640323': '盐池县', '640324': '同心县', '640381': '青铜峡市', '640400': '固原市', '640402': '原州区', '640422': '西吉县', '640423': '隆德县', '640424': '泾源县', '640425': '彭阳县', '640500': '中卫市', '640502': '沙坡头区', '640521': '中宁县', '640522': '海原县', '650000': '新疆维吾尔自治区', '650100': '乌鲁木齐市', '650102': '天山区', '650103': '沙依巴克区', '650104': '新市区', '650105': '水磨沟区', '650106': '头屯河区', '650107': '达坂城区', '650109': '米东区', '650121': '乌鲁木齐县', '650200': '克拉玛依市', 
'650202': '独山子区', '650203': '克拉玛依区', '650204': '白碱滩区', '650205': '乌尔禾区', '650400': '吐鲁番市', '650402': '高昌区', '650421': '鄯善县', '650422': '托克逊县', '652200': '哈密地区', '652201': '哈密市', '652222': '巴里坤哈萨克自治县', '652223': '伊吾县', '652300': '昌吉回族自治州', '652301': '昌吉市', '652302': '阜康市', '652323': '呼图壁县', '652324': '玛纳斯县', '652325': '奇台县', '652327': '吉木萨尔县', '652328': '木垒哈萨克自治县', '652700': '博尔塔拉蒙古自治州', '652701': '博乐市', '652702': '阿拉山口市', '652722': '精河县', '652723': '温泉县', '652800': '巴音郭楞蒙古自治州', '652801': '库尔勒市', '652822': '轮台县', '652823': '尉犁县', '652824': '若羌县', '652825': '且末县', '652826': '焉耆回族自治县', '652827': '和静县', '652828': '和硕县', '652829': '博湖县', '652900': '阿克苏地区', '652901': '阿克苏市', '652922': '温宿县', '652923': '库车县', '652924': '沙雅县', '652925': '新和县', '652926': '拜城县', '652927': '乌什县', '652928': '阿瓦提县', '652929': '柯坪县', '653000': '克孜勒苏柯尔克孜自治州', '653001': '阿图什市', '653022': '阿克陶县', '653023': '阿合奇县', '653024': '乌恰县', '653100': '喀什地区', '653101': '喀什市', '653121': '疏附县', '653122': '疏勒县', '653123': '英吉沙县', '653124': '泽普县', '653125': '莎车县', '653126': '叶城县', '653127': '麦盖提县', '653128': '岳普湖县', '653129': '伽师县', '653130': '巴楚县', '653131': '塔什库尔干塔吉克自治县', '653200': '和田地区', '653201': '和田市', '653221': '和田县', '653222': '墨玉县', '653223': '皮山县', '653224': '洛浦县', '653225': '策勒县', '653226': '于田县', '653227': '民丰县', '654000': '伊犁哈萨克自治州', '654002': '伊宁市', '654003': '奎屯市', '654004': '霍尔果斯市', '654021': '伊宁县', '654022': '察布查尔锡伯自治县', '654023': '霍城县', '654024': '巩留县', '654025': '新源县', '654026': '昭苏县', '654027': '特克斯县', '654028': '尼勒克县', '654200': '塔城地区', '654201': '塔城市', '654202': '乌苏市', '654221': '额敏县', '654223': '沙湾县', '654224': '托里县', '654225': '裕民县', '654226': '和布克赛尔蒙古自治县', '654300': '阿勒泰地区', '654301': '阿勒泰市', '654321': '布尔津县', '654322': '富蕴县', '654323': '福海县', '654324': '哈巴河县', '654325': '青河县', '654326': '吉木乃县', '659001': '石河子市', '659002': '阿拉尔市', '659003': '图木舒克市', '659004': '五家渠市', '659005': '北屯市', '659006': '铁门关市', '659007': '双河市', '659008': '可克达拉市', '710000': '台湾省', '810000': '香港特别行政区', '820000': 
'澳门特别行政区', }
PypiClean
/ForcastingModels-0.0.1.tar.gz/ForcastingModels-0.0.1/Models/Multistep_Models.py
from numpy import array
from keras.models import Sequential
from keras.layers import Dense, Flatten, LSTM, TimeDistributed, RepeatVector
# Conv1D / MaxPooling1D are re-exported from keras.layers; the old
# keras.layers.convolutional module was removed in recent Keras releases.
from keras.layers import Conv1D
from keras.layers import MaxPooling1D


class Singleton(type):
    """Metaclass turning each class that uses it into a lazy singleton.

    The first instantiation is cached; every later call returns the cached
    instance (constructor arguments of later calls are ignored).
    """

    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super(
                Singleton, cls).__call__(*args, **kwargs)
        return cls._instances[cls]


class MLP_Model(metaclass=Singleton):
    """Multilayer perceptron for univariate multistep forecasting."""

    # Kept as a class attribute for backward compatibility (callers may read
    # ``MLP_Model.model``); ``build`` rebinds it to a fresh network.
    model = Sequential()

    def __init__(self, epoch):
        # epoch: number of training epochs used by ``build``
        self.epochs = epoch

    # using for univariate multistep
    def build(self, n_steps_in, n_steps_out, X, y):
        """Create, compile and fit the MLP.

        X has shape (samples, n_steps_in); y has shape (samples, n_steps_out).
        """
        # Rebuild from scratch: the previous implementation mutated one
        # shared Sequential, so a second call to build() stacked duplicate
        # layers on top of the already-fitted ones.
        MLP_Model.model = Sequential()
        MLP_Model.model.add(Dense(100, activation='relu',
                                  input_dim=n_steps_in))
        MLP_Model.model.add(Dense(n_steps_out))
        MLP_Model.model.compile(optimizer='adam', loss='mse')
        # fit model
        MLP_Model.model.fit(X, y, epochs=self.epochs, verbose=0)

    # demonstrate prediction
    def predict(self, pred, n_steps_in):
        """Return the multistep forecast for one input window ``pred``."""
        x_input = array(pred)
        x_input = x_input.reshape((1, n_steps_in))
        yhat = MLP_Model.model.predict(x_input, verbose=0)
        return yhat


class CNN_Model(metaclass=Singleton):
    """1-D convolutional network for multistep forecasting."""

    model = Sequential()

    def __init__(self, epoch):
        self.epochs = epoch

    def build(self, n_steps_in, n_features, n_steps_out, X, y):
        """Create, compile and fit the CNN on (X, y)."""
        X = X.reshape((X.shape[0], X.shape[1], n_features))
        # Fresh network on every build (see MLP_Model.build).
        CNN_Model.model = Sequential()
        CNN_Model.model.add(Conv1D(filters=64, kernel_size=2,
                                   activation='relu',
                                   input_shape=(n_steps_in, n_features)))
        CNN_Model.model.add(MaxPooling1D(pool_size=2))
        CNN_Model.model.add(Flatten())
        CNN_Model.model.add(Dense(50, activation='relu'))
        CNN_Model.model.add(Dense(n_steps_out))
        CNN_Model.model.compile(optimizer='adam', loss='mse')
        # fit model
        CNN_Model.model.fit(X, y, epochs=self.epochs, verbose=0)

    # demonstrate prediction
    def predict(self, pred, n_steps_in, n_features):
        """Return the multistep forecast for one input window ``pred``."""
        x_input = array(pred)
        x_input = x_input.reshape((1, n_steps_in, n_features))
        yhat = CNN_Model.model.predict(x_input, verbose=0)
        return yhat


class Vector_output_Model(metaclass=Singleton):
    """Stacked-LSTM model that outputs the whole forecast vector at once."""

    model = Sequential()

    def __init__(self, epoch):
        self.epochs = epoch

    def build(self, n_steps_in, n_features, n_steps_out, X, y):
        """Create, compile and fit the stacked-LSTM model on (X, y)."""
        X = X.reshape((X.shape[0], X.shape[1], n_features))
        Vector_output_Model.model = Sequential()
        # return_sequences=True is mandatory on the first of two stacked
        # LSTMs: the second LSTM consumes a sequence, not a single vector.
        # Without it Keras raises a shape error when the model is built.
        Vector_output_Model.model.add(
            LSTM(100, activation='relu', return_sequences=True,
                 input_shape=(n_steps_in, n_features)))
        Vector_output_Model.model.add(LSTM(100, activation='relu'))
        Vector_output_Model.model.add(Dense(n_steps_out))
        Vector_output_Model.model.compile(optimizer='adam', loss='mse')
        # fit model
        Vector_output_Model.model.fit(X, y, epochs=self.epochs, verbose=0)

    # demonstrate prediction
    def predict(self, pred, n_steps_in, n_features):
        """Return the multistep forecast for one input window ``pred``."""
        x_input = array(pred)
        x_input = x_input.reshape((1, n_steps_in, n_features))
        yhat = Vector_output_Model.model.predict(x_input, verbose=0)
        return yhat


class Encoder_decoder_Model(metaclass=Singleton):
    """LSTM encoder-decoder for multistep forecasting."""

    model = Sequential()

    def __init__(self, epoch):
        self.epochs = epoch

    def build(self, n_steps_in, n_features, n_steps_out, X, y):
        """Create, compile and fit the encoder-decoder model on (X, y)."""
        X = X.reshape((X.shape[0], X.shape[1], n_features))
        # The decoder emits n_steps_out timesteps of one value each
        # (TimeDistributed(Dense(1))), so targets must be shaped
        # (samples, n_steps_out, 1).  The original reshaped y with
        # X.shape[1] (= n_steps_in), which breaks whenever
        # n_steps_in != n_steps_out.
        y = y.reshape((y.shape[0], n_steps_out, 1))
        Encoder_decoder_Model.model = Sequential()
        Encoder_decoder_Model.model.add(
            LSTM(100, activation='relu',
                 input_shape=(n_steps_in, n_features)))
        # Repeat the encoded vector once per output timestep.
        Encoder_decoder_Model.model.add(RepeatVector(n_steps_out))
        Encoder_decoder_Model.model.add(
            LSTM(100, activation='relu', return_sequences=True))
        Encoder_decoder_Model.model.add(TimeDistributed(Dense(1)))
        Encoder_decoder_Model.model.compile(optimizer='adam', loss='mse')
        # fit model
        Encoder_decoder_Model.model.fit(X, y, epochs=self.epochs, verbose=0)

    # demonstrate prediction
    def predict(self, pred, n_steps_in, n_features):
        """Return the multistep forecast for one input window ``pred``."""
        x_input = array(pred)
        x_input = x_input.reshape((1, n_steps_in, n_features))
        yhat = Encoder_decoder_Model.model.predict(x_input, verbose=0)
        return yhat
PypiClean
/EISeg-1.1.1-py3-none-any.whl/eiseg/widget/create.py
from qtpy.QtWidgets import QDockWidget
from qtpy import QtCore, QtGui, QtWidgets
from qtpy.QtCore import Qt


## Create a static text label.
def create_text(parent, text_name=None, text_text=None):
    label = QtWidgets.QLabel(parent)
    if text_name is not None:
        label.setObjectName(text_name)
    if text_text is not None:
        label.setText(text_text)
    return label


## Create an editable line edit (integer input, at most 5 characters).
def create_edit(parent, text_name=None, text_text=None):
    editor = QtWidgets.QLineEdit(parent)
    if text_name is not None:
        editor.setObjectName(text_name)
    if text_text is not None:
        editor.setText(text_text)
    editor.setValidator(QtGui.QIntValidator())
    editor.setMaxLength(5)
    return editor


## Create a push button, optionally with an icon and a keyboard shortcut.
def create_button(parent, btn_name, btn_text, ico_path=None, curt=None):
    button = QtWidgets.QPushButton(parent)
    # Fixed height (min 40px), horizontally shrinkable.
    policy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum,
                                   QtWidgets.QSizePolicy.Fixed)
    policy.setHorizontalStretch(0)
    policy.setVerticalStretch(0)
    policy.setHeightForWidth(button.sizePolicy().hasHeightForWidth())
    button.setSizePolicy(policy)
    button.setMinimumSize(QtCore.QSize(0, 40))
    button.setObjectName(btn_name)
    if ico_path is not None:
        button.setIcon(QtGui.QIcon(ico_path))
    button.setText(btn_text)
    if curt is not None:
        button.setShortcut(curt)
    return button


## Create a labelled slider row; returns (slider, value label, layout).
def create_slider(parent,
                  sld_name,
                  text_name,
                  text,
                  default_value=50,
                  max_value=100,
                  min_value=0,
                  text_rate=0.01,
                  edit=False):
    row = QtWidgets.QHBoxLayout()
    row.addWidget(create_text(parent, None, text))
    # The current value is shown either as a read-only label or as an
    # editable field, depending on ``edit``.
    factory = create_text if edit is False else create_edit
    value_label = factory(parent, text_name, str(default_value * text_rate))
    value_label.setMaximumWidth(100)
    row.addWidget(value_label)
    row.addStretch()
    slider = QtWidgets.QSlider(parent)
    # QSlider only supports integer positions, so values are kept scaled
    # up and mapped back through ``text_rate`` for display.
    slider.setMaximum(max_value)
    slider.setMinimum(min_value)
    slider.setProperty("value", default_value)
    slider.setOrientation(QtCore.Qt.Horizontal)
    slider.setObjectName(sld_name)
    slider.setStyleSheet("""
        QSlider::sub-page:horizontal {
            background: #9999F1
        }

        QSlider::handle:horizontal {
            background: #3334E3;
            width: 12px;
            border-radius: 4px;
        }
        """)
    # Keep a handle on the display widget so callers can update it.
    slider.textLab = value_label
    return slider, value_label, row


class DockWidget(QDockWidget):
    """Dock panel whose background colour signals its docked/floating state."""

    def __init__(self, parent, name, text):
        super().__init__(parent=parent)
        self.setObjectName(name)
        self.setAllowedAreas(Qt.RightDockWidgetArea | Qt.LeftDockWidgetArea)
        # Closing is deliberately disabled; visibility is toggled from the
        # display menu instead.
        self.setFeatures(QDockWidget.DockWidgetMovable |
                         QDockWidget.DockWidgetFloatable)
        policy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred,
                                       QtWidgets.QSizePolicy.Preferred)
        policy.setHorizontalStretch(0)
        policy.setVerticalStretch(0)
        policy.setHeightForWidth(self.sizePolicy().hasHeightForWidth())
        self.setSizePolicy(policy)
        self.setMinimumWidth(230)
        self.setWindowTitle(text)
        self.setStyleSheet("QDockWidget { background-color:rgb(204,204,248); }")
        self.topLevelChanged.connect(self.changeBackColor)

    def changeBackColor(self, isFloating):
        # White while floating, pale purple while docked.
        if isFloating:
            self.setStyleSheet(
                "QDockWidget { background-color:rgb(255,255,255); }")
        else:
            self.setStyleSheet(
                "QDockWidget { background-color:rgb(204,204,248); }")


## Create a dock wrapping ``widget``.
def creat_dock(parent, name, text, widget):
    dock = DockWidget(parent, name, text)
    dock.setMinimumWidth(300)  # Uniform size
    dock.setWidget(widget)
    return dock
PypiClean
/AIS2.py-2.2.1.tar.gz/AIS2.py-2.2.1/README.rst
AIS2.py
=======

.. image:: https://img.shields.io/pypi/v/AIS2.py.svg
    :target: https://pypi.org/project/AIS2.py
    :alt: PyPI version

.. image:: https://img.shields.io/pypi/pyversions/AIS2.py.svg
    :target: https://pypi.org/project/AIS2.py
    :alt: Python versions

.. image:: https://github.com/seantis/AIS2.py/actions/workflows/python-tox.yaml/badge.svg
    :target: https://github.com/seantis/AIS2.py/actions
    :alt: Tests

.. image:: https://readthedocs.org/projects/ais2py/badge/?version=latest
    :target: https://ais2py.readthedocs.io/en/latest/?badge=latest
    :alt: Documentation Status

.. image:: https://codecov.io/gh/seantis/AIS2.py/branch/master/graph/badge.svg?token=NRPFO5L0PG
    :target: https://codecov.io/gh/seantis/AIS2.py
    :alt: Codecov.io

.. image:: https://img.shields.io/badge/pre--commit-enabled-brightgreen?logo=pre-commit&logoColor=white
    :target: https://github.com/pre-commit/pre-commit
    :alt: pre-commit

AIS.py: a Python interface for the Swisscom All-in Signing Service (aka AIS).

AIS2.py is a fork created to get rid of the licensing woes of the iText
dependency by replacing it with pyHanko. Furthermore, the API was slightly
adjusted to be more flexible, so buffers can be passed around rather than
files that need to exist on the filesystem.

AIS2.py works like this:

.. code-block:: python

    >>> from AIS import AIS, PDF
    >>> client = AIS('alice', 'a_secret', 'a.crt', 'a.key')
    >>> pdf = PDF('source.pdf')
    >>> client.sign_one_pdf(pdf)
    >>> with open('target.pdf', 'wb') as fp:
    ...     fp.write(pdf.out_stream.getvalue())
    ...

License
-------

Copyright (C) 2016 Camptocamp SA

This program is free software: you can redistribute it and/or modify it under
the terms of the GNU Affero General Public License as published by the Free
Software Foundation, either version 3 of the License, or (at your option) any
later version.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE.
See the GNU Affero General Public License for more details.
PypiClean
/DLTA-AI-1.1.tar.gz/DLTA-AI-1.1/DLTA_AI_app/mmdetection/configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py
# MMDetection config: Grid R-CNN with a ResNet-50 + FPN backbone on COCO.
# Grid R-CNN localizes boxes with a grid-point head instead of box
# regression: note with_reg=False in bbox_head and the grid_head below.
_base_ = [
    '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
    type='GridRCNN',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),  # feed all four stages into the FPN
        frozen_stages=1,  # freeze the stem and first stage
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,  # keep BatchNorm statistics fixed during training
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[8],
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
    roi_head=dict(
        type='GridRoIHead',
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        # Classification-only box head: localization is delegated to the
        # grid head (with_reg=False).
        bbox_head=dict(
            type='Shared2FCBBoxHead',
            with_reg=False,
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=80,
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[0., 0., 0., 0.],
                target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False),
        # Grid head uses larger 14x14 RoI features than the 7x7 box head.
        grid_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        grid_head=dict(
            type='GridHead',
            grid_points=9,  # 3x3 grid of localization points per box
            num_convs=8,
            in_channels=256,
            point_feat_channels=64,
            norm_cfg=dict(type='GN', num_groups=36),  # GroupNorm, per filename
            loss_grid=dict(
                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=15))),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(
            nms_pre=2000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.5,
                min_pos_iou=0.5,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True),
            pos_radius=1,
            pos_weight=-1,
            max_num_grid=192,  # cap on RoIs fed to the grid head per batch
            debug=False)),
    test_cfg=dict(
        rpn=dict(
            nms_pre=1000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            score_thr=0.03,
            nms=dict(type='nms', iou_threshold=0.3),
            max_per_img=100)))
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
# Long linear warmup (3665 iters, starting at lr/80), then step decay
# at epochs 17 and 23; training runs for 25 epochs in total.
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=3665,
    warmup_ratio=1.0 / 80,
    step=[17, 23])
runner = dict(type='EpochBasedRunner', max_epochs=25)
PypiClean
/BEAT_Guang-1.0.1-py3-none-any.whl/econml/dml/causal_forest.py
from warnings import warn import numpy as np from sklearn.linear_model import LogisticRegressionCV from sklearn.base import clone, BaseEstimator from sklearn.preprocessing import FunctionTransformer from sklearn.pipeline import Pipeline from sklearn.model_selection import train_test_split from itertools import product from .dml import _BaseDML from .dml import _FirstStageWrapper from ..sklearn_extensions.linear_model import WeightedLassoCVWrapper from ..sklearn_extensions.model_selection import WeightedStratifiedKFold from ..inference import NormalInferenceResults from ..inference._inference import Inference from ..utilities import (add_intercept, shape, check_inputs, check_input_arrays, _deprecate_positional, cross_product, Summary) from ..grf import CausalForest, MultiOutputGRF from .._cate_estimator import LinearCateEstimator from .._shap import _shap_explain_multitask_model_cate from .._ortho_learner import _OrthoLearner class _CausalForestFinalWrapper: def __init__(self, model_final, featurizer, discrete_treatment, drate): self._model = clone(model_final, safe=False) self._original_featurizer = clone(featurizer, safe=False) self._featurizer = self._original_featurizer self._discrete_treatment = discrete_treatment self._drate = drate def _combine(self, X, fitting=True): if X is not None: if self._featurizer is not None: F = self._featurizer.fit_transform(X) if fitting else self._featurizer.transform(X) else: F = X else: raise AttributeError("Cannot use this method with X=None. 
Consider " "using the LinearDML estimator.") return F def _ate_and_stderr(self, drpreds, mask=None): if mask is not None: drpreds = drpreds[mask] point = np.nanmean(drpreds, axis=0).reshape(self._d_y + self._d_t) nonnan = np.sum(~np.isnan(drpreds)) stderr = (np.nanstd(drpreds, axis=0) / np.sqrt(nonnan)).reshape(self._d_y + self._d_t) return point, stderr def fit(self, X, T, T_res, Y_res, sample_weight=None, freq_weight=None, sample_var=None, groups=None): # Track training dimensions to see if Y or T is a vector instead of a 2-dimensional array self._d_t = shape(T_res)[1:] self._d_y = shape(Y_res)[1:] fts = self._combine(X) if T_res.ndim == 1: T_res = T_res.reshape((-1, 1)) if Y_res.ndim == 1: Y_res = Y_res.reshape((-1, 1)) self._model.fit(fts, T_res, Y_res, sample_weight=sample_weight) # Fit a doubly robust average effect if self._discrete_treatment and self._drate: oob_preds = self._model.oob_predict(fts) self._oob_preds = oob_preds if np.any(np.isnan(oob_preds)): warn("Could not generate out-of-bag predictions on some training data. " "Consider increasing the number of trees. 
`ate_` results will take the " "average of the subset of training data for which out-of-bag predictions " "where available.") residuals = Y_res - np.einsum('ijk,ik->ij', oob_preds, T_res) propensities = T - T_res VarT = np.clip(propensities * (1 - propensities), 1e-2, np.inf) drpreds = oob_preds drpreds += cross_product(residuals, T_res / VarT).reshape((-1, Y_res.shape[1], T_res.shape[1])) drpreds[np.isnan(oob_preds)] = np.nan self.ate_, self.ate_stderr_ = self._ate_and_stderr(drpreds) self.att_ = [] self.att_stderr_ = [] att, stderr = self._ate_and_stderr(drpreds, np.all(T == 0, axis=1)) self.att_.append(att) self.att_stderr_.append(stderr) for t in range(self._d_t[0]): att, stderr = self._ate_and_stderr(drpreds, (T[:, t] == 1)) self.att_.append(att) self.att_stderr_.append(stderr) return self def predict(self, X): return self._model.predict(self._combine(X, fitting=False)).reshape((-1,) + self._d_y + self._d_t) @property def ate_(self): if not self._discrete_treatment: raise AttributeError("Doubly Robust ATE calculation on training data " "is available only on discrete treatments!") if not self._drate: raise AttributeError("Doubly Robust ATE calculation on training data " "is available only when `drate=True`!") return self._ate @ate_.setter def ate_(self, value): self._ate = value @property def ate_stderr_(self): if not self._discrete_treatment: raise AttributeError("Doubly Robust ATE calculation on training data " "is available only on discrete treatments!") if not self._drate: raise AttributeError("Doubly Robust ATE calculation on training data " "is available only when `drate=True`!") return self._ate_stderr @ate_stderr_.setter def ate_stderr_(self, value): self._ate_stderr = value @property def att_(self): if not self._discrete_treatment: raise AttributeError("Doubly Robust ATT calculation on training data " "is available only on discrete treatments!") if not self._drate: raise AttributeError("Doubly Robust ATT calculation on training data " "is available 
only when `drate=True`!") return self._att @att_.setter def att_(self, value): self._att = value @property def att_stderr_(self): if not self._discrete_treatment: raise AttributeError("Doubly Robust ATT calculation on training data " "is available only on discrete treatments!") if not self._drate: raise AttributeError("Doubly Robust ATT calculation on training data " "is available only when `drate=True`!") return self._att_stderr @att_stderr_.setter def att_stderr_(self, value): self._att_stderr = value class _GenericSingleOutcomeModelFinalWithCovInference(Inference): def prefit(self, estimator, *args, **kwargs): self.model_final = estimator.model_final_ self.featurizer = estimator.featurizer_ if hasattr(estimator, 'featurizer_') else None def fit(self, estimator, *args, **kwargs): # once the estimator has been fit, it's kosher to store d_t here # (which needs to have been expanded if there's a discrete treatment) self._est = estimator self._d_t = estimator._d_t self._d_y = estimator._d_y self.d_t = self._d_t[0] if self._d_t else 1 self.d_y = self._d_y[0] if self._d_y else 1 def const_marginal_effect_interval(self, X, *, alpha=0.05): return self.const_marginal_effect_inference(X).conf_int(alpha=alpha) def const_marginal_effect_inference(self, X): if X is None: raise ValueError("This inference method currently does not support X=None!") if self.featurizer is not None: X = self.featurizer.transform(X) pred, pred_var = self.model_final.predict_and_var(X) pred = pred.reshape((-1,) + self._d_y + self._d_t) pred_stderr = np.sqrt(np.diagonal(pred_var, axis1=2, axis2=3).reshape((-1,) + self._d_y + self._d_t)) return NormalInferenceResults(d_t=self.d_t, d_y=self.d_y, pred=pred, pred_stderr=pred_stderr, mean_pred_stderr=None, inf_type='effect') def effect_interval(self, X, *, T0, T1, alpha=0.05): return self.effect_inference(X, T0=T0, T1=T1).conf_int(alpha=alpha) def effect_inference(self, X, *, T0, T1): if X is None: raise ValueError("This inference method currently does 
not support X=None!") X, T0, T1 = self._est._expand_treatments(X, T0, T1) if self.featurizer is not None: X = self.featurizer.transform(X) dT = T1 - T0 if dT.ndim == 1: dT = dT.reshape((-1, 1)) pred, pred_var = self.model_final.predict_projection_and_var(X, dT) pred = pred.reshape((-1,) + self._d_y) pred_stderr = np.sqrt(pred_var.reshape((-1,) + self._d_y)) return NormalInferenceResults(d_t=None, d_y=self.d_y, pred=pred, pred_stderr=pred_stderr, mean_pred_stderr=None, inf_type='effect') class CausalForestDML(_BaseDML): """A Causal Forest [cfdml1]_ combined with double machine learning based residualization of the treatment and outcome variables. It fits a forest that solves the local moment equation problem: .. code-block:: E[ (Y - E[Y|X, W] - <theta(x), T - E[T|X, W]> - beta(x)) (T;1) | X=x] = 0 where E[Y|X, W] and E[T|X, W] are fitted in a first stage in a cross-fitting manner. Parameters ---------- model_y: estimator or 'auto', optional (default is 'auto') The estimator for fitting the response to the features. Must implement `fit` and `predict` methods. If 'auto' :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` will be chosen. model_t: estimator or 'auto', optional (default is 'auto') The estimator for fitting the treatment to the features. If estimator, it must implement `fit` and `predict` methods; If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` will be applied for discrete treatment, and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` will be applied for continuous treatment. featurizer : :term:`transformer`, optional, default None Must support fit_transform and transform. Used to create composite features in the final CATE regression. It is ignored if X is None. The final CATE will be trained on the outcome of featurizer.fit_transform(X). If featurizer=None, then CATE is trained on X. 
discrete_treatment: bool, optional (default is ``False``) Whether the treatment values should be treated as categorical, rather than continuous, quantities categories: 'auto' or list, default 'auto' The categories to use when encoding discrete treatments (or 'auto' to use the unique sorted values). The first category will be treated as the control treatment. cv: int, cross-validation generator or an iterable, optional (Default=2) Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - :term:`CV splitter` - An iterable yielding (train, test) splits as arrays of indices. For integer/None inputs, if the treatment is discrete :class:`~sklearn.model_selection.StratifiedKFold` is used, else, :class:`~sklearn.model_selection.KFold` is used (with a random shuffle in either case). Unless an iterable is used, we call `split(X,T)` to generate the splits. mc_iters: int, optional (default=None) The number of times to rerun the first stage models to reduce the variance of the nuisances. mc_agg: {'mean', 'median'}, optional (default='mean') How to aggregate the nuisance value for each sample across the `mc_iters` monte carlo iterations of cross-fitting. drate : bool, default=True Whether to calculate doubly robust average treatment effect estimate on training data at fit time. This happens only if `discrete_treatment=True`. Doubly robust ATE estimation on the training data is not available for continuous treatments. n_estimators : int, default=100 Number of trees criterion : {``"mse"``, ``"het"``}, default="mse" The function to measure the quality of a split. Supported criteria are ``"mse"`` for the mean squared error in a linear moment estimation tree and ``"het"`` for heterogeneity score. - The ``"mse"`` criterion finds splits that minimize the score .. 
code-block:: sum_{child} E[(Y - <theta(child), T> - beta(child))^2 | X=child] weight(child) Internally, for the case of more than two treatments or for the case of two treatments with ``fit_intercept=True`` then this criterion is approximated by computationally simpler variants for computational purposes. In particular, it is replaced by: .. code-block:: sum_{child} weight(child) * rho(child).T @ E[(T;1) @ (T;1).T | X in child] @ rho(child) where: .. code-block:: rho(child) := E[(T;1) @ (T;1).T | X in parent]^{-1} * E[(Y - <theta(x), T> - beta(x)) (T;1) | X in child] This can be thought as a heterogeneity inducing score, but putting more weight on scores with a large minimum eigenvalue of the child jacobian ``E[(T;1) @ (T;1).T | X in child]``, which leads to smaller variance of the estimate and stronger identification of the parameters. - The "het" criterion finds splits that maximize the pure parameter heterogeneity score .. code-block:: sum_{child} weight(child) * rho(child)[:n_T].T @ rho(child)[:n_T] This can be thought as an approximation to the ideal heterogeneity score: .. code-block:: weight(left) * weight(right) || theta(left) - theta(right)||_2^2 / weight(parent)^2 as outlined in [cfdml1]_ max_depth : int, default=None The maximum depth of the tree. If None, then nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_split samples. min_samples_split : int or float, default=10 The minimum number of samples required to split an internal node: - If int, then consider `min_samples_split` as the minimum number. - If float, then `min_samples_split` is a fraction and `ceil(min_samples_split * n_samples)` are the minimum number of samples for each split. min_samples_leaf : int or float, default=5 The minimum number of samples required to be at a leaf node. A split point at any depth will only be considered if it leaves at least ``min_samples_leaf`` training samples in each of the left and right branches. 
This may have the effect of smoothing the model, especially in regression. - If int, then consider `min_samples_leaf` as the minimum number. - If float, then `min_samples_leaf` is a fraction and `ceil(min_samples_leaf * n_samples)` are the minimum number of samples for each node. min_weight_fraction_leaf : float, default=0.0 The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node. Samples have equal weight when sample_weight is not provided. min_var_fraction_leaf : None or float in (0, 1], default=None A constraint on some proxy of the variation of the treatment vector that should be contained within each leaf as a percentage of the total variance of the treatment vector on the whole sample. This avoids performing splits where either the variance of the treatment is small and hence the local parameter is not well identified and has high variance. The proxy of variance is different for different criterion, primarily for computational efficiency reasons. If ``criterion='het'``, then this constraint translates to:: for all i in {1, ..., T.shape[1]}: Var(T[i] | X in leaf) > `min_var_fraction_leaf` * Var(T[i]) If ``criterion='mse'``, because the criterion stores more information about the leaf for every candidate split, then this constraint imposes further constraints on the pairwise correlations of different coordinates of each treatment, i.e.:: for all i neq j: sqrt( Var(T[i]|X in leaf) * Var(T[j]|X in leaf) * ( 1 - rho(T[i], T[j]| in leaf)^2 ) ) > `min_var_fraction_leaf` sqrt( Var(T[i]) * Var(T[j]) * (1 - rho(T[i], T[j])^2 ) ) where rho(X, Y) is the Pearson correlation coefficient of two random variables X, Y. Thus this constraint also enforces that no two pairs of treatments be very co-linear within a leaf. This extra constraint primarily has bite in the case of more than two input treatments and also avoids leafs where the parameter estimate has large variance due to local co-linearities of the treatments. 
min_var_leaf_on_val : bool, default=False Whether the `min_var_fraction_leaf` constraint should also be enforced to hold on the validation set of the honest split too. If ``min_var_leaf=None`` then this flag does nothing. Setting this to True should be done with caution, as this partially violates the honesty structure, since the treatment variable of the validation set is used to inform the split structure of the tree. However, this is a benign dependence as it only uses local correlation structure of the treatment T to decide whether a split is feasible. max_features : int, float or {"auto", "sqrt", "log2"}, default=None The number of features to consider when looking for the best split: - If int, then consider `max_features` features at each split. - If float, then `max_features` is a fraction and `int(max_features * n_features)` features are considered at each split. - If "auto", then `max_features=n_features`. - If "sqrt", then `max_features=sqrt(n_features)`. - If "log2", then `max_features=log2(n_features)`. - If None, then `max_features=n_features`. Note: the search for a split does not stop until at least one valid partition of the node samples is found, even if it requires to effectively inspect more than ``max_features`` features. min_impurity_decrease : float, default=0.0 A node will be split if this split induces a decrease of the impurity greater than or equal to this value. The weighted impurity decrease equation is the following:: N_t / N * (impurity - N_t_R / N_t * right_impurity - N_t_L / N_t * left_impurity) where ``N`` is the total number of samples, ``N_t`` is the number of samples at the current node, ``N_t_L`` is the number of samples in the left child, and ``N_t_R`` is the number of samples in the right child. ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum, if ``sample_weight`` is passed. 
max_samples : int or float in (0, 1], default=.45, The number of samples to use for each subsample that is used to train each tree: - If int, then train each tree on `max_samples` samples, sampled without replacement from all the samples - If float, then train each tree on `ceil(`max_samples` * `n_samples`)`, sampled without replacement from all the samples. If ``inference=True``, then `max_samples` must either be an integer smaller than `n_samples//2` or a float less than or equal to .5. min_balancedness_tol: float in [0, .5], default=.45 How imbalanced a split we can tolerate. This enforces that each split leaves at least (.5 - min_balancedness_tol) fraction of samples on each side of the split; or fraction of the total weight of samples, when sample_weight is not None. Default value, ensures that at least 5% of the parent node weight falls in each side of the split. Set it to 0.0 for no balancedness and to .5 for perfectly balanced splits. For the formal inference theory to be valid, this has to be any positive constant bounded away from zero. honest : bool, default=True Whether each tree should be trained in an honest manner, i.e. the training set is split into two equal sized subsets, the train and the val set. All samples in train are used to create the split structure and all samples in val are used to calculate the value of each node in the tree. inference : bool, default=True Whether inference (i.e. confidence interval construction and uncertainty quantification of the estimates) should be enabled. If ``inference=True``, then the estimator uses a bootstrap-of-little-bags approach to calculate the covariance of the parameter vector, with am objective Bayesian debiasing correction to ensure that variance quantities are positive. fit_intercept : bool, default=True Whether we should fit an intercept nuisance parameter beta(x). subforest_size : int, default=4, The number of trees in each sub-forest that is used in the bootstrap-of-little-bags calculation. 
The parameter `n_estimators` must be divisible by `subforest_size`. Should typically be a small constant. n_jobs : int or None, default=-1 The number of parallel jobs to be used for parallelism; follows joblib semantics. `n_jobs=-1` means all available cpu cores. `n_jobs=None` means no parallelism. random_state : int, RandomState instance or None, default=None Controls the randomness of the estimator. The features are always randomly permuted at each split. When ``max_features < n_features``, the algorithm will select ``max_features`` at random at each split before finding the best split among them. But the best found split may vary across different runs, even if ``max_features=n_features``. That is the case, if the improvement of the criterion is identical for several splits and one split has to be selected at random. To obtain a deterministic behaviour during fitting, ``random_state`` has to be fixed to an integer. verbose : int, default=0 Controls the verbosity when fitting and predicting. Attributes ---------- ate_ : ndarray of shape (n_outcomes, n_treatments) The average constant marginal treatment effect of each treatment for each outcome, averaged over the training data and with a doubly robust correction. Available only when `discrete_treatment=True` and `oob=True`. ate_stderr_ : ndarray of shape (n_outcomes, n_treatments) The standard error of the `ate_` attribute. feature_importances_ : ndarray of shape (n_features,) The feature importances based on the amount of parameter heterogeneity they create. The higher, the more important the feature. The importance of a feature is computed as the (normalized) total heterogeneity that the feature creates. Each split that the feature was chosen adds:: parent_weight * (left_weight * right_weight) * mean((value_left[k] - value_right[k])**2) / parent_weight**2 to the importance of the feature. Each such quantity is also weighted by the depth of the split. 
    By default splits below `max_depth=4` are not used in this calculation and also
    each split at depth `depth`, is re-weighted by 1 / (1 + `depth`)**2.0. See the
    method ``feature_importances`` for a method that allows one to change these defaults.

    References
    ----------
    .. [cfdml1] Athey, Susan, Julie Tibshirani, and Stefan Wager. "Generalized random forests."
        The Annals of Statistics 47.2 (2019): 1148-1178
        https://arxiv.org/pdf/1610.01271.pdf
    """

    def __init__(self, *,
                 model_y='auto',
                 model_t='auto',
                 featurizer=None,
                 discrete_treatment=False,
                 categories='auto',
                 cv=2,
                 mc_iters=None,
                 mc_agg='mean',
                 drate=True,
                 n_estimators=100,
                 criterion="mse",
                 max_depth=None,
                 min_samples_split=10,
                 min_samples_leaf=5,
                 min_weight_fraction_leaf=0.,
                 min_var_fraction_leaf=None,
                 min_var_leaf_on_val=False,
                 max_features="auto",
                 min_impurity_decrease=0.,
                 max_samples=.45,
                 min_balancedness_tol=.45,
                 honest=True,
                 inference=True,
                 fit_intercept=True,
                 subforest_size=4,
                 n_jobs=-1,
                 random_state=None,
                 verbose=0):
        # Record every constructor argument on self (scikit-learn convention:
        # __init__ only stores parameters; validation/consumption happens later,
        # when the _gen_* factories build the nuisance and final models).
        # TODO: consider whether we need more care around stateful featurizers,
        # since we clone it and fit separate copies
        self.drate = drate
        self.model_y = clone(model_y, safe=False)
        self.model_t = clone(model_t, safe=False)
        self.featurizer = clone(featurizer, safe=False)
        # NOTE(review): this stores `discrete_treatment` under the name
        # `discrete_instrument` — looks like a copy/paste slip, since the proper
        # `discrete_treatment` attribute is set by super().__init__ below and no
        # instrument is involved in this estimator. Confirm before changing.
        self.discrete_instrument = discrete_treatment
        self.categories = categories
        self.cv = cv
        self.n_estimators = n_estimators
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.min_var_fraction_leaf = min_var_fraction_leaf
        self.min_var_leaf_on_val = min_var_leaf_on_val
        self.max_features = max_features
        self.min_impurity_decrease = min_impurity_decrease
        self.max_samples = max_samples
        self.min_balancedness_tol = min_balancedness_tol
        self.honest = honest
        self.inference = inference
        self.fit_intercept = fit_intercept
        self.subforest_size = subforest_size
        self.n_jobs = n_jobs
        self.verbose = verbose
        super().__init__(discrete_treatment=discrete_treatment,
                         categories=categories,
                         cv=cv,
                         mc_iters=mc_iters,
                         mc_agg=mc_agg,
                         random_state=random_state)

    def _get_inference_options(self):
        # Extend the base inference options with bootstrap-of-little-bags ('blb'),
        # which is also what 'auto' resolves to for this estimator.
        options = super()._get_inference_options()
        options.update(blb=_GenericSingleOutcomeModelFinalWithCovInference)
        options.update(auto=_GenericSingleOutcomeModelFinalWithCovInference)
        return options

    def _gen_featurizer(self):
        # Fresh clone so each consumer fits its own copy of the featurizer.
        return clone(self.featurizer, safe=False)

    def _gen_model_y(self):
        # Build the first-stage outcome model; 'auto' falls back to a weighted LassoCV.
        if self.model_y == 'auto':
            model_y = WeightedLassoCVWrapper(random_state=self.random_state)
        else:
            model_y = clone(self.model_y, safe=False)
        return _FirstStageWrapper(model_y, True, self._gen_featurizer(), False, self.discrete_treatment)

    def _gen_model_t(self):
        # Build the first-stage treatment model; 'auto' picks a classifier for
        # discrete treatments and a weighted LassoCV otherwise.
        if self.model_t == 'auto':
            if self.discrete_treatment:
                model_t = LogisticRegressionCV(cv=WeightedStratifiedKFold(random_state=self.random_state),
                                               random_state=self.random_state)
            else:
                model_t = WeightedLassoCVWrapper(random_state=self.random_state)
        else:
            model_t = clone(self.model_t, safe=False)
        return _FirstStageWrapper(model_t, False, self._gen_featurizer(), False, self.discrete_treatment)

    def _gen_model_final(self):
        # The final-stage model: one CausalForest per outcome, wrapped so that
        # multiple outcomes are handled uniformly. warm_start is fixed to False.
        return MultiOutputGRF(CausalForest(n_estimators=self.n_estimators,
                                           criterion=self.criterion,
                                           max_depth=self.max_depth,
                                           min_samples_split=self.min_samples_split,
                                           min_samples_leaf=self.min_samples_leaf,
                                           min_weight_fraction_leaf=self.min_weight_fraction_leaf,
                                           min_var_fraction_leaf=self.min_var_fraction_leaf,
                                           min_var_leaf_on_val=self.min_var_leaf_on_val,
                                           max_features=self.max_features,
                                           min_impurity_decrease=self.min_impurity_decrease,
                                           max_samples=self.max_samples,
                                           min_balancedness_tol=self.min_balancedness_tol,
                                           honest=self.honest,
                                           inference=self.inference,
                                           fit_intercept=self.fit_intercept,
                                           subforest_size=self.subforest_size,
                                           n_jobs=self.n_jobs,
                                           random_state=self.random_state,
                                           verbose=self.verbose,
                                           warm_start=False))

    def _gen_rlearner_model_final(self):
        # Wrap the final forest so the R-learner machinery can use it; `drate`
        # controls whether doubly robust ATE/ATT quantities are computed.
        return _CausalForestFinalWrapper(self._gen_model_final(), self._gen_featurizer(),
                                         self.discrete_treatment, self.drate)

    @property
    def tunable_params(self):
        # Names of the final-stage forest hyperparameters that `tune` may vary.
        return ['n_estimators', 'criterion', 'max_depth', 'min_samples_split',
                'min_samples_leaf', 'min_weight_fraction_leaf', 'min_var_fraction_leaf',
                'min_var_leaf_on_val', 'max_features', 'min_impurity_decrease',
                'max_samples', 'min_balancedness_tol', 'honest', 'inference',
                'fit_intercept', 'subforest_size']

    def tune(self, Y, T, *, X=None, W=None,
             sample_weight=None, groups=None,
             params='auto'):
        """
        Tunes the major hyperparameters of the final stage causal forest based on
        out-of-sample R-score performance. It trains small forests of size 100 trees
        on a grid of parameters and tests the out of sample R-score. After the function
        is called, then all parameters of `self` have been set to the optimal hyperparameters
        found. The estimator however remains un-fitted, so you need to call fit afterwards
        to fit the estimator with the chosen hyperparameters. The list of tunable parameters
        can be accessed via the property `tunable_params`.

        Parameters
        ----------
        Y: (n × d_y) matrix or vector of length n
            Outcomes for each sample
        T: (n × dₜ) matrix or vector of length n
            Treatments for each sample
        X: (n × dₓ) matrix
            Features for each sample
        W: optional (n × d_w) matrix
            Controls for each sample
        sample_weight: optional (n,) vector
            Weights for each row
        groups: (n,) vector, optional
            All rows corresponding to the same group will be kept together during splitting.
            If groups is not None, the `cv` argument passed to this class's initializer
            must support a 'groups' argument to its split method.
        params: dict or 'auto', optional (default='auto')
            A dictionary that contains the grid of hyperparameters to try, i.e.
            {'param1': [value1, value2, ...], 'param2': [value1, value2, ...], ...}
            If `params='auto'`, then a default grid is used.

        Returns
        -------
        self : CausalForestDML object
            The tuned causal forest object. This is the same object (not a copy) as the
            original one, but where all parameters of the object have been set to the
            best performing parameters from the tuning grid.
        """
        from ..score import RScorer  # import here to avoid circular import issue
        Y, T, X, W, sample_weight, groups = check_input_arrays(Y, T, X, W, sample_weight, groups)
        if params == 'auto':
            params = {'min_weight_fraction_leaf': [0.0001, .01],
                      'max_depth': [3, 5, None],
                      'min_var_fraction_leaf': [0.001, .01]}
        else:
            # If custom param grid, check that only estimator parameters are being altered
            estimator_param_names = self.tunable_params
            for key in params.keys():
                if key not in estimator_param_names:
                    raise ValueError(f"Parameter `{key}` is not an tunable causal forest parameter.")
        strata = None
        if self.discrete_treatment:
            strata = self._strata(Y, T, X=X, W=W, sample_weight=sample_weight, groups=groups)
        # use 0.699 instead of 0.7 as train size so that if there are 5 examples in a stratum, we get 2 in test
        train, test = train_test_split(np.arange(Y.shape[0]), train_size=0.699,
                                       random_state=self.random_state, stratify=strata)
        ytrain, yval, Ttrain, Tval = Y[train], Y[test], T[train], T[test]
        Xtrain, Xval = (X[train], X[test]) if X is not None else (None, None)
        Wtrain, Wval = (W[train], W[test]) if W is not None else (None, None)
        groups_train, groups_val = (groups[train], groups[test]) if groups is not None else (None, None)
        if sample_weight is not None:
            sample_weight_train, sample_weight_val = sample_weight[train], sample_weight[test]
        else:
            sample_weight_train, sample_weight_val = None, None

        # Use a small (100-tree, no-inference) clone of self for the grid search.
        est = clone(self, safe=False)
        est.n_estimators = 100
        est.inference = False

        scorer = RScorer(model_y=est.model_y, model_t=est.model_t,
                         discrete_treatment=est.discrete_treatment, categories=est.categories,
                         cv=est.cv, mc_iters=est.mc_iters, mc_agg=est.mc_agg,
                         random_state=est.random_state)
        scorer.fit(yval, Tval, X=Xval, W=Wval, sample_weight=sample_weight_val, groups=groups_val)

        names = params.keys()
        scores = []
        for it, values in enumerate(product(*params.values())):
            for key, value in zip(names, values):
                setattr(est, key, value)
            if it == 0:
                # First grid point: full fit with cached first-stage results ...
                est.fit(ytrain, Ttrain, X=Xtrain, W=Wtrain,
                        sample_weight=sample_weight_train, groups=groups_train, cache_values=True)
            else:
                # ... subsequent points only refit the final stage (reuses cache).
                est.refit_final()
            scores.append((scorer.score(est), tuple(zip(names, values))))
        bestind = np.argmax([s[0] for s in scores])
        _, best_params = scores[bestind]
        # Copy the winning hyperparameters back onto self (self stays un-fitted).
        for key, value in best_params:
            setattr(self, key, value)

        return self

    # override only so that we can update the docstring to indicate support for `blb`
    def fit(self, Y, T, *, X=None, W=None, sample_weight=None, groups=None,
            cache_values=False, inference='auto'):
        """
        Estimate the counterfactual model from data, i.e. estimates functions τ(·,·,·), ∂τ(·,·).

        Parameters
        ----------
        Y: (n × d_y) matrix or vector of length n
            Outcomes for each sample
        T: (n × dₜ) matrix or vector of length n
            Treatments for each sample
        X: (n × dₓ) matrix
            Features for each sample
        W: optional (n × d_w) matrix
            Controls for each sample
        sample_weight : (n,) array like or None
            Individual weights for each sample. If None, it assumes equal weight.
        groups: (n,) vector, optional
            All rows corresponding to the same group will be kept together during splitting.
            If groups is not None, the `cv` argument passed to this class's initializer
            must support a 'groups' argument to its split method.
        cache_values: bool, default False
            Whether to cache inputs and first stage results, which will allow refitting
            a different final model
        inference: string, :class:`.Inference` instance, or None
            Method for performing inference.  This estimator supports 'bootstrap'
            (or an instance of :class:`.BootstrapInference`), 'blb' or 'auto'
            (for Bootstrap-of-Little-Bags based inference)

        Returns
        -------
        self
        """
        if X is None:
            raise ValueError("This estimator does not support X=None!")
        return super().fit(Y, T, X=X, W=W, sample_weight=sample_weight, groups=groups,
                           cache_values=cache_values, inference=inference)

    def refit_final(self, *, inference='auto'):
        return super().refit_final(inference=inference)
    # Inherit the base-class documentation verbatim for refit_final.
    refit_final.__doc__ = _OrthoLearner.refit_final.__doc__

    def feature_importances(self, max_depth=4, depth_decay_exponent=2.0):
        """Return per-feature importances from the final-stage forest, ignoring splits
        below `max_depth` and down-weighting deeper splits (see class docstring),
        reshaped to (n_outcomes, n_features) per the fitted output dimensions."""
        imps = self.model_final_.feature_importances(max_depth=max_depth,
                                                     depth_decay_exponent=depth_decay_exponent)
        return imps.reshape(self._d_y + (-1,))

    def summary(self, alpha=0.05, value=0, decimals=3,
                feature_names=None, treatment_names=None, output_names=None):
        """
        The summary of coefficient and intercept in the linear model of the constant
        marginal treatment effect.

        Parameters
        ----------
        alpha: optional float in [0, 1] (default=0.05)
            The overall level of confidence of the reported interval.
            The alpha/2, 1-alpha/2 confidence interval is reported.
        value: optional float (default=0)
            The mean value of the metric you'd like to test under null hypothesis.
        decimals: optional int (default=3)
            Number of decimal places to round each column to.
        feature_names: optional list of strings or None (default is None)
            The input of the feature names
        treatment_names: optional list of strings or None (default is None)
            The names of the treatments
        output_names: optional list of strings or None (default is None)
            The names of the outputs

        Returns
        -------
        smry : Summary instance
            this holds the summary tables and text, which can be printed or
            converted to various output formats.
        """
        # Get input names
        treatment_names = self.cate_treatment_names(treatment_names)
        output_names = self.cate_output_names(output_names)
        # Summary
        if self._cached_values is not None:
            print("Population summary of CATE predictions on Training Data")
            smry = self.const_marginal_ate_inference(self._cached_values.X).summary(
                alpha=alpha, value=value, decimals=decimals,
                output_names=output_names, treatment_names=treatment_names)
        else:
            print("Population summary results are available only if `cache_values=True` at fit time!")
            smry = Summary()
        d_t = self._d_t[0] if self._d_t else 1
        # NOTE(review): d_y is computed but never used below — confirm if intentional.
        d_y = self._d_y[0] if self._d_y else 1
        try:
            # Doubly robust ATE table.
            intercept_table = self.ate__inference().summary_frame(
                alpha=alpha, value=value, decimals=decimals, feature_names=None,
                treatment_names=treatment_names, output_names=output_names)
            intercept_array = intercept_table.values
            intercept_headers = intercept_table.columns.tolist()
            n_level = intercept_table.index.nlevels
            if n_level > 1:
                intercept_stubs = ["|".join(ind_value) for ind_value in intercept_table.index.values]
            else:
                intercept_stubs = intercept_table.index.tolist()
            intercept_title = 'Doubly Robust ATE on Training Data Results'
            smry.add_table(intercept_array, intercept_headers, intercept_stubs, intercept_title)
        except Exception as e:
            print("Doubly Robust ATE on Training Data Results: ", str(e))
        # One ATT table per treatment level (including the baseline level 0).
        for t in range(0, d_t + 1):
            try:
                intercept_table = self.att__inference(T=t).summary_frame(
                    alpha=alpha, value=value, decimals=decimals, feature_names=None,
                    output_names=output_names)
                intercept_array = intercept_table.values
                intercept_headers = intercept_table.columns.tolist()
                n_level = intercept_table.index.nlevels
                if n_level > 1:
                    intercept_stubs = ["|".join(ind_value) for ind_value in intercept_table.index.values]
                else:
                    intercept_stubs = intercept_table.index.tolist()
                intercept_title = "Doubly Robust ATT(T={}) on Training Data Results".format(t)
                smry.add_table(intercept_array, intercept_headers, intercept_stubs, intercept_title)
            except Exception as e:
                # Stop trying further treatment levels after the first failure.
                print("Doubly Robust ATT on Training Data Results: ", str(e))
                break
        if len(smry.tables) > 0:
            return smry

    def shap_values(self, X, *, feature_names=None, treatment_names=None,
                    output_names=None, background_samples=100):
        return _shap_explain_multitask_model_cate(self.const_marginal_effect,
                                                  self.model_cate.estimators_, X,
                                                  self._d_t, self._d_y,
                                                  featurizer=self.featurizer_,
                                                  feature_names=feature_names,
                                                  treatment_names=treatment_names,
                                                  output_names=output_names,
                                                  input_names=self._input_names,
                                                  background_samples=background_samples)
    # Inherit the base-class documentation verbatim for shap_values.
    shap_values.__doc__ = LinearCateEstimator.shap_values.__doc__

    def ate__inference(self):
        """
        Returns
        -------
        ate__inference : NormalInferenceResults
            Inference results information for the `ate_` attribute, which is the average
            constant marginal treatment effect of each treatment for each outcome, averaged
            over the training data and with a doubly robust correction.
            Available only when `discrete_treatment=True` and `drate=True`.
        """
        return NormalInferenceResults(d_t=self._d_t[0] if self._d_t else 1,
                                      d_y=self._d_y[0] if self._d_y else 1,
                                      pred=self.ate_,
                                      pred_stderr=self.ate_stderr_,
                                      mean_pred_stderr=None,
                                      inf_type='ate',
                                      feature_names=self.cate_feature_names(),
                                      output_names=self.cate_output_names(),
                                      treatment_names=self.cate_treatment_names())

    @property
    def ate_(self):
        # Doubly robust ATE, delegated to the fitted final-stage wrapper.
        return self.rlearner_model_final_.ate_

    @property
    def ate_stderr_(self):
        # Standard error of `ate_`, delegated to the fitted final-stage wrapper.
        return self.rlearner_model_final_.ate_stderr_

    def att__inference(self, *, T):
        """
        Parameters
        ----------
        T : int
            The index of the treatment for which to get the ATT. It corresponds to the
            lexicographic rank of the discrete input treatments.

        Returns
        -------
        att__inference : NormalInferenceResults
            Inference results information for the `att_` attribute, which is the average
            constant marginal treatment effect of each treatment for each outcome, averaged
            over the training data treated with treatment T and with a doubly robust correction.
            Available only when `discrete_treatment=True` and `oob=True`.
        """
        return NormalInferenceResults(d_t=self._d_t[0] if self._d_t else 1,
                                      d_y=self._d_y[0] if self._d_y else 1,
                                      pred=self.att_(T=T),
                                      pred_stderr=self.att_stderr_(T=T),
                                      mean_pred_stderr=None,
                                      inf_type='att',
                                      feature_names=self.cate_feature_names(),
                                      output_names=self.cate_output_names(),
                                      treatment_names=self.cate_treatment_names())

    def att_(self, *, T):
        """
        Parameters
        ----------
        T : int
            The index of the treatment for which to get the ATT. It corresponds to the
            lexicographic rank of the discrete input treatments.

        Returns
        -------
        att_ : ndarray (n_y, n_t)
            The average constant marginal treatment effect of each treatment for each outcome,
            averaged over the training data treated with treatment T and with a doubly robust
            correction. Singleton dimensions are dropped if input variable was a vector.
        """
        return self.rlearner_model_final_.att_[T]

    def att_stderr_(self, *, T):
        """
        Parameters
        ----------
        T : int
            The index of the treatment for which to get the ATT. It corresponds to the
            lexicographic rank of the discrete input treatments.

        Returns
        -------
        att_stderr_ : ndarray (n_y, n_t)
            The standard error of the corresponding `att_`
        """
        return self.rlearner_model_final_.att_stderr_[T]

    @property
    def feature_importances_(self):
        # Default-argument convenience view of feature_importances().
        return self.feature_importances()

    @property
    def model_final(self):
        # Always generated from the stored constructor parameters; not settable.
        return self._gen_model_final()

    @model_final.setter
    def model_final(self, model):
        if model is not None:
            raise ValueError("Parameter `model_final` cannot be altered for this estimator!")

    def __len__(self):
        """Return the number of estimators in the ensemble."""
        return self.model_cate.__len__()

    def __getitem__(self, index):
        """Return the index'th estimator in the ensemble."""
        return self.model_cate.__getitem__(index)

    def __iter__(self):
        """Return iterator over estimators in the ensemble."""
        return self.model_cate.__iter__()
PypiClean
/MediaCurator-0.0.10.tar.gz/MediaCurator-0.0.10/README.md
# MediaCurator

MediaCurator is a Python command line tool to manage a media database.

* List all the videos and their information with or without filters
* Batch find and repair/convert videos with encoding errors
* Batch recode videos to more modern codecs (x265 / AV1) based on filters: extensions, codecs, resolutions …

## Documentation

The documentation is available on the following [link](https://fabquenneville.github.io/MediaCurator/)

## Releases

MediaCurator is released on [PyPi](https://pypi.org/project/MediaCurator/).
Installation instructions are found on the [Github page](https://fabquenneville.github.io/MediaCurator/usage/installation.html).

## Usage

    mediacurator [list,convert] [-del]
        [-in:any,avi,mkv,wmv,mpg,mp4,m4v,flv,vid]
        [-filters:fferror,old,lowres,hd,720p,1080p,uhd,mpeg,mpeg4,x264,wmv3,wmv]
        [-out:mkv/mp4,x265/av1]
        [-print:list,formated,verbose]
        [-dirs/-files:"/mnt/media/",,"/mnt/media2/"]

> for multiple files or filenames use double comma separated values ",,"

default options are: -in:any -filters: -out:mkv,x265 -print:list

Examples:

```bash
mediacurator list -filters:old -print:formated -dirs:/mnt/media/ >> ../medlist.txt
mediacurator convert -del -filters:mpeg4 -out:av1,mp4 -dirs:"/mnt/media/Movies/"
mediacurator convert -del -in:avi,mpg -print:formated,verbose -dirs:/mnt/media/
```

## Contributing

Pull requests are welcome. For major changes, please open an issue first to discuss what you would like to change.
Please make sure to update tests as appropriate.

## License

[GNU GPLv3](https://choosealicense.com/licenses/gpl-3.0/)
PypiClean
/COMPAS-1.17.5.tar.gz/COMPAS-1.17.5/src/compas/numerical/dr/dr.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from copy import deepcopy
from math import sqrt


__all__ = ["dr"]


# Runge-Kutta coefficient rows used by the `rk` integrator inside `dr`:
# row i holds the stage time-fraction followed by the stage weights.
K = [
    [0.0],
    [0.5, 0.5],
    [0.5, 0.0, 0.5],
    [1.0, 0.0, 0.0, 1.0],
]


class Coeff:
    """Viscous-damping coefficients derived from the damping factor ``c``:
    ``a = (1 - c/2) / (1 + c/2)`` scales velocities between iterations and
    ``b = (1 + a) / 2`` scales accelerations inside the RK step."""

    def __init__(self, c):
        self.c = c
        self.a = (1 - c * 0.5) / (1 + c * 0.5)
        self.b = 0.5 * (1 + self.a)


def norm_vector(vector):
    """
    Calculate the length of a vector.

    Parameters
    ----------
    vector : list
        XYZ components of the vector.

    Returns
    -------
    float
        The L2 norm, or *length* of the vector.

    Examples
    --------
    >>>
    """
    return sqrt(sum(axis**2 for axis in vector))


def norm_vectors(vectors):
    """
    Calculate the norm of each vector in a list of vectors.

    Parameters
    ----------
    vectors : list
        A list of vectors

    Returns
    -------
    list
        A list with the lengths of all vectors.

    Examples
    --------
    >>>
    """
    return [norm_vector(vector) for vector in vectors]


def adjacency_from_edges(edges):
    """Construct an adjacency dictionary from a set of edges.

    Parameters
    ----------
    edges : list
        A list of index pairs.

    Returns
    -------
    dict
        A dictionary mapping each index in the list of index pairs
        to a list of adjacent indices.

    Examples
    --------
    >>>
    """
    adj = {}
    for i, j in iter(edges):
        adj.setdefault(i, []).append(j)
        adj.setdefault(j, []).append(i)
    return adj


def dr(
    vertices,
    edges,
    fixed,
    loads,
    qpre,
    fpre=None,
    lpre=None,
    linit=None,
    E=None,
    radius=None,
    kmax=100,
    dt=1.0,
    tol1=1e-3,
    tol2=1e-6,
    c=0.1,
    callback=None,
    callback_args=None,
):
    """Implementation of dynamic relaxation with RK integration scheme in pure Python.

    Parameters
    ----------
    vertices : list
        XYZ coordinates of the vertices.
    edges : list
        Connectivity of the vertices.
    fixed : list
        Indices of the fixed vertices.
    loads : list
        XYZ components of the loads on the vertices.
    qpre : list
        Prescribed force densities in the edges.
    fpre : list, optional
        Prescribed forces in the edges.
    lpre : list, optional
        Prescribed lengths of the edges.
    linit : list, optional
        Initial length of the edges.
    E : list, optional
        Stiffness of the edges.
    radius : list, optional
        Radius of the edges.
    kmax : int, optional
        Maximum number of iterations.
    dt : float, optional
        The time step.
    tol1 : float, optional
        Convergence criterion for the residual forces.
    tol2 : float, optional
        Convergence criterion for the displacements in between interations.
    c : float, optional
        Damping factor for viscous damping.
    callback : callable, optional
        A user-defined callback that is called after every iteration.
        The callback will be called with ``k`` the current iteration,
        ``X`` the coordinates at iteration ``k``,
        ``crit1, crit2`` the values of the stoppage criteria at iteration ``k``,
        and ``callback_args`` the optional additional arguments.
    callback_args : tuple, optional
        Additional arguments to be passed to the callback.

    Returns
    -------
    xyz : array
        XYZ coordinates of the equilibrium geometry.
    q : array
        Force densities in the edges.
    f : array
        Forces in the edges.
    l : array
        Lengths of the edges
    r : array
        Residual forces.

    Notes
    -----
    NOTE(review): ``linit``, ``E`` and ``radius`` are accepted and documented
    but never referenced in the body below — confirm whether they are meant
    to influence the force densities.

    Examples
    --------
    >>>
    """
    if callback:
        if not callable(callback):
            raise Exception("The callback is not callable.")
    # --------------------------------------------------------------------------
    # preprocess
    # --------------------------------------------------------------------------
    n = len(vertices)
    e = len(edges)
    # i_nbrs = {i: [ij[1] if ij[0] == i else ij[0] for ij in edges if i in ij] for i in range(n)}
    i_nbrs = adjacency_from_edges(edges)
    # ij_e maps an (undirected) vertex pair to its edge index, in both orders.
    ij_e = {(i, j): index for index, (i, j) in enumerate(edges)}
    ij_e.update({(j, i): index for (i, j), index in ij_e.items()})
    coeff = Coeff(c)
    ca = coeff.a
    cb = coeff.b
    free = list(set(range(n)) - set(fixed))
    # --------------------------------------------------------------------------
    # attribute arrays
    # --------------------------------------------------------------------------
    # NOTE: X aliases `vertices` and is mutated in place during iteration.
    X = vertices
    P = loads
    Qpre = qpre or [0.0 for _ in range(e)]
    Fpre = fpre or [0.0 for _ in range(e)]
    Lpre = lpre or [0.0 for _ in range(e)]
    # --------------------------------------------------------------------------
    # initial values
    # --------------------------------------------------------------------------
    Q = [1.0 for _ in range(e)]  # force densities
    L = [sum((X[i][axis] - X[j][axis]) ** 2 for axis in (0, 1, 2)) ** 0.5 for i, j in iter(edges)]  # edge lengths
    F = [q * l for q, l in zip(Q, L)]  # edge forces
    M = [sum(0.5 * dt**2 * Q[ij_e[(i, j)]] for j in i_nbrs[i]) for i in range(n)]  # fictitious vertex masses
    V = [[0.0, 0.0, 0.0] for _ in range(n)]  # velocities
    R = [[0.0, 0.0, 0.0] for _ in range(n)]  # residual forces
    dX = [[0.0, 0.0, 0.0] for _ in range(n)]  # per-iteration displacements
    # --------------------------------------------------------------------------
    # helpers
    # --------------------------------------------------------------------------

    def update_R():
        # Residual at each vertex = external load + sum of edge forces (q * delta).
        for i in range(n):
            x = X[i][0]
            y = X[i][1]
            z = X[i][2]
            f = [0.0, 0.0, 0.0]
            for j in i_nbrs[i]:
                q = Q[ij_e[(i, j)]]
                f[0] += q * (X[j][0] - x)
                f[1] += q * (X[j][1] - y)
                f[2] += q * (X[j][2] - z)
            R[i] = [P[i][axis] + f[axis] for axis in (0, 1, 2)]

    def rk(X0, V0, steps=2):
        # Runge-Kutta velocity update (2- or 4-stage) starting from X0, V0.
        def a(t, V):
            # Acceleration evaluated after moving the free vertices by V * t.
            # Side effect: updates X (free vertices) and R.
            dX = [[V[i][axis] * t for axis in (0, 1, 2)] for i in range(n)]
            for i in free:
                X[i] = [X0[i][axis] + dX[i][axis] for axis in (0, 1, 2)]
            update_R()
            return [[cb * R[i][axis] / M[i] for axis in (0, 1, 2)] for i in range(n)]

        if steps == 2:
            B = [0.0, 1.0]
            a0 = a(K[0][0] * dt, V0)
            k0 = [[dt * a0[i][axis] for axis in (0, 1, 2)] for i in range(n)]
            a1 = a(
                K[1][0] * dt,
                [[V0[i][axis] + K[1][1] * k0[i][axis] for axis in (0, 1, 2)] for i in range(n)],
            )
            k1 = [[dt * a1[i][axis] for axis in (0, 1, 2)] for i in range(n)]
            return [[B[0] * k0[i][axis] + B[1] * k1[i][axis] for axis in (0, 1, 2)] for i in range(n)]
        if steps == 4:
            B = [1.0 / 6.0, 1.0 / 3.0, 1.0 / 3.0, 1.0 / 6.0]
            a0 = a(K[0][0] * dt, V0)
            k0 = [[dt * a0[i][axis] for axis in (0, 1, 2)] for i in range(n)]
            a1 = a(
                K[1][0] * dt,
                [[V0[i][axis] + K[1][1] * k0[i][axis] for axis in (0, 1, 2)] for i in range(n)],
            )
            k1 = [[dt * a1[i][axis] for axis in (0, 1, 2)] for i in range(n)]
            a2 = a(
                K[2][0] * dt,
                [[V0[i][axis] + K[2][1] * k0[i][axis] + K[2][2] * k1[i][axis] for axis in (0, 1, 2)] for i in range(n)],
            )
            k2 = [[dt * a2[i][axis] for axis in (0, 1, 2)] for i in range(n)]
            a3 = a(
                K[3][0] * dt,
                [
                    [
                        V0[i][axis] + K[3][1] * k0[i][axis] + K[3][2] * k1[i][axis] + K[3][3] * k2[i][axis]
                        for axis in (0, 1, 2)
                    ]
                    for i in range(n)
                ],
            )
            k3 = [[dt * a3[i][axis] for axis in (0, 1, 2)] for i in range(n)]
            return [
                [
                    B[0] * k0[i][axis] + B[1] * k1[i][axis] + B[2] * k2[i][axis] + B[3] * k3[i][axis]
                    for axis in (0, 1, 2)
                ]
                for i in range(n)
            ]
        # Only 2- and 4-stage schemes are implemented.
        raise NotImplementedError

    # --------------------------------------------------------------------------
    # start iterating
    # --------------------------------------------------------------------------
    for k in range(kmax):
        # Combine prescribed force densities with densities equivalent to the
        # prescribed forces (Fpre / current length) and prescribed lengths
        # (previous force / Lpre).
        Qfpre = [a / b if b else 0 for a, b in zip(Fpre, L)]
        Qlpre = [a / b if b else 0 for a, b in zip(F, Lpre)]
        Q = [a + b + c for a, b, c in zip(Qpre, Qfpre, Qlpre)]
        M = [sum(0.5 * dt**2 * Q[ij_e[(i, j)]] for j in i_nbrs[i]) for i in range(n)]
        X0 = deepcopy(X)
        # Damp the velocities carried over from the previous iteration.
        V0 = [[ca * V[i][axis] for axis in (0, 1, 2)] for i in range(n)]
        # RK
        dV = rk(X0, V0, steps=4)
        # update
        for i in free:
            V[i] = [V0[i][axis] + dV[i][axis] for axis in (0, 1, 2)]
            dX[i] = [V[i][axis] * dt for axis in (0, 1, 2)]
            X[i] = [X0[i][axis] + dX[i][axis] for axis in (0, 1, 2)]
        L = [sum((X[i][axis] - X[j][axis]) ** 2 for axis in (0, 1, 2)) ** 0.5 for i, j in iter(edges)]
        F = [q * l for q, l in zip(Q, L)]
        update_R()
        # crits: largest residual force and largest displacement among free vertices
        crit1 = max(norm_vectors([R[i] for i in free]))
        crit2 = max(norm_vectors([dX[i] for i in free]))
        # callback
        if callback:
            callback(k, X, (crit1, crit2), callback_args)
        # convergence
        if crit1 < tol1:
            break
        if crit2 < tol2:
            break
    # --------------------------------------------------------------------------
    # update
    # --------------------------------------------------------------------------
    update_R()
    return X, Q, F, L, R
PypiClean
/ChemGAPP-0.0.9-py3-none-any.whl/ChemGAPP_Package/ChemGAPP_Big/Cosine_Similarity.py
# In[ ]:

import argparse
import pandas as pd
import random
from scipy import spatial
import re
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from sklearn.metrics import auc
import numpy as np
import os
import itertools as it


def get_options():
    # Command-line interface: input scored dataset, operon cluster CSV, and the
    # three output paths (pair table, ROC curve plot, density plot).
    parser = argparse.ArgumentParser(description="Calculates the cosine similarity scores for the phenotypic profiles of genes from the same operon and genes from different operons. Produces a density plot of the cosine similarity scores for genes of the same and different operons. Produces an ROC curve testing models ability at different threshold. ",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("-i", "--InputFile", help="The dataset with gene names added. Output from Add_gene_names.py")
    parser.add_argument("-o", "--OutputFile", help="List of genes compared and the cosine similarity score as well as if they belong to the same operon")
    parser.add_argument("-or", "--Output_ROC_curve", help="Plot of the ROC curve and AUC score.")
    parser.add_argument("-od", "--Output_Density_plot", help="Density plot of the cosine similarity scores for same and different operons.")
    parser.add_argument("-clus", "--Cluster_file", help="A CSV file containing the operon clusters for each gene within the bacterium of interest, where columns = (Cluster,Gene). The Genes must match the names assigned to the scored dataset.")
    return parser.parse_args()


def main():
    # NOTE(review): this function relies on `DataFrame.append`, which was
    # deprecated in pandas 1.4 and removed in pandas 2.0, and on the deprecated
    # seaborn `kdeplot(shade=...)` keyword — confirm pinned library versions.
    options = get_options()
    inpt = options.InputFile
    outpt = options.OutputFile
    Op_Clus = options.Cluster_file
    outroc = options.Output_ROC_curve
    outdens = options.Output_Density_plot
    outroc = os.path.expanduser(outroc)
    outdens = os.path.expanduser(outdens)
    # Open the operon cluster and s-score files
    df = pd.read_csv(Op_Clus)
    df = df[['Gene', 'Cluster']]
    ar1 = np.array(df)
    # Scored dataset: first column becomes a 'Gene' column; rows whose gene name
    # contains "empty"/"EMPTY" (blank plate positions) are dropped.
    df_14 = pd.read_table(inpt, index_col=0)
    df_14 = df_14.reset_index()
    df_14 = df_14.rename(columns={"index": "Gene"})
    df_14 = df_14.sort_values('Gene')
    df_14 = df_14[~df_14.Gene.str.contains("empty")]
    df_14 = df_14[~df_14.Gene.str.contains("EMPTY")]
    df_14 = df_14.reset_index(drop=True)
    ar_14 = np.array(df_14)
    # Collect only the clusters for genes in the dataset and assigning clusters
    # to the dataset in end column
    df_Clus = pd.DataFrame(columns=df.columns)
    for i in range(len(ar1)):
        x = ar1[i][0]
        if str(x) in ar_14[:, 0]:
            rows = df.loc[i]
            df_Clus = df_Clus.append(rows, ignore_index=True)  # deprecated API, see NOTE above
    df_merge = pd.merge(df_14, df_Clus)
    df_co_sim_R_T = pd.DataFrame()
    ar_merge = np.array(df_merge)
    mainlist = list(range(0, len(df_merge)))
    # Same-operon pairs: every unordered pair of rows sharing a cluster label.
    for i, j in it.combinations(mainlist, 2):
        # ignores comparison with self (combinations never yields i == j, so
        # this guard is always true; kept for safety)
        if i != j:
            # if in same cluster list all s-scores
            if str(ar_merge[i][-1]) == str(ar_merge[j][-1]):
                p = list(ar_merge[i, 1:-1])
                p2 = list(ar_merge[j, 1:-1])
                data1 = []
                data2 = []
                # ensures nans are removed as well as the corresponding value
                # for the other list, keeping the two profiles aligned.
                for itemp, indexp, itemp2, in zip(p, range(len(p)), p2):
                    if str(itemp) != "nan" and str(itemp2) != "nan":
                        data1.append(float(itemp))
                        data2.append(float(itemp2))
                n = str(ar_merge[i, 0:1])
                n2 = str(ar_merge[j, 0:1])
                # calculates cosine similarity between the phenotypic profiles
                # of s-scores for the two genes and adds to a df.
                cosine_similarity = 1 - spatial.distance.cosine(data1, data2)
                name = np.array([n, n2, cosine_similarity, "TRUE"], dtype=object)
                df_co_sim_R_T = df_co_sim_R_T.append(pd.DataFrame(name).T)
    df_co_sim_R_T.columns = ['Gene1', 'Gene2', 'Cosine_score', 'Same Operon']
    df_co_sim_R_T = df_co_sim_R_T.reset_index(drop=True)
    # Fixed seed so the random different-operon pair selection is reproducible.
    random.seed(2)

    def random_pairs(number_list):
        # Draw one random pair of distinct indices from number_list.
        return [number_list[i] for i in random.sample(range(len(number_list)), 2)]

    # selects twice as many pairs as needed for different operons as likely some
    # will come up as the same and want to later syphon down to same number of
    # comparisions as for same operon.
    n = (2 * len(df_co_sim_R_T))
    numbers = list(range(len(df_merge)))
    rp = [random_pairs(numbers) for i in range(n)]
    df_co_sim_R_F = pd.DataFrame()
    for k in range(n):
        i = rp[k][0]
        j = rp[k][1]
        d1 = list(ar_merge[i, -1:])
        d2 = list(ar_merge[j, -1:])
        # if operon clusters do not match.
        if d1 != d2:
            p = list(ar_merge[i, 1:-1])
            p2 = list(ar_merge[j, 1:-1])
            data1 = []
            data2 = []
            for itemp, indexp, itemp2, in zip(p, range(len(p)), p2):
                if str(itemp) != "nan" and str(itemp2) != "nan":
                    data1.append(float(itemp))
                    data2.append(float(itemp2))
            # NOTE(review): `n` is rebound here, shadowing the pair count above;
            # harmless because range(n) was already materialised, but confusing.
            n = ar_merge[i, 0:1]
            n2 = ar_merge[j, 0:1]
            cosine_similarity = 1 - spatial.distance.cosine(data1, data2)
            name = np.array([n, n2, cosine_similarity, "FALSE"], dtype=object)
            df_co_sim_R_F = df_co_sim_R_F.append(pd.DataFrame(name).T)
    df_co_sim_R_F.columns = ['Gene1', 'Gene2', 'Cosine_score', 'Same Operon']
    df_co_sim_R_F = df_co_sim_R_F.reset_index(drop=True)
    # drop any NA values
    df_co_sim_R_T.dropna(subset=["Cosine_score"], inplace=True)
    df_co_sim_R_F.dropna(subset=["Cosine_score"], inplace=True)
    df_co_sim_R_F = df_co_sim_R_F.reset_index(drop=True)
    df_co_sim_R_T = df_co_sim_R_T.reset_index(drop=True)
    # Balance the two classes by down-sampling the larger table to the size of
    # the smaller one.
    # NOTE(review): if both tables have exactly equal length, neither branch
    # runs and `df_T_F` is never defined (NameError below) — confirm intended.
    if len(df_co_sim_R_F) < len(df_co_sim_R_T):
        # pick random gene pairs from the same operon dataframe
        # selecting same number as there are in the different operon dataframe
        random.seed(2)
        r_S2 = random.sample(range(len(df_co_sim_R_T)), len(df_co_sim_R_F))
        df_S2 = pd.DataFrame(columns=df_co_sim_R_T.columns)
        for i in r_S2:
            rows = df_co_sim_R_T.loc[i]
            df_S2 = df_S2.append(rows, ignore_index=True)
        df_T_F = pd.concat([df_co_sim_R_F, df_S2], ignore_index=True)
    elif len(df_co_sim_R_F) > len(df_co_sim_R_T):
        # pick random gene pairs from the different operons dataframe
        # selecting same number as there are in the same operon dataframe
        random.seed(2)
        r_S2 = random.sample(range(len(df_co_sim_R_F)), len(df_co_sim_R_T))
        df_S2 = pd.DataFrame(columns=df_co_sim_R_F.columns)
        for i in r_S2:
            rows = df_co_sim_R_F.loc[i]
            df_S2 = df_S2.append(rows, ignore_index=True)
        # Join the subsetted different operon table and same operon table
        df_T_F = pd.concat([df_co_sim_R_T, df_S2], ignore_index=True)
    # Format the merged table so it can be pivoted to
    # have cosine similarity scores put into a same or different operon column
    df_T_F['Cosine_score'] = df_T_F['Cosine_score'].astype(float)
    for f in df_T_F.columns:
        if f != 'Cosine_score':
            df_T_F[f] = df_T_F[f].astype(str)
    # Strip the "['...']" wrapping left over from the 1-element numpy slices
    # used for the gene names above.
    for f in list(['Gene1', "Gene2"]):
        df_T_F[f] = df_T_F[f].str.replace("\[\'", '', regex=True)
        df_T_F[f] = df_T_F[f].str.replace("\'\]", '', regex=True)
    for f in df_T_F.columns:
        if f != 'Cosine_score':
            df_T_F[f] = pd.array(df_T_F[f].tolist())
    df_T_F.to_csv(outpt, index=False)
    df_T_F_2 = pd.pivot_table(df_T_F, values='Cosine_score', index=['Gene1', 'Gene2'], columns=['Same Operon'])
    # Produce density plot for similarity scores between same and different operons
    sns.set(style="darkgrid")
    fig = sns.kdeplot(df_T_F_2['FALSE'], shade=True, color="r")
    fig = sns.kdeplot(df_T_F_2['TRUE'], shade=True, color="b")
    plt.xlabel("Cosine Similarity Score")
    plt.ylabel("Density")
    handles = [mpatches.Patch(facecolor="r", label="Different Operon"),
               mpatches.Patch(facecolor="b", label="Same Operon")]
    plt.legend(handles=handles)
    fig.set_title('Density Plot of Cosine Similarity Scores Between Genes of the Same and Different Operons')
    plt.savefig(outdens, bbox_inches='tight')
    df_FP = df_T_F_2.reset_index(drop=True)
    F = list(df_FP['FALSE'])
    F = [x for x in F if str(x) != 'nan']
    T = list(df_FP['TRUE'])
    T = [x for x in T if str(x) != 'nan']
    df_thres = pd.DataFrame()
    # calculate true and false positives and negatives at different thresholds
    # for AUC and ROC curve production.
    for i in np.arange(-1, 1.1, 0.1):
        TP = 0
        TN = 0
        FP = 0
        FN = 0
        for row in F:
            if row < i:
                TN = TN + 1
            else:
                FP = FP + 1
        for row in T:
            if row < i:
                FN = FN + 1
            else:
                TP = TP + 1
        sens = TP / (TP + FN)
        spec = TN / (TN + FP)
        FPR = 1 - spec
        name = np.array([i, sens, spec, FPR], dtype=object)
        df_thres = df_thres.append(pd.DataFrame(name).T)
    df_thres.columns = ['Threshold', 'Sensitivity', 'Specificity', 'False Positive Rate']
    sens_rate = list(df_thres['Sensitivity'])
    spec_rate = list(df_thres['False Positive Rate'])
    fig, ax = plt.subplots(figsize=(6, 6))
    ax.plot(spec_rate, sens_rate)
    ax.set_xlabel('False Positive Rate')
    ax.set_ylabel('True Positive Rate')
    ax.set_title('ROC curve for Assigning S-scores to Genes of Same and Different Operons')
    AUC_txt = f'AUC {auc(spec_rate, sens_rate)}'
    fig.suptitle(AUC_txt, fontsize=12, fontweight='bold')
    plt.savefig(outroc, bbox_inches='tight')


if __name__ == "__main__":
    main()
PypiClean
/ESMValTool-2.9.0-py3-none-any.whl/esmvaltool/cmorizers/data/formatters/datasets/landschuetzer2020.py
import logging
import warnings
from datetime import datetime
from pathlib import Path

import iris
from cf_units import Unit
from dask import array as da
from iris import NameConstraint
from iris.coords import CellMethod, DimCoord

from esmvaltool.cmorizers.data import utilities as utils

logger = logging.getLogger(__name__)


def _callback_fix_fillvalue(cube, field, _):
    """Create masked array from FillValue."""
    if not hasattr(field.cf_data, 'FillValue'):
        return
    fill_value = int(field.cf_data.FillValue)
    logger.info("Fixing fill value (%i)", fill_value)
    cube.data = da.ma.masked_equal(cube.core_data(), fill_value)


def _fix_climatological_time(cube):
    """Fix climatology coordinate."""
    time_units = Unit('days since 1950-01-01 00:00:00', calendar='standard')

    # Following the doc the covered time period of the climatology is
    # 1988-01-01 to 2020-01-01 (Use 2004 as the "mean" year). See
    # https://www.ncei.noaa.gov/access/metadata/landing-page/bin/
    # iso?id=gov.noaa.nodc%3A0209633
    months = range(1, 13)
    time_points = time_units.date2num(
        [datetime(2004, month, 15) for month in months]
    )
    # December closes the record at 2020-01-01; every other month's upper
    # bound is the first day of the following month in 2019.
    time_bounds = time_units.date2num([
        [datetime(1988, month, 1),
         datetime(2019, month + 1, 1) if month < 12 else datetime(2020, 1, 1)]
        for month in months
    ])

    # Replace the existing time coordinate with the climatological one.
    time_coord = DimCoord(
        time_points,
        bounds=time_bounds,
        standard_name='time',
        long_name='time',
        var_name='time',
        units=time_units,
        climatological=True,
    )
    cube.remove_coord('time')
    cube.add_dim_coord(time_coord, 0)

    # Fix cell methods
    for method in ('mean within years', 'mean over years'):
        cube.add_cell_method(CellMethod(method, coords=time_coord))


def _fix_scalar_coords(cube):
    """Fix scalar coordinates."""
    if cube.var_name != 'spco2':
        return
    utils.add_scalar_depth_coord(cube)


def _extract_variable(var_info, cmor_info, attrs, filepath, out_dir):
    """Extract variable."""
    short_name = cmor_info.short_name
    raw_name = var_info.get('raw_name', short_name)

    # Load data
    with warnings.catch_warnings():
        warnings.filterwarnings(
            action='ignore',
            message='Ignoring netCDF variable .* invalid units .*',
            category=UserWarning,
            module='iris',
        )
        cube = iris.load_cube(
            filepath,
            NameConstraint(var_name=raw_name),
            callback=_callback_fix_fillvalue,
        )

    # Fix variable metadata
    if 'raw_units' in var_info:
        cube.units = var_info['raw_units']
    cube.convert_units(cmor_info.units)
    utils.fix_var_metadata(cube, cmor_info)

    # Fix coordinates
    _fix_climatological_time(cube)
    utils.fix_coords(
        cube,
        overwrite_lat_bounds=False,
        overwrite_lon_bounds=False,
        overwrite_time_bounds=False,
    )
    _fix_scalar_coords(cube)

    # Fix global metadata
    utils.set_global_atts(cube, attrs)

    # Save variable
    utils.save_variable(
        cube,
        short_name,
        out_dir,
        attrs,
        local_keys=['positive'],
        unlimited_dimensions=['time'],
    )


def cmorization(in_dir, out_dir, cfg, cfg_user, start_date, end_date):
    """Cmorization func call."""
    cmor_table = cfg['cmor_table']
    glob_attrs = cfg['attributes']

    # Run the cmorization
    for short_name, var_info in cfg['variables'].items():
        filepath = Path(in_dir) / var_info['filename']
        logger.info("CMORizing variable '%s' from file %s", short_name, filepath)
        glob_attrs['mip'] = var_info['mip']
        cmor_info = cmor_table.get_variable(var_info['mip'], short_name)
        _extract_variable(var_info, cmor_info, glob_attrs, filepath, out_dir)
PypiClean
/ApiLogicServer-9.2.18-py3-none-any.whl/api_logic_server_cli/create_from_model/safrs-react-admin-npm-build/static/js/2909.28b39275.chunk.js
"use strict";(self.webpackChunkreact_admin_upgrade=self.webpackChunkreact_admin_upgrade||[]).push([[2909],{72909:function(e,t,p){p.r(t),p.d(t,{conf:function(){return n},language:function(){return i}});var n={wordPattern:/(-?\d*\.\d\w*)|([^\`\~\!\@\#\%\^\&\*\(\)\-\=\+\[\{\]\}\\\|\;\:\'\"\,\.\<\>\/\?\s]+)/g,comments:{lineComment:"//",blockComment:["/*","*/"]},brackets:[["{","}"],["[","]"],["(",")"]],autoClosingPairs:[{open:"{",close:"}",notIn:["string"]},{open:"[",close:"]",notIn:["string"]},{open:"(",close:")",notIn:["string"]},{open:'"',close:'"',notIn:["string"]},{open:"'",close:"'",notIn:["string","comment"]}],folding:{markers:{start:new RegExp("^\\s*(#|//)region\\b"),end:new RegExp("^\\s*(#|//)endregion\\b")}}},i={defaultToken:"",tokenPostfix:"",tokenizer:{root:[[/<\?((php)|=)?/,{token:"@rematch",switchTo:"@phpInSimpleState.root"}],[/<!DOCTYPE/,"metatag.html","@doctype"],[/<!--/,"comment.html","@comment"],[/(<)(\w+)(\/>)/,["delimiter.html","tag.html","delimiter.html"]],[/(<)(script)/,["delimiter.html",{token:"tag.html",next:"@script"}]],[/(<)(style)/,["delimiter.html",{token:"tag.html",next:"@style"}]],[/(<)([:\w]+)/,["delimiter.html",{token:"tag.html",next:"@otherTag"}]],[/(<\/)(\w+)/,["delimiter.html",{token:"tag.html",next:"@otherTag"}]],[/</,"delimiter.html"],[/[^<]+/]],doctype:[[/<\?((php)|=)?/,{token:"@rematch",switchTo:"@phpInSimpleState.comment"}],[/[^>]+/,"metatag.content.html"],[/>/,"metatag.html","@pop"]],comment:[[/<\?((php)|=)?/,{token:"@rematch",switchTo:"@phpInSimpleState.comment"}],[/-->/,"comment.html","@pop"],[/[^-]+/,"comment.content.html"],[/./,"comment.content.html"]],otherTag:[[/<\?((php)|=)?/,{token:"@rematch",switchTo:"@phpInSimpleState.otherTag"}],[/\/?>/,"delimiter.html","@pop"],[/"([^"]*)"/,"attribute.value"],[/'([^']*)'/,"attribute.value"],[/[\w\-]+/,"attribute.name"],[/=/,"delimiter"],[/[ 
\t\r\n]+/]],script:[[/<\?((php)|=)?/,{token:"@rematch",switchTo:"@phpInSimpleState.script"}],[/type/,"attribute.name","@scriptAfterType"],[/"([^"]*)"/,"attribute.value"],[/'([^']*)'/,"attribute.value"],[/[\w\-]+/,"attribute.name"],[/=/,"delimiter"],[/>/,{token:"delimiter.html",next:"@scriptEmbedded.text/javascript",nextEmbedded:"text/javascript"}],[/[ \t\r\n]+/],[/(<\/)(script\s*)(>)/,["delimiter.html","tag.html",{token:"delimiter.html",next:"@pop"}]]],scriptAfterType:[[/<\?((php)|=)?/,{token:"@rematch",switchTo:"@phpInSimpleState.scriptAfterType"}],[/=/,"delimiter","@scriptAfterTypeEquals"],[/>/,{token:"delimiter.html",next:"@scriptEmbedded.text/javascript",nextEmbedded:"text/javascript"}],[/[ \t\r\n]+/],[/<\/script\s*>/,{token:"@rematch",next:"@pop"}]],scriptAfterTypeEquals:[[/<\?((php)|=)?/,{token:"@rematch",switchTo:"@phpInSimpleState.scriptAfterTypeEquals"}],[/"([^"]*)"/,{token:"attribute.value",switchTo:"@scriptWithCustomType.$1"}],[/'([^']*)'/,{token:"attribute.value",switchTo:"@scriptWithCustomType.$1"}],[/>/,{token:"delimiter.html",next:"@scriptEmbedded.text/javascript",nextEmbedded:"text/javascript"}],[/[ \t\r\n]+/],[/<\/script\s*>/,{token:"@rematch",next:"@pop"}]],scriptWithCustomType:[[/<\?((php)|=)?/,{token:"@rematch",switchTo:"@phpInSimpleState.scriptWithCustomType.$S2"}],[/>/,{token:"delimiter.html",next:"@scriptEmbedded.$S2",nextEmbedded:"$S2"}],[/"([^"]*)"/,"attribute.value"],[/'([^']*)'/,"attribute.value"],[/[\w\-]+/,"attribute.name"],[/=/,"delimiter"],[/[ 
\t\r\n]+/],[/<\/script\s*>/,{token:"@rematch",next:"@pop"}]],scriptEmbedded:[[/<\?((php)|=)?/,{token:"@rematch",switchTo:"@phpInEmbeddedState.scriptEmbedded.$S2",nextEmbedded:"@pop"}],[/<\/script/,{token:"@rematch",next:"@pop",nextEmbedded:"@pop"}]],style:[[/<\?((php)|=)?/,{token:"@rematch",switchTo:"@phpInSimpleState.style"}],[/type/,"attribute.name","@styleAfterType"],[/"([^"]*)"/,"attribute.value"],[/'([^']*)'/,"attribute.value"],[/[\w\-]+/,"attribute.name"],[/=/,"delimiter"],[/>/,{token:"delimiter.html",next:"@styleEmbedded.text/css",nextEmbedded:"text/css"}],[/[ \t\r\n]+/],[/(<\/)(style\s*)(>)/,["delimiter.html","tag.html",{token:"delimiter.html",next:"@pop"}]]],styleAfterType:[[/<\?((php)|=)?/,{token:"@rematch",switchTo:"@phpInSimpleState.styleAfterType"}],[/=/,"delimiter","@styleAfterTypeEquals"],[/>/,{token:"delimiter.html",next:"@styleEmbedded.text/css",nextEmbedded:"text/css"}],[/[ \t\r\n]+/],[/<\/style\s*>/,{token:"@rematch",next:"@pop"}]],styleAfterTypeEquals:[[/<\?((php)|=)?/,{token:"@rematch",switchTo:"@phpInSimpleState.styleAfterTypeEquals"}],[/"([^"]*)"/,{token:"attribute.value",switchTo:"@styleWithCustomType.$1"}],[/'([^']*)'/,{token:"attribute.value",switchTo:"@styleWithCustomType.$1"}],[/>/,{token:"delimiter.html",next:"@styleEmbedded.text/css",nextEmbedded:"text/css"}],[/[ \t\r\n]+/],[/<\/style\s*>/,{token:"@rematch",next:"@pop"}]],styleWithCustomType:[[/<\?((php)|=)?/,{token:"@rematch",switchTo:"@phpInSimpleState.styleWithCustomType.$S2"}],[/>/,{token:"delimiter.html",next:"@styleEmbedded.$S2",nextEmbedded:"$S2"}],[/"([^"]*)"/,"attribute.value"],[/'([^']*)'/,"attribute.value"],[/[\w\-]+/,"attribute.name"],[/=/,"delimiter"],[/[ 
\t\r\n]+/],[/<\/style\s*>/,{token:"@rematch",next:"@pop"}]],styleEmbedded:[[/<\?((php)|=)?/,{token:"@rematch",switchTo:"@phpInEmbeddedState.styleEmbedded.$S2",nextEmbedded:"@pop"}],[/<\/style/,{token:"@rematch",next:"@pop",nextEmbedded:"@pop"}]],phpInSimpleState:[[/<\?((php)|=)?/,"metatag.php"],[/\?>/,{token:"metatag.php",switchTo:"@$S2.$S3"}],{include:"phpRoot"}],phpInEmbeddedState:[[/<\?((php)|=)?/,"metatag.php"],[/\?>/,{token:"metatag.php",switchTo:"@$S2.$S3",nextEmbedded:"$S3"}],{include:"phpRoot"}],phpRoot:[[/[a-zA-Z_]\w*/,{cases:{"@phpKeywords":{token:"keyword.php"},"@phpCompileTimeConstants":{token:"constant.php"},"@default":"identifier.php"}}],[/[$a-zA-Z_]\w*/,{cases:{"@phpPreDefinedVariables":{token:"variable.predefined.php"},"@default":"variable.php"}}],[/[{}]/,"delimiter.bracket.php"],[/[\[\]]/,"delimiter.array.php"],[/[()]/,"delimiter.parenthesis.php"],[/[ \t\r\n]+/],[/(#|\/\/)$/,"comment.php"],[/(#|\/\/)/,"comment.php","@phpLineComment"],[/\/\*/,"comment.php","@phpComment"],[/"/,"string.php","@phpDoubleQuoteString"],[/'/,"string.php","@phpSingleQuoteString"],[/[\+\-\*\%\&\|\^\~\!\=\<\>\/\?\;\:\.\,\@]/,"delimiter.php"],[/\d*\d+[eE]([\-+]?\d+)?/,"number.float.php"],[/\d*\.\d+([eE][\-+]?\d+)?/,"number.float.php"],[/0[xX][0-9a-fA-F']*[0-9a-fA-F]/,"number.hex.php"],[/0[0-7']*[0-7]/,"number.octal.php"],[/0[bB][0-1']*[0-1]/,"number.binary.php"],[/\d[\d']*/,"number.php"],[/\d/,"number.php"]],phpComment:[[/\*\//,"comment.php","@pop"],[/[^*]+/,"comment.php"],[/./,"comment.php"]],phpLineComment:[[/\?>/,{token:"@rematch",next:"@pop"}],[/.$/,"comment.php","@pop"],[/[^?]+$/,"comment.php","@pop"],[/[^?]+/,"comment.php"],[/./,"comment.php"]],phpDoubleQuoteString:[[/[^\\"]+/,"string.php"],[/@escapes/,"string.escape.php"],[/\\./,"string.escape.invalid.php"],[/"/,"string.php","@pop"]],phpSingleQuoteString:[[/[^\\']+/,"string.php"],[/@escapes/,"string.escape.php"],[/\\./,"string.escape.invalid.php"],[/'/,"string.php","@pop"]]},phpKeywords:["abstract","and","array","as","br
eak","callable","case","catch","cfunction","class","clone","const","continue","declare","default","do","else","elseif","enddeclare","endfor","endforeach","endif","endswitch","endwhile","extends","false","final","for","foreach","function","global","goto","if","implements","interface","instanceof","insteadof","namespace","new","null","object","old_function","or","private","protected","public","resource","static","switch","throw","trait","try","true","use","var","while","xor","die","echo","empty","exit","eval","include","include_once","isset","list","require","require_once","return","print","unset","yield","__construct"],phpCompileTimeConstants:["__CLASS__","__DIR__","__FILE__","__LINE__","__NAMESPACE__","__METHOD__","__FUNCTION__","__TRAIT__"],phpPreDefinedVariables:["$GLOBALS","$_SERVER","$_GET","$_POST","$_FILES","$_REQUEST","$_SESSION","$_ENV","$_COOKIE","$php_errormsg","$HTTP_RAW_POST_DATA","$http_response_header","$argc","$argv"],escapes:/\\(?:[abfnrtv\\"']|x[0-9A-Fa-f]{1,4}|u[0-9A-Fa-f]{4}|U[0-9A-Fa-f]{8})/}}}]); //# sourceMappingURL=2909.28b39275.chunk.js.map
PypiClean
/OASYS1-shadow4-0.0.10.tar.gz/OASYS1-shadow4-0.0.10/orangecontrib/shadow4/widgets/optics/ow_crl.py
from orangewidget.settings import Setting
from oasys.widgets import gui as oasysgui

from syned.beamline.shape import Circle

from shadow4.beamline.optical_elements.refractors.s4_crl import S4CRL, S4CRLElement

from orangecontrib.shadow4.widgets.gui.ow_abstract_lens import OWAbstractLens


class OWCRL(OWAbstractLens):
    """OASYS widget wrapping a shadow4 Compound Refractive Lens (CRL).

    Extends the abstract lens widget with the two CRL-specific settings
    (number of stacked lenses and their piling thickness) and builds the
    corresponding S4CRL optical element from the widget state.
    """

    name = "Compound Refractive Lens"
    description = "Shadow Compound Refractive Lens"
    icon = "icons/crl.png"
    priority = 2.2

    # Number of identical lenses stacked to form the CRL.
    n_lens = Setting(10)
    # Distance between consecutive lenses, entered in mm in the GUI.
    piling_thickness = Setting(2.5)

    def __init__(self):
        super().__init__()

    def create_basic_settings_subtabs(self, tabs_basic_settings):
        """Create the empty 'CRL' tab; populated later by
        populate_basic_setting_subtabs()."""
        return oasysgui.createTabPage(tabs_basic_settings, "CRL")  # to be populated

    def populate_basic_setting_subtabs(self, basic_setting_subtabs):
        """Add the CRL-specific line edits, then delegate to the base class."""
        crl_box = oasysgui.widgetBox(basic_setting_subtabs, "CRL Parameters", addSpace=False, orientation="vertical", height=90)

        oasysgui.lineEdit(crl_box, self, "n_lens", "Number of lenses", labelWidth=260, valueType=int, orientation="horizontal")
        oasysgui.lineEdit(crl_box, self, "piling_thickness", "Piling thickness [mm]", labelWidth=260, valueType=float, orientation="horizontal")

        super(OWCRL, self).populate_basic_setting_subtabs(basic_setting_subtabs)

    def get_optical_element_instance(self):
        """Build and return the S4CRL optical element from the widget settings.

        GUI lengths are converted to SI: diameter/interthickness/radius are
        entered in um, piling thickness in mm.
        """
        try:
            name = self.getNode().title
        except Exception:
            # FIX: was a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit. Fall back to a default title.
            name = "Compound Refractive Lens"

        um_to_si = 1e-6
        mm_to_si = 1e-3

        # NOTE(review): has_finite_diameter == 0 selects the circular
        # aperture -- presumably index 0 of the combo box means "finite
        # diameter"; confirm against OWAbstractLens.
        if self.has_finite_diameter == 0:
            boundary_shape = Circle(radius=um_to_si * self.diameter * 0.5)
        else:
            boundary_shape = None

        # cylinder_angle == 0 encodes "not cylindrical"; otherwise the GUI
        # index is shifted by one (assumed S4CRL convention -- confirm).
        if self.is_cylinder == 1:
            cylinder_angle = self.cylinder_angle + 1
        else:
            cylinder_angle = 0

        return S4CRL(name=name,
                     n_lens=self.n_lens,
                     piling_thickness=self.piling_thickness * mm_to_si,
                     boundary_shape=boundary_shape,
                     material="",  # not used
                     thickness=self.interthickness * um_to_si,
                     surface_shape=self.surface_shape,
                     convex_to_the_beam=self.convex_to_the_beam,
                     cylinder_angle=cylinder_angle,
                     ri_calculation_mode=self.ri_calculation_mode,
                     prerefl_file=self.prerefl_file,
                     refraction_index=self.refraction_index,
                     attenuation_coefficient=self.attenuation_coefficient,
                     radius=self.radius * um_to_si,
                     conic_coefficients=None)  # TODO: add conic coefficient shape to the GUI

    def get_beamline_element_instance(self):
        """Return the beamline-element wrapper for the CRL."""
        return S4CRLElement()
PypiClean
/DoggoMenus-1.0.3-py3-none-any.whl/doggomenus/selection.py
from .errors import NotEnoughItemsError, InvalidCommandType
from .command import Command
from getch import getch
import sys
import os
import shutil


class SelectionMenu:
    """Arrow-key driven console selection menu.

    Items are Command objects or nested SelectionMenu objects. Unless
    constructed with ``exit_cmd=False``, an "Exit" command is appended
    automatically (it pops back to the parent menu, or quits at top level).
    """

    def __init__(self, *items, **kwargs):
        cmd_list = kwargs.get("cmd_list")
        self.items = cmd_list if cmd_list else list(items)
        self.count = len(self.items) + 1  # +1 reserves a slot for the Exit command
        self.index = 0
        self.activated_child = None
        self.parent = None
        self.is_prev = False
        self.title = kwargs.get("title")
        self.exit_cmd = True
        # Only an explicit exit_cmd=False disables the automatic Exit entry
        # (None -- i.e. not passed -- keeps it enabled).
        if kwargs.get("exit_cmd") == False:
            self.count -= 1
            self.exit_cmd = False
        if self.count == 0:
            raise NotEnoughItemsError("Not enough items provided")
        for pos, x in enumerate(self.items):
            # FIX: use the enumerated position instead of items.index(x),
            # which reports the first occurrence for duplicate items.
            if not isinstance(x, (Command, SelectionMenu)):
                raise InvalidCommandType(f"Item {pos} is type {type(x)} and is not a Command or SelectionMenu.")
            if isinstance(x, SelectionMenu):
                x.parent = self
                x.is_prev = True
        if self.exit_cmd:
            self.items.append(Command("Exit", self._exit))

    def empty_function(self):
        """Default no-op callback for the on_select / on_move hooks."""
        pass

    def _clear(self):
        """Clear the terminal (cls on Windows, clear elsewhere)."""
        if sys.platform == "win32":
            os.system("cls")
            return
        os.system("clear")

    def _select(self):
        """Activate the highlighted item: run a Command or descend into a submenu."""
        item = self.items[self.index]
        if isinstance(item, Command):
            self._clear()
            item._execute()
        elif isinstance(item, SelectionMenu):
            self.activated_child = item

    def _menu_up(self):
        """Move the highlight up one entry, wrapping to the bottom."""
        self.index = (self.index - 1) % self.count

    def _menu_down(self):
        """Move the highlight down one entry, wrapping to the top."""
        self.index = (self.index + 1) % self.count

    def _get_input(self):
        """Read one keypress and dispatch navigation / selection.

        Hooks (on_select/on_move) live on the top-level menu, so a child
        menu fires its parent's hooks.
        """
        a = getch()
        if a == b'\x03':  # Ctrl-C
            raise KeyboardInterrupt()
        if a == b'\x1a':  # Ctrl-Z / EOF
            raise EOFError()
        owner = self.parent if self.parent else self
        if a == b'\r':  # Enter
            owner.on_select()
            self._select()
            return
        # FIX: the original tested `a == b'\x00' or b'\xe0'`, which is always
        # true (b'\xe0' is a truthy literal), so *every* key consumed a second
        # getch(). Windows extended keys arrive as a two-byte sequence
        # prefixed by \x00 or \xe0.
        if a in (b'\x00', b'\xe0'):
            b = getch()
            if b == b'H':  # Up arrow
                owner.on_move()
                self._menu_up()
            elif b == b'P':  # Down arrow
                owner.on_move()
                self._menu_down()
            # Left (b'K') / Right (b'M') arrows intentionally unhandled.

    def _print_title(self):
        """Print the title bar padded with '#' to the terminal width."""
        width = shutil.get_terminal_size().columns
        if self.title:
            print(f" {self.title} ".center(width, "#"))
        else:
            print("#" * width)

    def _exit(self):
        """Exit command: pop back to the parent menu, or quit the program."""
        if self.parent:
            self.parent.activated_child = None
            return
        self._clear()
        sys.exit(0)

    def __str__(self):
        return self.title

    def run(self, **kwargs):
        """Main loop: render the active (sub)menu and process keypresses.

        Keyword hooks: on_select, on_move -- no-argument callables invoked
        on Enter and on arrow navigation respectively.
        """
        self.on_select = kwargs.get("on_select", self.empty_function)
        self.on_move = kwargs.get("on_move", self.empty_function)
        while True:
            self._clear()
            self._print_title()
            active = self.activated_child if self.activated_child else self
            for i, item in enumerate(active.items):
                marker = ">" if i == active.index else " "
                print(f"{marker} {item}")
            print("#" * shutil.get_terminal_size().columns)
            active._get_input()
PypiClean
/BitBucket-0.4a.tar.gz/BitBucket-0.4a/bitbucket.py
import md5 import s3amazon.S3 import httplib import urllib import ConfigParser import os import string import mimetypes import xml.sax import pdb # # Canned Access Policies # _canned_access_policies = ('private', 'public-read', 'public-read-write', 'authenticated-read') # # Dictionary mapping mime types to content filters # _content_filter_map = {} def add_content_filter(content_type, filter): _content_filter_map[content_type] = filter filter.content_type = content_type def get_content_filter(content_type): if _content_filter_map.has_key(content_type): return _content_filter_map[content_type] else: return None def remove_content_filter(content_type): del _content_filter_map[content_type] def filter_bits(bits): filter = get_content_filter(bits.content_type) if filter: filter.filter(bits) class ContentFilter: def __init__(self): self.content_type = None def filter(self, bits): print 'filter[%s] - filtering %s' % (self.content_type, bits.filename) class BBHeadResponse(s3amazon.S3.GetResponse): def __init__(self, http_response): self.http_response = http_response response_headers = http_response.msg # older pythons don't have getheaders self.metadata = self.get_aws_metadata(response_headers) # Need to read the body even though we now it's not there if http_response.status == 200: http_response.read() else: # -- gross hack -- # httplib gets confused with chunked responses to HEAD requests # so I have to fake it out http_response.chunked = 0 http_response.read() class BBListBucketResponse(s3amazon.S3.Response): def __init__(self, http_response): s3amazon.S3.Response.__init__(self, http_response) if http_response.status < 300: handler = BBListBucketHandler() xml.sax.parseString(self.body, handler) self.entries = handler.entries self.is_truncated = handler.is_truncated else: self.entries = [] class BBListBucketHandler(xml.sax.ContentHandler): def __init__(self): self.entries = [] self.curr_entry = None self.curr_text = '' def startElement(self, name, attrs): if name == 
'Contents': self.curr_entry = Bits() elif name == 'Owner': self.curr_entry.owner = s3amazon.S3.Owner() def endElement(self, name): if name == 'Contents': self.entries.append(self.curr_entry) elif name == 'Key': self.curr_entry.key = self.curr_text elif name == 'LastModified': self.curr_entry.last_modified = self.curr_text elif name == 'ETag': self.curr_entry.etag = self.curr_text elif name == 'Size': self.curr_entry.size = int(self.curr_text) elif name == 'ID': self.curr_entry.owner.id = self.curr_text elif name == 'DisplayName': self.curr_entry.owner.display_name = self.curr_text elif name == 'StorageClass': self.curr_entry.storage_class = self.curr_text elif name == 'IsTruncated': self.is_truncated = self.curr_text self.curr_text = '' def characters(self, content): self.curr_text += content # # Exception classes - Subclassing allows you to check for specific errors # class BitBucketError(Exception): def __init__(self, reason): self.reason = reason def __repr__(self): return 'BitBucketError: %s' % self.reason def __str__(self): return 'BitBucketError: %s' % self.reason class BitBucketResponseError(BitBucketError): def __init__(self, status, reason): BitBucketError.__init__(self, reason) self.status = status self.reason = reason def __repr__(self): return 'BitBucketError[%d]: %s' % (self.status, self.reason) def __str__(self): return 'BitBucketError[%d]: %s' % (self.status, self.reason) class BitBucketTypeError(BitBucketError): pass class BitBucketEmptyError(BitBucketError): pass class BitBucketDataError(BitBucketError): pass class BitBucketCreateError(BitBucketResponseError): pass # State Constants BITS_LOOSE=0 # Bits object not yet associated with a BitBucket object BITS_NEED_READ=1 # data in the Bits object needs to be read from S3 BITS_NEED_WRITE=2 # data in the Bits object needs to be written to S3 BITS_IN_SYNC=3 # data in the Bits object is consistent with S3 class Bits(s3amazon.S3.S3Object): def __init__(self, filename=None): self.state = BITS_LOOSE 
self.metadata = {} self.bucket = None self.content_type = 'application/octet-stream' self.filename = filename self.etag = None self.key = None self.last_modified = None self.owner = None self.storage_class = None def __getattr__(self, name): if name == 'data': if self.state == BITS_NEED_READ: if self.bucket: self.bucket.get_bits(self) return self._data elif name in self.metadata: return self.metadata[name] else: raise AttributeError def __setattr__(self, name, value): if name == 'data': self._data = value if value: self.__dict__['size'] = len(value) if self.bucket: self.bucket.send_bits(self) elif name == 'filename': self.__dict__[name] = value if value: self.sync() else: self.__dict__[name] = value def __getitem__(self, key): return self.metadata[key] def __setitem__(self, key, value): self.metadata[key] = value def __delitem__(self, key): del self.metadata[key] def _compute_md5(self): m = md5.new() p = open(self.filename, 'rb') s = p.read(8192) while s: m.update(s) s = p.read(8192) self.md5 = '"%s"' % m.hexdigest() p.close() def sync(self): if self.filename: if os.path.exists(self.filename): self.size = os.stat(self.filename).st_size self._compute_md5() self.content_type = mimetypes.guess_type(self.filename)[0] filter_bits(self) self.state = BITS_NEED_WRITE if self.bucket: self.bucket.send_file(self) else: self.state = BITS_NEED_READ if self.bucket: self.bucket.get_file(self, self.filename) else: self.state = BITS_NEED_WRITE self.bucket.send_bits(self) def get_url(self, expires_in=60): if self.bucket: return self.bucket.generate_url('get', self, expires_in) else: raise BitBucketError("Bits aren't associated with a BitBucket yet") def delete_url(self, expires_in=60): if self.bucket: return self.bucket.generate_url('delete', self, expires_in) else: raise BitBucketError("Bits aren't associated with a BitBucket yet") def set_canned_acl(self, policy): if policy not in _canned_access_policies: raise BitBucketError('Invalid acl_policy: %s' % policy) 
self.bucket.connection.set_canned_acl(self, policy) def get_acl(self): return self.bucket.connection.get_acl(self) def to_file(self, filename): if self.bucket != None: self.bucket.get_file(self, filename) else: raise BitBucketError("Bits aren't associated with a BitBucket yet") def BitBucketGenerator(bucket): bucket.options['max-keys'] = bucket.page_size finished = False last_key = None while not finished: if last_key: bucket.options['marker'] = last_key resp = bucket.connection.list_bucket(bucket.name, bucket.options) if resp.http_response.status == 200: for b in resp.entries: b.state = BITS_NEED_READ b.bucket = bucket key = bucket.get_local_key(b.key) b.key = key last_key = key yield b if resp.is_truncated != 'true': finished = True else: raise BitBucketResponseError(resp.http_response.status, resp.http_response.reason) class BitBucket: def __init__(self, name, connection, prefix=None, page_size=100, debug=None): self.options = {} self.connection = connection if prefix: self.options['prefix'] = prefix self.page_size = page_size self.debug = debug self.name = self.connection.bucket_name_prefix + name self._cache = {} def get_head(self, key): self._resp = self.connection.head(self.name, key) if self._resp.http_response.status == 200: bits = Bits() bits.bucket = self bits.metadata = self._resp.metadata http_resp = self._resp.http_response bits.etag = http_resp.getheader('etag') bits.content_type = http_resp.getheader('content-type') bits.last_modified = http_resp.getheader('last-modified') bits.state = BITS_NEED_READ self._cache[key] = bits bits.key = key return bits elif self._resp.http_response.status == 404: return None else: raise BitBucketResponseError(self._resp.http_response.status, self._resp.http_response.reason) def fetch_all_keys(self, last_key=None): if last_key: self.options['marker'] = last_key self._resp = self.connection.list_bucket(self.name, self.options) if self._resp.http_response.status == 200: for b in self._resp.entries: b.state = 
BITS_NEED_READ b.bucket = self key = self.get_local_key(b.key) b.key = key self._cache[b.key] = b last_key = key if self._resp.is_truncated == 'true': if self.debug: print '__list__: getting next page - last_key=%s' % last_key self.fetch_all_keys(last_key) else: raise BitBucketResponseError(self._resp.http_response.status, self._resp.http_response.reason) def delete_all_keys(self): self.options['max-keys'] = self.page_size finished = False last_key = None while not finished: if last_key: self.options['marker'] = last_key resp = self.connection.list_bucket(self.name, self.options) if resp.http_response.status == 200: for b in resp.entries: key = b.key last_key = key if self.debug: print '[delete_all_keys]: deleting %s' % key self._resp = self.connection.delete(self.name, self.get_aws_key(key)) if self._resp.http_response.status != 204: raise BitBucketResponseError(self._resp.http_response.status, self._resp.http_response.reason) if resp.is_truncated != 'true': finished = True else: raise BitBucketResponseError(resp.http_response.status, resp.http_response.reason) def get_acl(self): return self.connection.get_acl(self) def set_canned_acl(self, policy): if policy not in _canned_access_policies: raise BitBucketError('Invalid acl_policy: %s' % policy) self.connection.set_canned_acl(self, policy) def __repr__(self): return 'BitBucket(%s)' % self.name def __len__(self): return len(self._cache.keys()) def __getitem__(self, key): if self._cache.has_key(key): return self._cache[key] bits = self.get_head(key) if bits: return bits else: raise KeyError(key) def __setitem__(self, key, bits): bits.key = key bits.bucket = self if bits.filename: self.send_file(bits) else: self.send_bits(bits) self._cache[bits.key] = bits def __delitem__(self, key): del self._cache[key] self._resp = self.connection.delete(self.name, self.get_aws_key(key)) if self._resp.http_response.status != 204: raise BitBucketResponseError(self._resp.http_response.status, self._resp.http_response.reason) def 
__contains__(self, item): if item in self._cache: return True if self.get_head(key): return True else: return False def __iter__(self): return BitBucketGenerator(self) def get_local_key(self, key): if self.options.has_key('prefix'): return key[len(self.options['prefix']):] else: return key def get_aws_key(self, key): if self.options.has_key('prefix'): return self.options['prefix'] + key else: return key def generate_url(self, request, bits=None, expires_in=60): if bits: if not isinstance(bits, Bits): raise BitBucketTypeError('Value must be of type Bits') self.connection.query_gen.set_expires_in(expires_in) if request == 'get': return self.connection.query_gen.get(self.name, bits.key) elif request == 'delete': return self.connection.query_gen.delete(self.name, bits.key) else: raise BitBucketError('Invalid request: %s' % request) def send_bits(self, bits): if not isinstance(bits, Bits): raise BitBucketTypeError('Value must be of type Bits') if not bits.data: raise BitBucketEmptyError("Can't write empty Bits to S3") etag = '"%s"' % md5.new(bits.data).hexdigest() # compare the hash of current data to etag on Bits object # if they are the same, there is no need to send data to S3 if etag != bits.etag: if self.debug: print '[send_bits] sending %s' % bits.key headers = {'ETag':etag} if bits.content_type: headers['Content-Type'] = bits.content_type self._resp = self.connection.put(self.name, self.get_aws_key(bits.key), bits, headers) if self._resp.http_response.status != 200: raise BitBucketResponseError(self._resp.http_response.status, self._resp.http_response.reason) if etag != self._resp.http_response.getheader('etag'): raise BitBucketDataError("ETags don't match") else: if self.debug: print '[send_bits] skipping %s' % bits.key bits.etag = etag bits.size = len(bits.data) bits.state = BITS_IN_SYNC def send_file(self, bits): if not isinstance(bits, Bits): raise BitBucketTypeError('Value must be of type Bits') # compare the hash of current data to etag on Bits object # if 
they are the same, there is no need to send data to S3 if bits.etag != bits.md5: if self.debug: print '[send_file] sending %s' % bits.key headers = {'ETag':bits.md5} if bits.content_type: headers['Content-Type'] = bits.content_type self._resp = self.connection.put_file(bits, headers) if self._resp.http_response.status != 200: raise BitBucketResponseError(self._resp.http_response.status, self._resp.http_response.reason) bits.etag = self._resp.http_response.getheader('etag') if bits.etag != bits.md5: raise BitBucketDataError("ETags don't match") else: if self.debug: print '[send_file] skipping %s' % bits.key bits.state = BITS_IN_SYNC def get_bits(self, bits): if not isinstance(bits, Bits): raise BitBucketTypeError('Value must be of type Bits') if bits.bucket != self: raise BitBucketError('Those are not my Bits') if self.debug: print '[get_bits] getting %s' % bits.key self._resp = self.connection.get(self.name, self.get_aws_key(bits.key)) if self._resp.http_response.status == 200: bits._data = self._resp.object.data bits.metadata = self._resp.object.metadata bits.state = BITS_IN_SYNC else: raise BitBucketResponseError(self._resp.http_response.status, self._resp.http_response.reason) def get_file(self, bits, filename): if not isinstance(bits, Bits): raise BitBucketTypeError('Value must be of type Bits') if self.debug: print '[get_file] getting %s' % bits.key self._resp = self.connection.get_file(bits, filename) if self._resp.status != 200: raise BitBucketResponseError(self._resp.status, self._resp.reason) #bits.etag = self._resp.getheader('etag') bits.state = BITS_IN_SYNC def keys(self): return self._cache.keys() def values(self): return self._cache.values() def items(self): return self._cache.items() def has_key(self, key): if self._cache.has_key(key): return True if self.get_head(key): return True else: return False # # You have a couple of choices for getting your individual keys in. # 1. You could pass them to the bitbucket connect method # 2. 
# 2. (cont.) You could add them to this dictionary
# 3. You could place them in a file called bitbucket.cfg and put that
#    file in either your home directory or in the directory you are
#    running the code in. The file needs to look like this:
#-------------------------------------------------------
# [DEFAULT]
# AccessKeyID: YourAccessKeyHere
# SecretAccessKey: YourSecretAccessKeyHere
# Debug: 1
#-------------------------------------------------------

# Fallback settings used when neither constructor arguments nor a
# bitbucket.cfg file supply a value.
BB_DEFAULTS = {'AccessKeyID': '',
               'SecretAccessKey': '',
               'BucketNamePrefix': '',
               'PageSize': '100',
               'Debug': '0'}


#
# Just wanted to subclass to include an implementation of HEAD
#
class BBConnection(s3amazon.S3.AWSAuthConnection):
    """Authenticated S3 connection that behaves like a dict of BitBuckets.

    Buckets discovered on the server are cached in ``self._buckets`` keyed
    by bucket name; the mapping dunders (``__getitem__`` etc.) operate on
    that cache.
    """

    # Chunk size (bytes) used when streaming file bodies to/from S3.
    BufferSize = 1024

    def __init__(self, access_key=None, secret_key=None, is_secure=True,
                 server=s3amazon.S3.DEFAULT_HOST, port=None,
                 bucket_name_prefix=None, page_size=None):
        self._buckets = {}
        # check to see if there is a bitbucket.cfg file
        # if so, we use those values unless values are passed to constructor
        self.config = ConfigParser.ConfigParser(BB_DEFAULTS)
        self.config.read(['./bitbucket.cfg',
                          os.path.expanduser('~/bitbucket.cfg')])
        if access_key:
            self.access_key = access_key
        else:
            self.access_key = self.config.get(ConfigParser.DEFAULTSECT,
                                              'AccessKeyID')
        if secret_key:
            self.secret_key = secret_key
        else:
            self.secret_key = self.config.get(ConfigParser.DEFAULTSECT,
                                              'SecretAccessKey')
        if bucket_name_prefix:
            self.bucket_name_prefix = bucket_name_prefix
        else:
            self.bucket_name_prefix = self.config.get(ConfigParser.DEFAULTSECT,
                                                      'BucketNamePrefix')
        if page_size:
            self.page_size = page_size
        else:
            self.page_size = self.config.getint(ConfigParser.DEFAULTSECT,
                                                'PageSize')
        self.debug = self.config.getint(ConfigParser.DEFAULTSECT, 'Debug')
        s3amazon.S3.AWSAuthConnection.__init__(self, self.access_key,
                                               self.secret_key, is_secure,
                                               server, port)
        # if debug>1 then turn on httplib debug as well
        if self.debug > 1:
            self.connection.debuglevel = self.debug
        self.query_gen = s3amazon.S3.QueryStringAuthGenerator(self.access_key,
                                                              self.secret_key)
        self.get_all_buckets()

    def __len__(self):
        return len(self._buckets)

    def __getitem__(self, key):
        if key in self._buckets:
            return self._buckets[key]
        raise KeyError(key)

    def __delitem__(self, key):
        # BUGFIX: the original removed the cache entry and then called
        # self.delete_bucket(self.name) -- BBConnection has no ``name``
        # attribute, so `del conn[key]` always raised AttributeError.
        # delete_bucket() performs the remote DELETE, prunes the cache on a
        # 204 reply, and raises BitBucketResponseError otherwise.
        self.delete_bucket(key)

    def __contains__(self, item):
        return item in self._buckets

    def keys(self):
        return self._buckets.keys()

    def values(self):
        return self._buckets.values()

    def items(self):
        return self._buckets.items()

    def has_key(self, key):
        # Kept for the legacy (py2-style) mapping interface.
        return key in self._buckets

    def head(self, bucket, key, headers={}):
        """Issue an HTTP HEAD for *key* in *bucket* (metadata only)."""
        return BBHeadResponse(
            self.make_request('HEAD',
                              '%s/%s' % (bucket, urllib.quote_plus(key)),
                              headers))

    #
    # This method mainly stolen from Emanuele Ruffaldi's S3 Tool
    #
    def put_file(self, bits, headers={}):
        """Stream ``bits.filename`` to S3 in BufferSize chunks.

        Raises BitBucketTypeError unless *bits* is a Bits instance.
        """
        if not isinstance(bits, Bits):
            raise BitBucketTypeError('Must pass Bits to put_file')
        final_headers = s3amazon.S3.merge_meta(headers, bits.metadata)
        path = '%s/%s' % (bits.bucket.name, urllib.quote_plus(bits.key))
        self.add_aws_auth_header(final_headers, 'PUT', path)
        self.connection.putrequest('PUT', '/' + path)
        final_headers["Content-Length"] = bits.size
        for key in final_headers:
            self.connection.putheader(key, final_headers[key])
        self.connection.endheaders()
        # try/finally so the file handle is not leaked if send() fails.
        fp = open(bits.filename, 'rb')
        try:
            chunk = fp.read(self.BufferSize)
            while len(chunk) > 0:
                self.connection.send(chunk)
                chunk = fp.read(self.BufferSize)
        finally:
            fp.close()
        return s3amazon.S3.Response(self.connection.getresponse())

    def get_file(self, bits, filename, headers={}):
        """Download ``bits.key`` into *filename*.

        Metadata headers are folded back into *bits*; size and etag are
        updated from the response headers.
        """
        if not isinstance(bits, Bits):
            raise BitBucketTypeError('Must pass Bits to get_file')
        final_headers = s3amazon.S3.merge_meta(headers, bits.metadata)
        path = '%s/%s' % (bits.bucket.name, urllib.quote_plus(bits.key))
        self.add_aws_auth_header(final_headers, 'GET', path)
        self.connection.putrequest('GET', '/' + path)
        for key in final_headers:
            self.connection.putheader(key, final_headers[key])
        self.connection.endheaders()
        resp = self.connection.getresponse()
        response_headers = resp.msg
        # Iterate over a snapshot of the keys: metadata entries are deleted
        # from response_headers as they are folded into *bits*.
        for key in list(response_headers.keys()):
            if key.lower().startswith(s3amazon.S3.METADATA_PREFIX):
                bits[key[len(s3amazon.S3.METADATA_PREFIX):]] = response_headers[key]
                del response_headers[key]
            elif key.lower() == 'content-length':
                bits.size = response_headers[key]
            elif key.lower() == 'etag':
                bits.etag = response_headers[key]
        fp = open(filename, 'wb')
        try:
            chunk = resp.read(self.BufferSize)
            while len(chunk) > 0:
                fp.write(chunk)
                chunk = resp.read(self.BufferSize)
        finally:
            fp.close()
        # Drain the response so the connection can be reused.
        resp.read()
        return resp

    def list_bucket(self, bucket, options={}, headers={}):
        """GET a bucket listing; *options* become querystring parameters."""
        path = bucket
        if options:
            path += '?' + '&'.join(
                ["%s=%s" % (param, urllib.quote_plus(str(options[param])))
                 for param in options])
        return BBListBucketResponse(self.make_request('GET', path, headers))

    def get_all_buckets(self):
        """Populate the local cache with one BitBucket per remote bucket."""
        r = self.list_all_my_buckets()
        for entry in r.entries:
            b = BitBucket(entry.name, self, page_size=self.page_size,
                          debug=self.debug)
            self._buckets[entry.name] = b

    def get_bucket(self, bucket_name, prefix=None, fetch_keys=False):
        """Return the cached BitBucket, creating the bucket remotely if needed.

        NOTE(review): *prefix* is accepted but unused here -- kept for
        backward compatibility; confirm against callers before removing.
        """
        if bucket_name in self._buckets:
            return self._buckets[bucket_name]
        self._resp = self.create_bucket(bucket_name)
        status = self._resp.http_response.status
        if status == 409:
            raise BitBucketCreateError(self._resp.http_response.status,
                                       self._resp.http_response.reason)
        if status == 200:
            b = BitBucket(bucket_name, self, page_size=self.page_size,
                          debug=self.debug)
            self._buckets[bucket_name] = b
            if fetch_keys:
                b.fetch_all_keys()
            return b
        raise BitBucketResponseError(self._resp.http_response.status,
                                     self._resp.http_response.reason)

    def delete_bucket(self, bucket_name):
        """Delete *bucket_name* remotely and drop it from the local cache.

        Raises BitBucketResponseError unless S3 answers 204 No Content.
        """
        self._resp = s3amazon.S3.AWSAuthConnection.delete_bucket(self,
                                                                 bucket_name)
        status = self._resp.http_response.status
        if status == 204:
            if bucket_name in self._buckets:
                del self._buckets[bucket_name]
        else:
            raise BitBucketResponseError(self._resp.http_response.status,
                                         self._resp.http_response.reason)

    def get_acl(self, b):
        """Return the ACL document for a Bits or BitBucket object."""
        if isinstance(b, Bits):
            b_name = b.bucket.name
            key = b.key
        elif isinstance(b, BitBucket):
            b_name = b.name
            key = ''
        else:
            raise BitBucketError('Must pass Bits or BitBucket object')
        self._resp = s3amazon.S3.AWSAuthConnection.get_acl(self, b_name, key)
        return self._resp.body

    def set_canned_acl(self, b, policy):
        """Apply a canned ACL *policy* (x-amz-acl) to a Bits or BitBucket."""
        if isinstance(b, Bits):
            b_name = b.bucket.name
            key = b.key
        elif isinstance(b, BitBucket):
            b_name = b.name
            key = ''
        else:
            raise BitBucketError('Must pass Bits or BitBucket object')
        headers = {'x-amz-acl': policy}
        self._resp = self.put_acl(b_name, key, '', headers)


def connect(access_key=None, secret_key=None):
    """Convenience factory for a BBConnection."""
    return BBConnection(access_key, secret_key)
PypiClean
/ORE_strhub-0.0.1-py3-none-any.whl/strhub/models/trba/prediction.py
import torch
import torch.nn as nn
import torch.nn.functional as F


class Attention(nn.Module):
    """Bahdanau-style attention decoder head (TRBA prediction stage).

    Consumes the encoder's contextual features and emits, one step at a
    time, a distribution over character classes:
    [batch_size x num_steps x num_class].
    """

    def __init__(self, input_size, hidden_size, num_class, num_char_embeddings=256):
        super().__init__()
        # Submodule creation order is kept stable so seeded parameter
        # initialization stays reproducible.
        self.attention_cell = AttentionCell(input_size, hidden_size, num_char_embeddings)
        self.hidden_size = hidden_size
        self.num_class = num_class
        self.generator = nn.Linear(hidden_size, num_class)
        self.char_embeddings = nn.Embedding(num_class, num_char_embeddings)

    def forward(self, batch_H, text, max_label_length=25):
        """Decode per-step class distributions from encoder features.

        Args:
            batch_H: contextual feature H = hidden state of encoder,
                [batch_size x num_steps x num_class].
            text: the text-index of each image,
                [batch_size x (max_length + 1)]; the +1 slot is the [SOS]
                token (text[:, 0] = [SOS]).
            max_label_length: maximum decoded label length.

        Returns:
            probability distribution at each step,
            [batch_size x num_steps x num_class].
        """
        bsz = batch_H.size(0)
        steps = max_label_length + 1  # +1 for [EOS] at end of sentence

        # LSTM state tuple: (hidden, cell), both zero-initialized.
        state = (batch_H.new_zeros((bsz, self.hidden_size), dtype=torch.float),
                 batch_H.new_zeros((bsz, self.hidden_size), dtype=torch.float))

        if self.training:
            # Teacher forcing: feed the ground-truth previous character
            # f(y_{t-1}) at every step.
            dec_states = batch_H.new_zeros((bsz, steps, self.hidden_size),
                                           dtype=torch.float)
            for t in range(steps):
                emb = self.char_embeddings(text[:, t])
                state, _ = self.attention_cell(state, batch_H, emb)
                dec_states[:, t, :] = state[0]  # index 0 = LSTM hidden
            return self.generator(dec_states)

        # Greedy inference: feed back the argmax of the previous step.
        prev_tokens = text[0].expand(bsz)  # should be filled with [SOS]
        probs = batch_H.new_zeros((bsz, steps, self.num_class),
                                  dtype=torch.float)
        for t in range(steps):
            emb = self.char_embeddings(prev_tokens)
            state, _ = self.attention_cell(state, batch_H, emb)
            step_logits = self.generator(state[0])
            probs[:, t, :] = step_logits
            prev_tokens = step_logits.argmax(1)
        return probs


class AttentionCell(nn.Module):
    """One decoding step: additive attention over encoder states + LSTMCell."""

    def __init__(self, input_size, hidden_size, num_embeddings):
        super().__init__()
        self.i2h = nn.Linear(input_size, hidden_size, bias=False)
        self.h2h = nn.Linear(hidden_size, hidden_size)  # either i2h or h2h should have bias
        self.score = nn.Linear(hidden_size, 1, bias=False)
        self.rnn = nn.LSTMCell(input_size + num_embeddings, hidden_size)
        self.hidden_size = hidden_size

    def forward(self, prev_hidden, batch_H, char_embeddings):
        # Project encoder states and previous decoder hidden into a shared
        # space: [B x T x C] -> [B x T x H] and [B x H] -> [B x 1 x H].
        proj_keys = self.i2h(batch_H)
        proj_query = self.h2h(prev_hidden[0]).unsqueeze(1)
        scores = self.score(torch.tanh(proj_keys + proj_query))  # [B x T x 1]
        attn = F.softmax(scores, dim=1)
        # Attention-weighted sum of encoder states -> context vector [B x C].
        ctx = torch.bmm(attn.permute(0, 2, 1), batch_H).squeeze(1)
        rnn_in = torch.cat([ctx, char_embeddings], 1)  # [B x (C + E)]
        next_state = self.rnn(rnn_in, prev_hidden)
        return next_state, attn
PypiClean
/Booktype-1.5.tar.gz/Booktype-1.5/lib/booki/site_static/js/tiny_mce/plugins/table/editor_plugin.js
// TinyMCE 3.x "table" plugin -- MINIFIED build artifact (editor_plugin.js).
// NOTE(review): this is minifier output; the readable source is the
// corresponding editor_plugin_src.js in the TinyMCE source tree. Do not
// hand-edit the code below -- it is preserved byte-for-byte; regenerate it
// from the unminified source instead.
(function(d){var e=d.each;function c(g,h){var j=h.ownerDocument,f=j.createRange(),k;f.setStartBefore(h);f.setEnd(g.endContainer,g.endOffset);k=j.createElement("body");k.appendChild(f.cloneContents());return k.innerHTML.replace(/<(br|img|object|embed|input|textarea)[^>]*>/gi,"-").replace(/<[^>]+>/g,"").length==0}function a(g,f){return parseInt(g.getAttribute(f)||1)}function b(H,G,K){var g,L,D,o;t();o=G.getParent(K.getStart(),"th,td");if(o){L=F(o);D=I();o=z(L.x,L.y)}function A(N,M){N=N.cloneNode(M);N.removeAttribute("id");return N}function t(){var M=0;g=[];e(["thead","tbody","tfoot"],function(N){var O=G.select("> "+N+" tr",H);e(O,function(P,Q){Q+=M;e(G.select("> td, > th",P),function(W,R){var S,T,U,V;if(g[Q]){while(g[Q][R]){R++}}U=a(W,"rowspan");V=a(W,"colspan");for(T=Q;T<Q+U;T++){if(!g[T]){g[T]=[]}for(S=R;S<R+V;S++){g[T][S]={part:N,real:T==Q&&S==R,elm:W,rowspan:U,colspan:V}}}})});M+=O.length})}function z(M,O){var N;N=g[O];if(N){return N[M]}}function s(O,M,N){if(O){N=parseInt(N);if(N===1){O.removeAttribute(M,1)}else{O.setAttribute(M,N,1)}}}function j(M){return M&&(G.hasClass(M.elm,"mceSelected")||M==o)}function k(){var M=[];e(H.rows,function(N){e(N.cells,function(O){if(G.hasClass(O,"mceSelected")||O==o.elm){M.push(N);return false}})});return M}function r(){var M=G.createRng();M.setStartAfter(H);M.setEndAfter(H);K.setRng(M);G.remove(H)}function f(M){var N;d.walk(M,function(P){var O;if(P.nodeType==3){e(G.getParents(P.parentNode,null,M).reverse(),function(Q){Q=A(Q,false);if(!N){N=O=Q}else{if(O){O.appendChild(Q)}}O=Q});if(O){O.innerHTML=d.isIE?"&nbsp;":'<br data-mce-bogus="1" />'}return false}},"childNodes");M=A(M,false);s(M,"rowSpan",1);s(M,"colSpan",1);if(N){M.appendChild(N)}else{if(!d.isIE){M.innerHTML='<br data-mce-bogus="1" />'}}return M}function q(){var 
M=G.createRng();e(G.select("tr",H),function(N){if(N.cells.length==0){G.remove(N)}});if(G.select("tr",H).length==0){M.setStartAfter(H);M.setEndAfter(H);K.setRng(M);G.remove(H);return}e(G.select("thead,tbody,tfoot",H),function(N){if(N.rows.length==0){G.remove(N)}});t();row=g[Math.min(g.length-1,L.y)];if(row){K.select(row[Math.min(row.length-1,L.x)].elm,true);K.collapse(true)}}function u(S,Q,U,R){var P,N,M,O,T;P=g[Q][S].elm.parentNode;for(M=1;M<=U;M++){P=G.getNext(P,"tr");if(P){for(N=S;N>=0;N--){T=g[Q+M][N].elm;if(T.parentNode==P){for(O=1;O<=R;O++){G.insertAfter(f(T),T)}break}}if(N==-1){for(O=1;O<=R;O++){P.insertBefore(f(P.cells[0]),P.cells[0])}}}}}function C(){e(g,function(M,N){e(M,function(P,O){var S,R,T,Q;if(j(P)){P=P.elm;S=a(P,"colspan");R=a(P,"rowspan");if(S>1||R>1){s(P,"rowSpan",1);s(P,"colSpan",1);for(Q=0;Q<S-1;Q++){G.insertAfter(f(P),P)}u(O,N,R-1,S)}}})})}function p(V,S,Y){var P,O,X,W,U,R,T,M,V,N,Q;if(V){pos=F(V);P=pos.x;O=pos.y;X=P+(S-1);W=O+(Y-1)}else{P=L.x;O=L.y;X=D.x;W=D.y}T=z(P,O);M=z(X,W);if(T&&M&&T.part==M.part){C();t();T=z(P,O).elm;s(T,"colSpan",(X-P)+1);s(T,"rowSpan",(W-O)+1);for(R=O;R<=W;R++){for(U=P;U<=X;U++){if(!g[R]||!g[R][U]){continue}V=g[R][U].elm;if(V!=T){N=d.grep(V.childNodes);e(N,function(Z){T.appendChild(Z)});if(N.length){N=d.grep(T.childNodes);Q=0;e(N,function(Z){if(Z.nodeName=="BR"&&G.getAttrib(Z,"data-mce-bogus")&&Q++<N.length-1){T.removeChild(Z)}})}G.remove(V)}}}q()}}function l(Q){var M,S,P,R,T,U,N,V,O;e(g,function(W,X){e(W,function(Z,Y){if(j(Z)){Z=Z.elm;T=Z.parentNode;U=A(T,false);M=X;if(Q){return false}}});if(Q){return !M}});for(R=0;R<g[0].length;R++){if(!g[M][R]){continue}S=g[M][R].elm;if(S!=P){if(!Q){O=a(S,"rowspan");if(O>1){s(S,"rowSpan",O+1);continue}}else{if(M>0&&g[M-1][R]){V=g[M-1][R].elm;O=a(V,"rowSpan");if(O>1){s(V,"rowSpan",O+1);continue}}}N=f(S);s(N,"colSpan",S.colSpan);U.appendChild(N);P=S}}if(U.hasChildNodes()){if(!Q){G.insertAfter(U,T)}else{T.parentNode.insertBefore(U,T)}}}function h(N){var 
O,M;e(g,function(P,Q){e(P,function(S,R){if(j(S)){O=R;if(N){return false}}});if(N){return !O}});e(g,function(S,T){var P,Q,R;if(!S[O]){return}P=S[O].elm;if(P!=M){R=a(P,"colspan");Q=a(P,"rowspan");if(R==1){if(!N){G.insertAfter(f(P),P);u(O,T,Q-1,R)}else{P.parentNode.insertBefore(f(P),P);u(O,T,Q-1,R)}}else{s(P,"colSpan",P.colSpan+1)}M=P}})}function n(){var M=[];e(g,function(N,O){e(N,function(Q,P){if(j(Q)&&d.inArray(M,P)===-1){e(g,function(T){var R=T[P].elm,S;S=a(R,"colSpan");if(S>1){s(R,"colSpan",S-1)}else{G.remove(R)}});M.push(P)}})});q()}function m(){var N;function M(Q){var P,R,O;P=G.getNext(Q,"tr");e(Q.cells,function(S){var T=a(S,"rowSpan");if(T>1){s(S,"rowSpan",T-1);R=F(S);u(R.x,R.y,1,1)}});R=F(Q.cells[0]);e(g[R.y],function(S){var T;S=S.elm;if(S!=O){T=a(S,"rowSpan");if(T<=1){G.remove(S)}else{s(S,"rowSpan",T-1)}O=S}})}N=k();e(N.reverse(),function(O){M(O)});q()}function E(){var M=k();G.remove(M);q();return M}function J(){var M=k();e(M,function(O,N){M[N]=A(O,true)});return M}function B(O,N){var P=k(),M=P[N?0:P.length-1],Q=M.cells.length;e(g,function(S){var R;Q=0;e(S,function(U,T){if(U.real){Q+=U.colspan}if(U.elm.parentNode==M){R=1}});if(R){return false}});if(!N){O.reverse()}e(O,function(T){var S=T.cells.length,R;for(i=0;i<S;i++){R=T.cells[i];s(R,"colSpan",1);s(R,"rowSpan",1)}for(i=S;i<Q;i++){T.appendChild(f(T.cells[S-1]))}for(i=Q;i<S;i++){G.remove(T.cells[i])}if(N){M.parentNode.insertBefore(T,M)}else{G.insertAfter(T,M)}})}function F(M){var N;e(g,function(O,P){e(O,function(R,Q){if(R.elm==M){N={x:Q,y:P};return false}});return !N});return N}function w(M){L=F(M)}function I(){var O,N,M;N=M=0;e(g,function(P,Q){e(P,function(S,R){var U,T;if(j(S)){S=g[Q][R];if(R>N){N=R}if(Q>M){M=Q}if(S.real){U=S.colspan-1;T=S.rowspan-1;if(U){if(R+U>N){N=R+U}}if(T){if(Q+T>M){M=Q+T}}}}})});return{x:N,y:M}}function v(S){var 
P,O,U,T,N,M,Q,R;D=F(S);if(L&&D){P=Math.min(L.x,D.x);O=Math.min(L.y,D.y);U=Math.max(L.x,D.x);T=Math.max(L.y,D.y);N=U;M=T;for(y=O;y<=M;y++){S=g[y][P];if(!S.real){if(P-(S.colspan-1)<P){P-=S.colspan-1}}}for(x=P;x<=N;x++){S=g[O][x];if(!S.real){if(O-(S.rowspan-1)<O){O-=S.rowspan-1}}}for(y=O;y<=T;y++){for(x=P;x<=U;x++){S=g[y][x];if(S.real){Q=S.colspan-1;R=S.rowspan-1;if(Q){if(x+Q>N){N=x+Q}}if(R){if(y+R>M){M=y+R}}}}}G.removeClass(G.select("td.mceSelected,th.mceSelected"),"mceSelected");for(y=O;y<=M;y++){for(x=P;x<=N;x++){if(g[y][x]){G.addClass(g[y][x].elm,"mceSelected")}}}}}d.extend(this,{deleteTable:r,split:C,merge:p,insertRow:l,insertCol:h,deleteCols:n,deleteRows:m,cutRows:E,copyRows:J,pasteRows:B,getPos:F,setStartCell:w,setEndCell:v})}d.create("tinymce.plugins.TablePlugin",{init:function(g,h){var f,m,j=true;function l(p){var o=g.selection,n=g.dom.getParent(p||o.getNode(),"table");if(n){return new b(n,g.dom,o)}}function k(){g.getBody().style.webkitUserSelect="";if(j){g.dom.removeClass(g.dom.select("td.mceSelected,th.mceSelected"),"mceSelected");j=false}}e([["table","table.desc","mceInsertTable",true],["delete_table","table.del","mceTableDelete"],["delete_col","table.delete_col_desc","mceTableDeleteCol"],["delete_row","table.delete_row_desc","mceTableDeleteRow"],["col_after","table.col_after_desc","mceTableInsertColAfter"],["col_before","table.col_before_desc","mceTableInsertColBefore"],["row_after","table.row_after_desc","mceTableInsertRowAfter"],["row_before","table.row_before_desc","mceTableInsertRowBefore"],["row_props","table.row_desc","mceTableRowProps",true],["cell_props","table.cell_desc","mceTableCellProps",true],["split_cells","table.split_cells_desc","mceTableSplitCells",true],["merge_cells","table.merge_cells_desc","mceTableMergeCells",true]],function(n){g.addButton(n[0],{title:n[1],cmd:n[2],ui:n[3]})});if(!d.isIE){g.onClick.add(function(n,o){o=o.target;if(o.nodeName==="TABLE"){n.selection.select(o);n.nodeChanged()}})}g.onPreProcess.add(function(o,p){var 
n,q,r,t=o.dom,s;n=t.select("table",p.node);q=n.length;while(q--){r=n[q];t.setAttrib(r,"data-mce-style","");if((s=t.getAttrib(r,"width"))){t.setStyle(r,"width",s);t.setAttrib(r,"width","")}if((s=t.getAttrib(r,"height"))){t.setStyle(r,"height",s);t.setAttrib(r,"height","")}}});g.onNodeChange.add(function(q,o,s){var r;s=q.selection.getStart();r=q.dom.getParent(s,"td,th,caption");o.setActive("table",s.nodeName==="TABLE"||!!r);if(r&&r.nodeName==="CAPTION"){r=0}o.setDisabled("delete_table",!r);o.setDisabled("delete_col",!r);o.setDisabled("delete_table",!r);o.setDisabled("delete_row",!r);o.setDisabled("col_after",!r);o.setDisabled("col_before",!r);o.setDisabled("row_after",!r);o.setDisabled("row_before",!r);o.setDisabled("row_props",!r);o.setDisabled("cell_props",!r);o.setDisabled("split_cells",!r);o.setDisabled("merge_cells",!r)});g.onInit.add(function(r){var p,t,q=r.dom,u;f=r.windowManager;r.onMouseDown.add(function(w,z){if(z.button!=2){k();t=q.getParent(z.target,"td,th");p=q.getParent(t,"table")}});q.bind(r.getDoc(),"mouseover",function(C){var A,z,B=C.target;if(t&&(u||B!=t)&&(B.nodeName=="TD"||B.nodeName=="TH")){z=q.getParent(B,"table");if(z==p){if(!u){u=l(z);u.setStartCell(t);r.getBody().style.webkitUserSelect="none"}u.setEndCell(B);j=true}A=r.selection.getSel();try{if(A.removeAllRanges){A.removeAllRanges()}else{A.empty()}}catch(w){}C.preventDefault()}});r.onMouseUp.add(function(F,G){var z,B=F.selection,H,I=B.getSel(),w,C,A,E;if(t){if(u){F.getBody().style.webkitUserSelect=""}function D(J,L){var K=new d.dom.TreeWalker(J,J);do{if(J.nodeType==3&&d.trim(J.nodeValue).length!=0){if(L){z.setStart(J,0)}else{z.setEnd(J,J.nodeValue.length)}return}if(J.nodeName=="BR"){if(L){z.setStartBefore(J)}else{z.setEndBefore(J)}return}}while(J=(L?K.next():K.prev()))}H=q.select("td.mceSelected,th.mceSelected");if(H.length>0){z=q.createRng();C=H[0];E=H[H.length-1];z.setStartBefore(C);z.setEndAfter(C);D(C,1);w=new 
d.dom.TreeWalker(C,q.getParent(H[0],"table"));do{if(C.nodeName=="TD"||C.nodeName=="TH"){if(!q.hasClass(C,"mceSelected")){break}A=C}}while(C=w.next());D(A);B.setRng(z)}F.nodeChanged();t=u=p=null}});r.onKeyUp.add(function(w,z){k()});r.onKeyDown.add(function(w,z){n(w)});r.onMouseDown.add(function(w,z){if(z.button!=2){n(w)}});function o(D,z,A,F){var B=3,G=D.dom.getParent(z.startContainer,"TABLE"),C,w,E;if(G){C=G.parentNode}w=z.startContainer.nodeType==B&&z.startOffset==0&&z.endOffset==0&&F&&(A.nodeName=="TR"||A==C);E=(A.nodeName=="TD"||A.nodeName=="TH")&&!F;return w||E}function n(A){if(!d.isWebKit){return}var z=A.selection.getRng();var C=A.selection.getNode();var B=A.dom.getParent(z.startContainer,"TD,TH");if(!o(A,z,C,B)){return}if(!B){B=C}var w=B.lastChild;while(w.lastChild){w=w.lastChild}z.setEnd(w,w.nodeValue.length);A.selection.setRng(z)}r.plugins.table.fixTableCellSelection=n;if(r&&r.plugins.contextmenu){r.plugins.contextmenu.onContextMenu.add(function(A,w,C){var D,B=r.selection,z=B.getNode()||r.getBody();if(r.dom.getParent(C,"td")||r.dom.getParent(C,"th")||r.dom.select("td.mceSelected,th.mceSelected").length){w.removeAll();if(z.nodeName=="A"&&!r.dom.getAttrib(z,"name")){w.add({title:"advanced.link_desc",icon:"link",cmd:r.plugins.advlink?"mceAdvLink":"mceLink",ui:true});w.add({title:"advanced.unlink_desc",icon:"unlink",cmd:"UnLink"});w.addSeparator()}if(z.nodeName=="IMG"&&z.className.indexOf("mceItem")==-1){w.add({title:"advanced.image_desc",icon:"image",cmd:r.plugins.advimage?"mceAdvImage":"mceImage",ui:true});w.addSeparator()}w.add({title:"table.desc",icon:"table",cmd:"mceInsertTable",value:{action:"insert"}});w.add({title:"table.props_desc",icon:"table_props",cmd:"mceInsertTable"});w.add({title:"table.del",icon:"delete_table",cmd:"mceTableDelete"});w.addSeparator();D=w.addMenu({title:"table.cell"});D.add({title:"table.cell_desc",icon:"cell_props",cmd:"mceTableCellProps"});D.add({title:"table.split_cells_desc",icon:"split_cells",cmd:"mceTableSplitCells"});D.add({
title:"table.merge_cells_desc",icon:"merge_cells",cmd:"mceTableMergeCells"});D=w.addMenu({title:"table.row"});D.add({title:"table.row_desc",icon:"row_props",cmd:"mceTableRowProps"});D.add({title:"table.row_before_desc",icon:"row_before",cmd:"mceTableInsertRowBefore"});D.add({title:"table.row_after_desc",icon:"row_after",cmd:"mceTableInsertRowAfter"});D.add({title:"table.delete_row_desc",icon:"delete_row",cmd:"mceTableDeleteRow"});D.addSeparator();D.add({title:"table.cut_row_desc",icon:"cut",cmd:"mceTableCutRow"});D.add({title:"table.copy_row_desc",icon:"copy",cmd:"mceTableCopyRow"});D.add({title:"table.paste_row_before_desc",icon:"paste",cmd:"mceTablePasteRowBefore"}).setDisabled(!m);D.add({title:"table.paste_row_after_desc",icon:"paste",cmd:"mceTablePasteRowAfter"}).setDisabled(!m);D=w.addMenu({title:"table.col"});D.add({title:"table.col_before_desc",icon:"col_before",cmd:"mceTableInsertColBefore"});D.add({title:"table.col_after_desc",icon:"col_after",cmd:"mceTableInsertColAfter"});D.add({title:"table.delete_col_desc",icon:"delete_col",cmd:"mceTableDeleteCol"})}else{w.add({title:"table.desc",icon:"table",cmd:"mceInsertTable"})}})}if(d.isWebKit){function v(C,N){var L=d.VK;var Q=N.keyCode;function O(Y,U,S){var T=Y?"previousSibling":"nextSibling";var Z=C.dom.getParent(U,"tr");var X=Z[T];if(X){z(C,U,X,Y);d.dom.Event.cancel(S);return true}else{var aa=C.dom.getParent(Z,"table");var W=Z.parentNode;var R=W.nodeName.toLowerCase();if(R==="tbody"||R===(Y?"tfoot":"thead")){var V=w(Y,aa,W,"tbody");if(V!==null){return K(Y,V,U,S)}}return M(Y,Z,T,aa,S)}}function w(V,T,U,X){var S=C.dom.select(">"+X,T);var R=S.indexOf(U);if(V&&R===0||!V&&R===S.length-1){return B(V,T)}else{if(R===-1){var W=U.tagName.toLowerCase()==="thead"?0:S.length-1;return S[W]}else{return S[R+(V?-1:1)]}}}function B(U,T){var S=U?"thead":"tfoot";var R=C.dom.select(">"+S,T);return R.length!==0?R[0]:null}function K(V,T,S,U){var R=J(T,V);R&&z(C,S,R,V);d.dom.Event.cancel(U);return true}function M(Y,U,R,X,W){var 
S=X[R];if(S){F(S);return true}else{var V=C.dom.getParent(X,"td,th");if(V){return O(Y,V,W)}else{var T=J(U,!Y);F(T);return d.dom.Event.cancel(W)}}}function J(S,R){return S&&S[R?"lastChild":"firstChild"]}function F(R){C.selection.setCursorLocation(R,0)}function A(){return Q==L.UP||Q==L.DOWN}function D(R){var T=R.selection.getNode();var S=R.dom.getParent(T,"tr");return S!==null}function P(S){var R=0;var T=S;while(T.previousSibling){T=T.previousSibling;R=R+a(T,"colspan")}return R}function E(T,R){var U=0;var S=0;e(T.children,function(V,W){U=U+a(V,"colspan");S=W;if(U>R){return false}});return S}function z(T,W,Y,V){var X=P(T.dom.getParent(W,"td,th"));var S=E(Y,X);var R=Y.childNodes[S];var U=J(R,V);F(U||R)}function H(R){var T=C.selection.getNode();var U=C.dom.getParent(T,"td,th");var S=C.dom.getParent(R,"td,th");return U&&U!==S&&I(U,S)}function I(S,R){return C.dom.getParent(S,"TABLE")===C.dom.getParent(R,"TABLE")}if(A()&&D(C)){var G=C.selection.getNode();setTimeout(function(){if(H(G)){O(!N.shiftKey&&Q===L.UP,G,N)}},0)}}r.onKeyDown.add(v)}if(!d.isIE){function s(){var w;for(w=r.getBody().lastChild;w&&w.nodeType==3&&!w.nodeValue.length;w=w.previousSibling){}if(w&&w.nodeName=="TABLE"){r.dom.add(r.getBody(),"p",null,'<br mce_bogus="1" />')}}if(d.isGecko){r.onKeyDown.add(function(z,B){var w,A,C=z.dom;if(B.keyCode==37||B.keyCode==38){w=z.selection.getRng();A=C.getParent(w.startContainer,"table");if(A&&z.getBody().firstChild==A){if(c(w,A)){w=C.createRng();w.setStartBefore(A);w.setEndBefore(A);z.selection.setRng(w);B.preventDefault()}}}})}r.onKeyUp.add(s);r.onSetContent.add(s);r.onVisualAid.add(s);r.onPreProcess.add(function(w,A){var z=A.node.lastChild;if(z&&z.childNodes.length==1&&z.firstChild.nodeName=="BR"){w.dom.remove(z)}});s();r.startContent=r.getContent({format:"raw"})}});e({mceTableSplitCells:function(n){n.split()},mceTableMergeCells:function(o){var 
p,q,n;n=g.dom.getParent(g.selection.getNode(),"th,td");if(n){p=n.rowSpan;q=n.colSpan}if(!g.dom.select("td.mceSelected,th.mceSelected").length){f.open({url:h+"/merge_cells.htm",width:240+parseInt(g.getLang("table.merge_cells_delta_width",0)),height:110+parseInt(g.getLang("table.merge_cells_delta_height",0)),inline:1},{rows:p,cols:q,onaction:function(r){o.merge(n,r.cols,r.rows)},plugin_url:h})}else{o.merge()}},mceTableInsertRowBefore:function(n){n.insertRow(true)},mceTableInsertRowAfter:function(n){n.insertRow()},mceTableInsertColBefore:function(n){n.insertCol(true)},mceTableInsertColAfter:function(n){n.insertCol()},mceTableDeleteCol:function(n){n.deleteCols()},mceTableDeleteRow:function(n){n.deleteRows()},mceTableCutRow:function(n){m=n.cutRows()},mceTableCopyRow:function(n){m=n.copyRows()},mceTablePasteRowBefore:function(n){n.pasteRows(m,true)},mceTablePasteRowAfter:function(n){n.pasteRows(m)},mceTableDelete:function(n){n.deleteTable()}},function(o,n){g.addCommand(n,function(){var p=l();if(p){o(p);g.execCommand("mceRepaint");k()}})});e({mceInsertTable:function(n){f.open({url:h+"/table.htm",width:400+parseInt(g.getLang("table.table_delta_width",0)),height:320+parseInt(g.getLang("table.table_delta_height",0)),inline:1},{plugin_url:h,action:n?n.action:0})},mceTableRowProps:function(){f.open({url:h+"/row.htm",width:400+parseInt(g.getLang("table.rowprops_delta_width",0)),height:295+parseInt(g.getLang("table.rowprops_delta_height",0)),inline:1},{plugin_url:h})},mceTableCellProps:function(){f.open({url:h+"/cell.htm",width:400+parseInt(g.getLang("table.cellprops_delta_width",0)),height:295+parseInt(g.getLang("table.cellprops_delta_height",0)),inline:1},{plugin_url:h})}},function(o,n){g.addCommand(n,function(p,q){o(q)})})}});d.PluginManager.add("table",d.plugins.TablePlugin)})(tinymce);
PypiClean
/DXR-1.9.4.tar.gz/DXR-1.9.4/Dxr_grpc/Datas_pb2_grpc.py
"""Client and server classes corresponding to protobuf-defined services.""" import grpc from . import Datas_pb2 as Datas__pb2 class MainServerStub(object): """server """ def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. """ self.getStream = channel.stream_stream( '/MainServer/getStream', request_serializer=Datas__pb2.Request.SerializeToString, response_deserializer=Datas__pb2.Reply.FromString, ) class MainServerServicer(object): """server """ def getStream(self, request_iterator, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_MainServerServicer_to_server(servicer, server): rpc_method_handlers = { 'getStream': grpc.stream_stream_rpc_method_handler( servicer.getStream, request_deserializer=Datas__pb2.Request.FromString, response_serializer=Datas__pb2.Reply.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'MainServer', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # This class is part of an EXPERIMENTAL API. class MainServer(object): """server """ @staticmethod def getStream(request_iterator, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.stream_stream(request_iterator, target, '/MainServer/getStream', Datas__pb2.Request.SerializeToString, Datas__pb2.Reply.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
PypiClean
/Electrum-VTC-2.9.3.3.tar.gz/Electrum-VTC-2.9.3.3/gui/qt/history_list.py
import webbrowser

from util import *
from electrum_vtc.i18n import _
from electrum_vtc.util import block_explorer_URL, format_satoshis, format_time
from electrum_vtc.plugins import run_hook
from electrum_vtc.util import timestamp_to_datetime, profiler


# Icon file shown in column 0, indexed by the status code returned by
# wallet.get_tx_status().
TX_ICONS = [
    "warning.png",
    "warning.png",
    "warning.png",
    "unconfirmed.png",
    "unconfirmed.png",
    "clock1.png",
    "clock2.png",
    "clock3.png",
    "clock4.png",
    "clock5.png",
    "confirmed.png",
]


class HistoryList(MyTreeWidget):
    """Tree widget listing the wallet's transaction history.

    Columns: 0 status icon, 1 tx hash (hidden, lookup key), 2 date/status,
    3 description, 4 amount, 5 balance, plus two optional fiat columns
    when exchange-rate history is enabled.
    """
    filter_columns = [2, 3, 4]  # Date, Description, Amount

    def __init__(self, parent=None):
        MyTreeWidget.__init__(self, parent, self.create_menu, [], 3)
        self.refresh_headers()
        # Column 1 carries the raw tx hash; it is only used for lookups.
        self.setColumnHidden(1, True)

    def refresh_headers(self):
        """Rebuild the header labels, appending fiat columns if enabled."""
        headers = ['', '', _('Date'), _('Description'), _('Amount'), _('Balance')]
        fx = self.parent.fx
        if fx and fx.show_history():
            headers.extend(['%s ' % fx.ccy + _('Amount'), '%s ' % fx.ccy + _('Balance')])
        self.update_headers(headers)

    def get_domain(self):
        '''Replaced in address_dialog.py'''
        return self.wallet.get_addresses()

    @profiler
    def on_update(self):
        """Repopulate the widget from the wallet history.

        Preserves the current selection by tx hash across the rebuild.
        """
        self.wallet = self.parent.wallet
        h = self.wallet.get_history(self.get_domain())
        item = self.currentItem()
        # PyQt4 QVariant API: .toString() converts the stored hash back.
        current_tx = item.data(0, Qt.UserRole).toString() if item else None
        self.clear()
        fx = self.parent.fx
        if fx:
            fx.history_used_spot = False
        for h_item in h:
            tx_hash, height, conf, timestamp, value, balance = h_item
            status, status_str = self.wallet.get_tx_status(tx_hash, height, conf, timestamp)
            has_invoice = self.wallet.invoices.paid.get(tx_hash)
            icon = QIcon(":icons/" + TX_ICONS[status])
            v_str = self.parent.format_amount(value, True, whitespaces=True)
            balance_str = self.parent.format_amount(balance, whitespaces=True)
            label = self.wallet.get_label(tx_hash)
            entry = ['', tx_hash, status_str, label, v_str, balance_str]
            if fx and fx.show_history():
                # Unconfirmed txs are valued at the spot rate (now).
                date = timestamp_to_datetime(time.time() if conf <= 0 else timestamp)
                for amount in [value, balance]:
                    text = fx.historical_value_str(amount, date)
                    entry.append(text)
            item = QTreeWidgetItem(entry)
            item.setIcon(0, icon)
            item.setToolTip(0, str(conf) + " confirmation" + ("s" if conf != 1 else ""))
            if has_invoice:
                item.setIcon(3, QIcon(":icons/seal"))
            for i in range(len(entry)):
                if i > 3:
                    item.setTextAlignment(i, Qt.AlignRight)
                if i != 2:
                    item.setFont(i, QFont(MONOSPACE_FONT))
                # NOTE(review): setTextAlignment replaces the alignment set
                # above for i > 3; confirm AlignRight|AlignVCenter was not
                # intended here.
                item.setTextAlignment(i, Qt.AlignVCenter)
            if value < 0:
                item.setForeground(3, QBrush(QColor("#BC1E1E")))
                item.setForeground(4, QBrush(QColor("#BC1E1E")))
            if tx_hash:
                item.setData(0, Qt.UserRole, tx_hash)
            self.insertTopLevelItem(0, item)
            if current_tx == tx_hash:
                self.setCurrentItem(item)

    def on_doubleclick(self, item, column):
        """Edit the label if permitted, otherwise open the tx details."""
        if self.permit_edit(item, column):
            super(HistoryList, self).on_doubleclick(item, column)
        else:
            tx_hash = str(item.data(0, Qt.UserRole).toString())
            tx = self.wallet.transactions.get(tx_hash)
            self.parent.show_transaction(tx)

    def update_labels(self):
        """Refresh the description column from the wallet's label store."""
        root = self.invisibleRootItem()
        child_count = root.childCount()
        for i in range(child_count):
            item = root.child(i)
            txid = str(item.data(0, Qt.UserRole).toString())
            label = self.wallet.get_label(txid)
            item.setText(3, label)

    def update_item(self, tx_hash, height, conf, timestamp):
        """Update the icon and status text of a single transaction row."""
        status, status_str = self.wallet.get_tx_status(tx_hash, height, conf, timestamp)
        icon = QIcon(":icons/" + TX_ICONS[status])
        items = self.findItems(tx_hash, Qt.UserRole | Qt.MatchContains | Qt.MatchRecursive, column=1)
        if items:
            item = items[0]
            item.setIcon(0, icon)
            item.setText(2, status_str)

    def create_menu(self, position):
        """Build and show the context menu for the row under *position*."""
        self.selectedIndexes()
        item = self.currentItem()
        if not item:
            return
        column = self.currentColumn()
        tx_hash = str(item.data(0, Qt.UserRole).toString())
        if not tx_hash:
            return
        # BUG FIX: was `column is 0` — identity comparison against an int
        # literal is implementation-dependent; use equality.
        if column == 0:
            column_title = "ID"
            column_data = tx_hash
        else:
            column_title = self.headerItem().text(column)
            column_data = item.text(column)
        tx_URL = block_explorer_URL(self.config, 'tx', tx_hash)
        height, conf, timestamp = self.wallet.get_tx_height(tx_hash)
        tx = self.wallet.transactions.get(tx_hash)
        is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
        is_unconfirmed = height <= 0
        pr_key = self.wallet.invoices.paid.get(tx_hash)
        menu = QMenu()
        menu.addAction(_("Copy %s") % column_title, lambda: self.parent.app.clipboard().setText(column_data))
        if column in self.editable_columns:
            menu.addAction(_("Edit %s") % column_title, lambda: self.editItem(item, column))
        menu.addAction(_("Details"), lambda: self.parent.show_transaction(tx))
        if is_unconfirmed and tx:
            # Offer RBF fee-bump for our own replaceable txs, else CPFP.
            rbf = is_mine and not tx.is_final()
            if rbf:
                menu.addAction(_("Increase fee"), lambda: self.parent.bump_fee_dialog(tx))
            else:
                child_tx = self.wallet.cpfp(tx, 0)
                if child_tx:
                    menu.addAction(_("Child pays for parent"), lambda: self.parent.cpfp(tx, child_tx))
        if pr_key:
            menu.addAction(QIcon(":icons/seal"), _("View invoice"), lambda: self.parent.show_invoice(pr_key))
        if tx_URL:
            menu.addAction(_("View on block explorer"), lambda: webbrowser.open(tx_URL))
        menu.exec_(self.viewport().mapToGlobal(position))
PypiClean
/MathCake-0.0.1a3.tar.gz/MathCake-0.0.1a3/cake/core/surd.py
from math import sqrt
import math
import typing

from .number import Number
from .types.float import Float

from cake.abc import IntegerType
from cake.helpers import convert_type

__all__ = ("get_perfect_square", "_rationalise", "Surd")


def get_perfect_square(
    limit: int,
    *,
    increment: typing.Optional[int] = 3,
    index: typing.Optional[int] = 0,
    accumulated_list: typing.Optional[typing.List[int]] = None,
) -> typing.Union[tuple]:
    """Return the perfect squares <= ``limit`` (1, 4, 9, ...).

    Squares are built incrementally from the identity
    ``(k + 1)**2 - k**2 == 2*k + 1``.
    """
    # BUG FIX: the original default was the mutable literal [1], which is
    # created once and mutated in place, so successive calls kept growing
    # the same shared list. Use a None sentinel instead.
    if accumulated_list is None:
        accumulated_list = [1]
    while (accumulated_list[-1] + increment) <= limit:
        accumulated_list.append(accumulated_list[index] + increment)
        index += 1
        increment = (2 * index) + 3
    return accumulated_list


def _rationalise(n: int, **kwargs) -> typing.Union[tuple, int]:
    """Simplify sqrt(n).

    Returns an Imaginary for negative n, a float when n is a perfect
    square, n itself when no square factor exists, or an ``(a, b)`` pair
    such that sqrt(n) == a * sqrt(b).
    """
    from cake import Imaginary

    if n < 0:
        return Imaginary(n, math.sqrt)
    if sqrt(n).is_integer():
        return sqrt(n)

    # Largest square factor > 1 that divides n (only factors up to n/2
    # can divide n besides n itself, which was excluded above).
    ac_list = get_perfect_square(n / 2, **kwargs)
    factors = [square for square in ac_list if n % square == 0 and square > 1]
    if len(factors) == 0:
        return n
    a = int(sqrt(max(factors)))
    b = int(n / max(factors))
    return a, b


class Surd(Number):
    r"""
    An object representing an irrational number, in the form of a surd.

    Parameters
    ----------
    integer: `~cake.abc.IntegerType`
        The irrational integer
    n: `~cake.abc.IntegerType`
        the nth term of the root, e.g ``3`` is cube-root.

        Doing ``'(2/3)'`` will cube root it and then raise it to the power of 2
    i: :`~cake.abc.IntegerType`:
        Coefficient of the unsolved root
    """

    def __new__(
        cls,
        integer: IntegerType,
        n: IntegerType = 1,
        i: IntegerType = 1,
    ) -> typing.Union["Surd", Float]:
        # Collapse to a plain Float when the square root is exact, so
        # Surd(4) yields Float(2.0) rather than a Surd instance.
        is_rational = sqrt(integer)
        if int(is_rational) - is_rational == 0:
            return Float(is_rational)
        return super(Surd, cls).__new__(Surd)

    def __init__(self, integer, n=1, i=1) -> None:
        if isinstance(n, str):
            # BUG FIX: was `and`, which only rejected strings missing BOTH
            # parentheses; a half-formed '(2/3' slipped through.
            if not n.startswith("(") or not n.endswith(")"):
                raise ValueError(
                    f"Formatting of fraction powers must be in the form of `(x/y)`"
                )
            n = n[1:-1]
            # BUG FIX: materialise the map — the original stored a lazy
            # `map` object, so `isinstance(self.n, tuple)` in `decimal`
            # could never match and fractional powers crashed.
            n = tuple(map(convert_type, n.split("/")))

        self.integer = integer
        self.n = n
        self.i = i

        super().__init__(self.decimal, True, float, Surd, Surd)

    # Some utilities
    def rationalise(self, **kwargs):
        """Return a simplified form of this surd (Imaginary, Float or Surd)."""
        from cake import Imaginary

        # NOTE(review): relies on `self.value` provided by Number — confirm.
        res = _rationalise(self.value, **kwargs)
        if isinstance(res, Imaginary):
            return res
        if isinstance(res, int):
            # BUG FIX: was `Float(int)` — it wrapped the builtin `int`
            # type object instead of the computed result.
            return Float(res)
        co, ac = res
        return Surd(ac, self.n, co)

    @property
    def decimal(self):
        """Numeric (float-like) value of the surd."""
        # NOTE(review): when i != 1 the coefficient is folded into the
        # radicand as i**2 AND the root is raised to i afterwards — verify
        # this double application is intended.
        if self.i != 1:
            integer = self.integer * (self.i ** 2)
        else:
            integer = self.integer

        if isinstance(self.n, tuple):
            # Fractional power '(x/y)': y-th root raised to the x-th power.
            x, y = self.n
            return convert_type(((integer ** (1 / y)) ** x) ** self.i)
        return convert_type((integer ** (1 / self.n)) ** self.i)

    @property
    def simplify(self):
        """Alias of :attr:`decimal`."""
        return self.decimal

    @property
    def evaluate(self):
        """Alias of :attr:`decimal`."""
        return self.decimal

    def __repr__(self) -> str:
        if self.i and self.i != 1:
            co = str(self.i)
        else:
            co = ""

        if self.n and self.n != 1:
            n = f"** {self.n}"
        else:
            n = ""

        return f"{co}√{self.integer} {n}"

    def __add__(self, other: "Surd") -> "Surd":
        # Like surds add by summing coefficients; anything else defers
        # to the generic Number implementation.
        if not isinstance(other, Surd):
            return super().__add__(other)
        if other.integer != self.integer:
            raise ValueError(
                f'Cannot add "{repr(other)}" as the integer is not the same!'
            )
        return Surd(self.integer, n=self.n, i=(self.i + other.i))

    def __sub__(self, other: "Surd") -> "Surd":
        # Like surds subtract by differencing coefficients.
        if not isinstance(other, Surd):
            return super().__sub__(other)
        if other.integer != self.integer:
            raise ValueError(
                f'Cannot subtract "{repr(other)}" as the integer is not the same!'
            )
        return Surd(self.integer, n=self.n, i=(self.i - other.i))
PypiClean
/KL_Audit_supportV2.2-1.0-py3-none-any.whl/AuditModule/util/LoggerUtil.py
from collections import OrderedDict
from logging import Formatter, FileHandler, StreamHandler, getLogger, INFO
from json import loads, dumps


def logger(name, handler, recordfields=None, level=INFO):
    """
    Return a logger that writes one JSON document per log record.

    :param name: logger name
    :param handler: logging.Handler instance to attach
    :param recordfields: metadata field names to include in each record
    :param level: logging level (defaults to INFO)
    :return: configured logging.Logger
    """
    log = getLogger(name)
    textformatter = JSONFormatter(recordfields)
    handler.setFormatter(textformatter)
    log.addHandler(handler)
    log.setLevel(level)
    return log


def filelogger(logname, recordfields=None, filename='json.log', level=INFO):
    """A convenience function to return a JSON file logger for simple situations.

       Args:
           logname : The name of the logger - to allow for multiple logs, and levels of logs in an application
           recordfields : The metadata fields to add to the JSON record created by the logger
           filename : The name of the file to be used in the logger
           level : The logger level
       Returns:
           A JSON file logger.
    """
    # 'w' truncates any existing log file on each call.
    handler = FileHandler(filename, 'w')
    return logger(logname, handler, recordfields, level)


def streamlogger(logname, recordfields=None, outputstream=None, level=INFO):
    """A convenience function to return a JSON stream logger for simple situations.

       Args:
           logname : The name of the logger - to allow for multiple logs, and levels of logs in an application
           recordfields : The metadata fields to add to the JSON record created by the logger
           outputstream : The outputstream to be used by the logger. sys.stderr is used when outputstream is None.
           level : The logger level
       Returns:
           A JSON stream logger.
    """
    handler = StreamHandler(outputstream)
    return logger(logname, handler, recordfields, level)


def read_json_log(logfile, filterfunction=(lambda x: True), customjson=None):
    """Iterate through a log file of JSON records and return a list of JSON
       records that meet the filterfunction.

       Args:
           logfile : A file like object consisting of JSON records.
           filterfunction : A function that returns True if the JSON record
                            should be included in the output and False otherwise.
           customjson : A decoder function to enable the loading of custom json objects
       Returns:
           A list of Python objects built from JSON records that passed the filterfunction.
    """
    json_records = []
    for x in logfile:
        # BUG FIX: the original sliced x[:-1] to drop the newline, which
        # corrupted the final line of a file not ending in '\n'.
        # json.loads already tolerates surrounding whitespace.
        rec = loads(x, object_hook=customjson)
        if filterfunction(rec):
            json_records.append(rec)
    return json_records


class JSONFormatter(Formatter):
    """The JSONFormatter class outputs Python log records in JSON format.

       JSONFormatter assumes that log record metadata fields are specified at the
       formatter level as opposed to the record level. The specification of metadata
       fields at the formatter level allows for multiple handlers to display
       differing levels of detail.

       Attributes:
           recordfields : A list of strings containing the names of metadata fields
                          (see Python log record documentation for details) to add
                          to the JSON output, in the order given. May be empty.
           customjson : A JSONEncoder subclass to enable writing of custom JSON objects.
    """

    def __init__(self, recordfields=None, datefmt=None, customjson=None):
        """Accept a formatter-specific list of metadata fields.

           Args:
               recordfields : A list of strings referring to metadata fields on
                              the record object. It can be empty or None.
        """
        Formatter.__init__(self, None, datefmt)
        # BUG FIX: normalise None to an empty list — uses_time() and
        # _getjsondata() both probe recordfields, and the original code
        # raised TypeError when the default None was left in place.
        self.recordfields = recordfields if recordfields is not None else []
        self.customjson = customjson

    def uses_time(self):
        """Return True iff 'asctime' was requested in recordfields.

           Overridden because this formatter has no format string for the
           ancestor implementation to inspect.
        """
        return 'asctime' in self.recordfields

    def _formattime(self, record):
        # Populate record.asctime only when the caller asked for it.
        if self.uses_time():
            record.asctime = self.formatTime(record, self.datefmt)

    def _getjsondata(self, record):
        """Combine the requested recordfields with the record's msg.

           Returns an OrderedDict (stable field order) when recordfields
           were supplied, otherwise just record.msg.
        """
        if len(self.recordfields) > 0:
            fields = [(x, getattr(record, x)) for x in self.recordfields]
            fields.append(('msg', record.msg))
            return OrderedDict(fields)
        return record.msg

    def format(self, record):
        """Render *record* as a JSON formatted string."""
        self._formattime(record)
        jsondata = self._getjsondata(record)
        return dumps(jsondata, cls=self.customjson)
PypiClean
/Flootty-3.3.0-py3-none-any.whl/flootty/floo/common/handlers/floo_handler.py
import os import sys import time import hashlib import base64 import collections from functools import reduce from operator import attrgetter try: from . import base from ..reactor import reactor from ..lib import DMP from .. import msg, ignore, repo, shared as G, utils from ..exc_fmt import str_e from ... import editor from ..protocols import floo_proto except (ImportError, ValueError) as e: import base from floo import editor from floo.common.lib import DMP from floo.common.reactor import reactor from floo.common.exc_fmt import str_e from floo.common import msg, ignore, repo, shared as G, utils from floo.common.protocols import floo_proto try: unicode() except NameError: unicode = str try: import io except ImportError: io = None MAX_WORKSPACE_SIZE = 200000000 # 200MB TOO_BIG_TEXT = '''Maximum workspace size is %.2fMB.\n %s is too big (%.2fMB) to upload.\n\nWould you like to ignore these paths and continue?\n\n%s''' class FlooHandler(base.BaseHandler): PROTOCOL = floo_proto.FlooProtocol def __init__(self, owner, workspace, auth, action): self.username = auth.get('username') self.secret = auth.get('secret') self.api_key = auth.get('api_key') # BaseHandler calls reload_settings() super(FlooHandler, self).__init__() self.owner = owner self.workspace = workspace self.action = action self.upload_timeout = None self.reset() def _on_highlight(self, data): raise NotImplementedError("_on_highlight not implemented") def ok_cancel_dialog(self, msg, cb=None): raise NotImplementedError("ok_cancel_dialog not implemented.") def get_view(self, buf_id): raise NotImplementedError("get_view not implemented") def get_view_text_by_path(self, rel_path): raise NotImplementedError("get_view_text_by_path not implemented") def build_protocol(self, *args): self.proto = super(FlooHandler, self).build_protocol(*args) def f(): self.joined_workspace = False self.proto.on('cleanup', f) self.proto.once('stop', self.stop) return self.proto def get_username_by_id(self, user_id): try: return 
self.workspace_info['users'][str(user_id)]['username'] except Exception: return '' def get_buf_by_path(self, path): try: p = utils.to_rel_path(path) except ValueError: return buf_id = self.paths_to_ids.get(p) if buf_id: return self.bufs.get(buf_id) def get_buf(self, buf_id, view=None): self.send({ 'name': 'get_buf', 'id': buf_id }) buf = self.bufs[buf_id] msg.warn('Syncing buffer ', buf['path'], ' for consistency.') if 'buf' in buf: del buf['buf'] if view: view.set_read_only(True) view.set_status('Floobits locked this file until it is synced.') try: del G.VIEW_TO_HASH[view.native_id] except Exception: pass def save_view(self, view): view.save() def on_connect(self): utils.reload_settings() self.reset() req = { 'username': self.username, 'secret': self.secret, 'room': self.workspace, 'room_owner': self.owner, 'client': self.client, 'platform': sys.platform, 'supported_encodings': ['utf8', 'base64'], 'version': G.__VERSION__ } if self.api_key: req['api_key'] = self.api_key self.send(req) @property def workspace_url(self): protocol = self.proto.secure and 'https' or 'http' return '{protocol}://{host}/{owner}/{name}'.format(protocol=protocol, host=self.proto.host, owner=self.owner, name=self.workspace) def reset(self): self.bufs = {} self.paths_to_ids = {} self.save_on_get_bufs = set() self.on_load = collections.defaultdict(dict) utils.cancel_timeout(self.upload_timeout) self.upload_timeout = None def _on_patch(self, data): buf_id = data['id'] buf = self.bufs[buf_id] if 'buf' not in buf: msg.debug('buf ', buf['path'], ' not populated yet. not patching') return if buf['encoding'] == 'base64': # TODO apply binary patches return self.get_buf(buf_id, None) if len(data['patch']) == 0: msg.debug('wtf? no patches to apply. 
server is being stupid') return msg.debug('patch is', data['patch']) dmp_patches = DMP.patch_fromText(data['patch']) # TODO: run this in a separate thread old_text = buf['buf'] view = self.get_view(buf_id) if view and not view.is_loading(): view_text = view.get_text() if old_text == view_text: buf['forced_patch'] = False elif not buf.get('forced_patch'): patch = utils.FlooPatch(view_text, buf) # Update the current copy of the buffer buf['buf'] = patch.current buf['md5'] = hashlib.md5(patch.current.encode('utf-8')).hexdigest() buf['forced_patch'] = True msg.debug('forcing patch for ', buf['path']) self.send(patch.to_json()) old_text = view_text else: msg.debug('forced patch is true. not sending another force patch for buf ', buf['path']) md5_before = hashlib.md5(old_text.encode('utf-8')).hexdigest() if md5_before != data['md5_before']: msg.warn('starting md5s don\'t match for ', buf['path'], '. this is dangerous!') t = DMP.patch_apply(dmp_patches, old_text) clean_patch = True for applied_patch in t[1]: if not applied_patch: clean_patch = False break if G.DEBUG: if len(t[0]) == 0: try: msg.debug('OMG EMPTY!') msg.debug('Starting data:', buf['buf']) msg.debug('Patch:', data['patch']) except Exception as e: msg.error(e) if '\x01' in t[0]: msg.debug('FOUND CRAZY BYTE IN BUFFER') msg.debug('Starting data:', buf['buf']) msg.debug('Patch:', data['patch']) timeout_id = buf.get('timeout_id') if timeout_id: utils.cancel_timeout(timeout_id) del buf['timeout_id'] if not clean_patch: msg.log('Couldn\'t patch ', buf['path'], ' cleanly.') return self.get_buf(buf_id, view) cur_hash = hashlib.md5(t[0].encode('utf-8')).hexdigest() if cur_hash != data['md5_after']: msg.debug('Ending md5s don\'t match for ', buf['path'], ' Setting get_buf timeout.') buf['timeout_id'] = utils.set_timeout(self.get_buf, 2000, buf_id, view) buf['buf'] = t[0] buf['md5'] = cur_hash if not view: msg.debug('No view. 
Not saving buffer ', buf_id) def _on_load(): v = self.get_view(buf_id) if v and 'buf' in buf: v.update(buf, message=False) self.on_load[buf_id]['patch'] = _on_load return view.apply_patches(buf, t, data['username']) def _on_get_buf(self, data): buf_id = data['id'] buf = self.bufs.get(buf_id) if not buf: return msg.warn('no buf found: ', data, '. Hopefully you didn\'t need that.') timeout_id = buf.get('timeout_id') if timeout_id: utils.cancel_timeout(timeout_id) if data['encoding'] == 'base64': data['buf'] = base64.b64decode(data['buf']) self.bufs[buf_id] = data save = False if buf_id in self.save_on_get_bufs: self.save_on_get_bufs.remove(buf_id) save = True view = self.get_view(buf_id) if not view: msg.debug('No view for buf ', buf_id, '. Saving to disk.') return utils.save_buf(data) view.update(data) if save: view.save() def _on_create_buf(self, data): if data['encoding'] == 'base64': data['buf'] = base64.b64decode(data['buf']) self.bufs[data['id']] = data self.paths_to_ids[data['path']] = data['id'] view = self.get_view(data['id']) if view: self.save_view(view) else: utils.save_buf(data) def _on_rename_buf(self, data): del self.paths_to_ids[data['old_path']] self.paths_to_ids[data['path']] = data['id'] new = utils.get_full_path(data['path']) old = utils.get_full_path(data['old_path']) new_dir = os.path.split(new)[0] if new_dir: utils.mkdir(new_dir) view = self.get_view(data['id']) if view: view.rename(new) else: try: os.rename(old, new) except Exception as e: msg.debug('Error moving ', old, 'to', new, str_e(e)) utils.save_buf(self.bufs[data.id]) self.bufs[data['id']]['path'] = data['path'] def _on_delete_buf(self, data): buf_id = data['id'] path = data.get('path') try: buf = self.bufs.get(buf_id) if buf: del self.paths_to_ids[buf['path']] del self.bufs[buf_id] path = buf['path'] except KeyError: msg.debug('KeyError deleting buf id ', buf_id) # TODO: if data['unlink'] == True, add to ignore? 
action = 'removed' path = utils.get_full_path(path) if data.get('unlink', False): action = 'deleted' try: utils.rm(path) except Exception as e: msg.debug('Error deleting ', path, ': ', str_e(e)) user_id = data.get('user_id') username = self.get_username_by_id(user_id) msg.log(username, ' ', action, ' ', path) def _upload_file_by_path(self, rel_path): return self._upload(utils.get_full_path(rel_path), self.get_view_text_by_path(rel_path)) @utils.inlined_callbacks def _initial_upload(self, ig, missing_bufs, changed_bufs, cb): files, size = yield self.prompt_ignore, ig, G.PROJECT_PATH missing_buf_ids = set([buf['id'] for buf in missing_bufs]) for buf_id in missing_buf_ids: self.send({'name': 'delete_buf', 'id': buf_id}) for p, buf_id in self.paths_to_ids.items(): if p in files: files.discard(p) # TODO: recalculate size (need size in room_info) continue if buf_id in missing_buf_ids: continue self.send({ 'name': 'delete_buf', 'id': buf_id, }) def __upload(rel_path_or_buf): # Its a buf! if type(rel_path_or_buf) == dict: return self._upload(utils.get_full_path(rel_path_or_buf['path']), rel_path_or_buf.get('buf')) # Its a rel path! buf_id = self.paths_to_ids.get(rel_path_or_buf) text = self.bufs.get(buf_id, {}).get('buf') # Only upload stuff that's not in self.bufs (new bufs). We already took care of everything else. if text is not None: return len(text) return self._upload(utils.get_full_path(rel_path_or_buf), self.get_view_text_by_path(rel_path_or_buf)) def make_iterator(): # Upload changed bufs before everything else, since they're probably what people will edit for b in changed_bufs: yield b for f in files: yield f total_size = reduce(lambda a, buf: a + len(buf.get('buf', '')), changed_bufs, size) self._rate_limited_upload(make_iterator(), total_size, upload_func=__upload) cb() def _scan_dir(self, bufs, ig, read_only): status_msg = 'Comparing local files against workspace...' 
editor.status_message(status_msg) update_status_msg = getattr(self, 'update_status_msg', None) if update_status_msg: try: update_status_msg(self, status_msg) except Exception as e: pass changed_bufs = [] missing_bufs = [] new_files = set() if not read_only: new_files = set([utils.to_rel_path(x) for x in ig.list_paths()]) for buf_id, buf in bufs.items(): buf_id = int(buf_id) # json keys must be strings buf_path = utils.get_full_path(buf['path']) view = self.get_view(buf_id) if view and not view.is_loading() and buf['encoding'] == 'utf8': view_text = view.get_text() view_md5 = hashlib.md5(view_text.encode('utf-8')).hexdigest() buf['buf'] = view_text buf['view'] = view G.VIEW_TO_HASH[view.native_id] = view_md5 if view_md5 == buf['md5']: msg.debug('md5 sum matches view. not getting buffer ', buf['path']) else: changed_bufs.append(buf) buf['md5'] = view_md5 continue try: if buf['encoding'] == 'utf8': if io: buf_fd = io.open(buf_path, 'Urt', encoding='utf8') buf_buf = buf_fd.read() else: buf_fd = open(buf_path, 'rb') buf_buf = buf_fd.read().decode('utf-8').replace('\r\n', '\n') md5 = hashlib.md5(buf_buf.encode('utf-8')).hexdigest() else: buf_fd = open(buf_path, 'rb') buf_buf = buf_fd.read() md5 = hashlib.md5(buf_buf).hexdigest() buf_fd.close() buf['buf'] = buf_buf if md5 == buf['md5']: msg.debug('md5 sum matches. not getting buffer ', buf['path']) else: msg.debug('md5 differs. possibly getting buffer later ', buf['path']) changed_bufs.append(buf) buf['md5'] = md5 except Exception as e: msg.debug('Error calculating md5 for ', buf['path'], ', ', str_e(e)) missing_bufs.append(buf) editor.status_message('Comparing local files against workspace... done.') return changed_bufs, missing_bufs, new_files @utils.inlined_callbacks def _on_room_info(self, data): self.joined_workspace = True self.workspace_info = data G.PERMS = data['perms'] self.proto.reset_retries() if G.OUTBOUND_FILTERING: msg.error('Detected outbound port blocking! 
See https://floobits.com/help/network for more info.') read_only = False if 'patch' not in data['perms']: read_only = True no_perms_msg = '''You don't have permission to edit this workspace. All files will be read-only.''' msg.log('No patch permission. Setting buffers to read-only') if 'request_perm' in data['perms']: should_send = yield self.ok_cancel_dialog, no_perms_msg + '\nDo you want to request edit permission?' # TODO: wait for perms to be OK'd/denied before uploading or bailing if should_send: self.send({'name': 'request_perms', 'perms': ['edit_room']}) else: if G.EXPERT_MODE: editor.status_message(no_perms_msg) else: editor.error_message(no_perms_msg) floo_json = { 'url': utils.to_workspace_url({ 'owner': self.owner, 'workspace': self.workspace, 'host': self.proto.host, 'port': self.proto.port, 'secure': self.proto.secure, }) } utils.update_floo_file(os.path.join(G.PROJECT_PATH, '.floo'), floo_json) utils.update_recent_workspaces(self.workspace_url) ig = ignore.create_ignore_tree(G.PROJECT_PATH) G.IGNORE = ig for buf_id, buf in data['bufs'].items(): buf_id = int(buf_id) # json keys must be strings self.bufs[buf_id] = buf self.paths_to_ids[buf['path']] = buf_id changed_bufs, missing_bufs, new_files = self._scan_dir(data['bufs'], ig, read_only) ignored = [] for p, buf_id in self.paths_to_ids.items(): if p not in new_files: ignored.append(p) new_files.discard(p) if self.action == utils.JOIN_ACTION.UPLOAD: yield self._initial_upload, ig, missing_bufs, changed_bufs # TODO: maybe use org name here who = 'Your friends' anon_perms = G.AGENT.workspace_info.get('anon_perms') if 'get_buf' in anon_perms: who = 'Anyone' _msg = 'You are sharing:\n\n%s\n\n%s can join your workspace at:\n\n%s' % (G.PROJECT_PATH, who, G.AGENT.workspace_url) # Workaround for horrible Sublime Text bug utils.set_timeout(editor.message_dialog, 0, _msg) # Don't auto-upload again on reconnect self.action = utils.JOIN_ACTION.PROMPT elif changed_bufs or missing_bufs or new_files: # TODO: handle 
readonly here if self.action == utils.JOIN_ACTION.PROMPT: stomp_local = yield self.stomp_prompt, changed_bufs, missing_bufs, list(new_files), ignored if stomp_local not in [0, 1]: self.stop() return elif self.action == utils.JOIN_ACTION.DOWNLOAD: stomp_local = True else: # This should never happen assert False return if stomp_local: for buf in changed_bufs: self.get_buf(buf['id'], buf.get('view')) self.save_on_get_bufs.add(buf['id']) for buf in missing_bufs: self.get_buf(buf['id'], buf.get('view')) self.save_on_get_bufs.add(buf['id']) else: yield self._initial_upload, ig, missing_bufs, changed_bufs success_msg = '%s@%s/%s: Joined!' % (self.username, self.owner, self.workspace) msg.log(success_msg) editor.status_message(success_msg) data = utils.get_persistent_data() data['recent_workspaces'].insert(0, {"url": self.workspace_url}) utils.update_persistent_data(data) utils.add_workspace_to_persistent_json(self.owner, self.workspace, self.workspace_url, G.PROJECT_PATH) temp_data = data.get('temp_data', {}) hangout = temp_data.get('hangout', {}) hangout_url = hangout.get('url') if hangout_url: self.prompt_join_hangout(hangout_url) if data.get('repo_info'): msg.log('Repo info:', data.get('repo_info')) # TODO: check local repo info and update remote (or prompt?) 
else: repo_info = repo.get_info(self.workspace_url, G.PROJECT_PATH) if repo_info and 'repo' in G.PERMS: self.send({ 'name': 'repo', 'action': 'set', 'data': repo_info, }) self.emit("room_info") @utils.inlined_callbacks def refresh_workspace(self): ig = ignore.create_ignore_tree(G.PROJECT_PATH) G.IGNORE = ig read_only = 'patch' not in self.workspace_info['perms'] changed_bufs, missing_bufs, new_files = self._scan_dir(self.bufs, G.IGNORE, read_only) ignored = [] for p, buf_id in self.paths_to_ids.items(): if p not in new_files: ignored.append(p) new_files.discard(p) if changed_bufs or missing_bufs or new_files: stomp_local = yield self.stomp_prompt, changed_bufs, missing_bufs, list(new_files), ignored if stomp_local not in [0, 1]: return if stomp_local: for buf in changed_bufs: self.get_buf(buf['id'], buf.get('view')) self.save_on_get_bufs.add(buf['id']) for buf in missing_bufs: self.get_buf(buf['id'], buf.get('view')) self.save_on_get_bufs.add(buf['id']) else: yield self._initial_upload, G.IGNORE, missing_bufs, changed_bufs else: editor.error_message('No files differ') def _on_user_info(self, data): user_id = str(data['user_id']) user_info = data['user_info'] self.workspace_info['users'][user_id] = user_info if user_id == str(self.workspace_info['user_id']): G.PERMS = user_info['perms'] def _on_join(self, data): msg.log(data['username'], ' joined the workspace on ', data.get('client', 'unknown client')) user_id = str(data['user_id']) self.workspace_info['users'][user_id] = data def _on_part(self, data): msg.log(data['username'], ' left the workspace') user_id = str(data['user_id']) try: del self.workspace_info['users'][user_id] except Exception: msg.error('Unable to delete user %s from user list' % (data)) def _on_set_temp_data(self, data): hangout_data = data.get('data', {}) hangout = hangout_data.get('hangout', {}) hangout_url = hangout.get('url') if hangout_url: self.prompt_join_hangout(hangout_url) def _on_saved(self, data): buf_id = data['id'] buf = 
self.bufs.get(buf_id) if not buf: return on_view_load = self.on_load.get(buf_id) if on_view_load: try: del on_view_load['patch'] except KeyError: pass view = self.get_view(data['id']) if view: self.save_view(view) elif 'buf' in buf: utils.save_buf(buf) username = self.get_username_by_id(data['user_id']) msg.log('%s saved buffer %s' % (username, buf['path'])) @utils.inlined_callbacks def _on_request_perms(self, data): user_id = str(data.get('user_id')) username = self.get_username_by_id(user_id) if not username: msg.debug('Unknown user for id ', user_id, '. Not handling request_perms event.') return perm_mapping = { 'edit_room': 'edit', 'admin_room': 'admin', } perms = data.get('perms') perms_str = ''.join([perm_mapping.get(p) for p in perms]) prompt = 'User %s is requesting %s permission for this room.' % (username, perms_str) message = data.get('message') if message: prompt += '\n\n%s says: %s' % (username, message) prompt += '\n\nDo you want to grant them permission?' confirm = yield self.ok_cancel_dialog, prompt self.send({ 'name': 'perms', 'action': confirm and 'add' or 'reject', 'user_id': user_id, 'perms': perms, }) def _on_perms(self, data): action = data['action'] user_id = str(data['user_id']) user = self.workspace_info['users'].get(user_id) if user is None: msg.log('No user for id ', user_id, '. Not handling perms event') return perms = set(user['perms']) if action == 'add': perms |= set(data['perms']) elif action == 'remove': perms -= set(data['perms']) else: return user['perms'] = list(perms) if user_id == self.workspace_info['user_id']: G.PERMS = perms def _on_webrtc(self, data): msg.debug('WebRTC got a data message. 
Action ', data.get('action'), ' user_id ', data.get('user_id')) def _on_msg(self, data): self.on_msg(data) def _on_ping(self, data): self.last_ack_time = time.time() self.send({'name': 'pong'}) @utils.inlined_callbacks def prompt_ignore(self, ig, path, cb): ignore.create_flooignore(ig.path) dirs = ig.get_children() dirs.append(ig) dirs = sorted(dirs, key=attrgetter('size')) size = starting_size = reduce(lambda x, c: x + c.size, dirs, 0) too_big = [] while size > MAX_WORKSPACE_SIZE and dirs: cd = dirs.pop() size -= cd.size too_big.append(cd) if size > MAX_WORKSPACE_SIZE: editor.error_message( 'Maximum workspace size is %.2fMB.\n\n%s is too big (%.2fMB) to upload. Consider adding stuff to the .flooignore file.' % (MAX_WORKSPACE_SIZE / 1000000.0, path, ig.size / 1000000.0)) cb([set(), 0]) return if too_big: txt = TOO_BIG_TEXT % (MAX_WORKSPACE_SIZE / 1000000.0, path, starting_size / 1000000.0, "\n".join(set([x.path for x in too_big]))) upload = yield self.ok_cancel_dialog, txt if not upload: cb([set(), 0]) return files = set() for ig in dirs: files = files.union(set([utils.to_rel_path(x) for x in ig.files])) cb([files, size]) def upload(self, path): if not utils.is_shared(path): editor.error_message('Cannot share %s because is not in shared path %s.\n\nPlease move it there and try again.' % (path, G.PROJECT_PATH)) return ig = ignore.create_ignore_tree(G.PROJECT_PATH) G.IGNORE = ig is_dir = os.path.isdir(path) if ig.is_ignored(path, is_dir, True): editor.error_message('Cannot share %s because it is ignored.\n\nAdd an exclude rule (!%s) to your .flooignore file.' 
% (path, path)) return rel_path = utils.to_rel_path(path) if not is_dir: self._upload_file_by_path(rel_path) return for p in rel_path.split('/'): child = ig.children.get(p) if not child: break ig = child if ig.path != path: msg.warn(ig.path, ' is not the same as ', path) self._rate_limited_upload(ig.list_paths(), ig.total_size, upload_func=self._upload_file_by_path) def _rate_limited_upload(self, paths_iter, total_bytes, bytes_uploaded=0.0, upload_func=None): reactor.tick() upload_func = upload_func or (lambda x: self._upload(utils.get_full_path(x))) if len(self.proto) > 0: self.upload_timeout = utils.set_timeout(self._rate_limited_upload, 10, paths_iter, total_bytes, bytes_uploaded, upload_func) return bar_len = 20 try: p = next(paths_iter) size = upload_func(p) bytes_uploaded += size try: percent = (bytes_uploaded / total_bytes) except ZeroDivisionError: percent = 0.5 bar = ' |' + ('|' * int(bar_len * percent)) + (' ' * int((1 - percent) * bar_len)) + '|' editor.status_message('Uploading... %2.2f%% %s' % (percent * 100, bar)) except StopIteration: editor.status_message('Uploading... 100% ' + ('|' * bar_len) + '| complete') msg.log('All done uploading') return self.upload_timeout = utils.set_timeout(self._rate_limited_upload, 50, paths_iter, total_bytes, bytes_uploaded, upload_func) def _upload(self, path, text=None): size = 0 try: if text is None: with open(path, 'rb') as buf_fd: buf = buf_fd.read() else: try: # work around python 3 encoding issue buf = text.encode('utf8') except Exception as e: msg.debug('Error encoding buf ', path, ': ', str_e(e)) # We're probably in python 2 so it's ok to do this buf = text size = len(buf) encoding = 'utf8' rel_path = utils.to_rel_path(path) existing_buf = self.get_buf_by_path(path) if existing_buf: if text is None: buf_md5 = hashlib.md5(buf).hexdigest() if existing_buf['md5'] == buf_md5: msg.log(path, ' already exists and has the same md5. 
Skipping.') return size existing_buf['md5'] = buf_md5 msg.log('Setting buffer ', rel_path) try: buf = buf.decode('utf-8') except Exception: buf = base64.b64encode(buf).decode('utf-8') encoding = 'base64' existing_buf['buf'] = buf existing_buf['encoding'] = encoding self.send({ 'name': 'set_buf', 'id': existing_buf['id'], 'buf': buf, 'md5': existing_buf['md5'], 'encoding': encoding, }) self.send({'name': 'saved', 'id': existing_buf['id']}) return size try: buf = buf.decode('utf-8') except Exception: buf = base64.b64encode(buf).decode('utf-8') encoding = 'base64' msg.log('Creating buffer ', rel_path, ' (', len(buf), ' bytes)') event = { 'name': 'create_buf', 'buf': buf, 'path': rel_path, 'encoding': encoding, } def done(d): if d.get('id'): self.bufs[d['id']] = buf self.paths_to_ids[rel_path] = d['id'] self.send(event, done) except (IOError, OSError): msg.error('Failed to open ', path) except Exception as e: msg.error('Failed to create buffer ', path, ': ', str_e(e)) return size def kick(self, user_id): if 'kick' not in G.PERMS: return self.send({ 'name': 'kick', 'user_id': user_id, }) def stop(self): utils.cancel_timeout(self.upload_timeout) self.upload_timeout = None super(FlooHandler, self).stop()
PypiClean
/CCC-2.0.1.tar.gz/CCC-2.0.1/ccc/commonregex.py
"""Compiled regular expressions for extracting common data types from free text.

Each module-level name below is a compiled pattern; the `regexes` dict maps a
plural attribute name (e.g. "dates") to its pattern, and `CommonRegex` exposes
one extractor per entry.
"""
import re

# Dates: "3rd of January 2015", "jan. 3, 2015", and numeric forms like
# 12/25/2019.  The (?<!\:)(?<!\:\d) lookbehinds keep clock times such as
# 12:30 from being read as day numbers.
date = re.compile(
    u'(?:(?<!\:)(?<!\:\d)[0-3]?\d(?:st|nd|rd|th)?\s+(?:of\s+)?(?:jan\.?|january|feb\.?|february|mar\.?|march|apr\.?|april|may|jun\.?|june|jul\.?|july|aug\.?|august|sep\.?|september|oct\.?|october|nov\.?|november|dec\.?|december)|(?:jan\.?|january|feb\.?|february|mar\.?|march|apr\.?|april|may|jun\.?|june|jul\.?|july|aug\.?|august|sep\.?|september|oct\.?|october|nov\.?|november|dec\.?|december)\s+(?<!\:)(?<!\:\d)[0-3]?\d(?:st|nd|rd|th)?)(?:\,)?\s*(?:\d{4})?|[0-3]?\d[-\./][0-3]?\d[-\./]\d{2,4}',
    re.IGNORECASE)
# Clock times: "12:30", "12:30 pm", "7pm".  NOTE: the name shadows the stdlib
# `time` module within this file's namespace.
time = re.compile(u'\d{1,2}:\d{2} ?(?:[ap]\.?m\.?)?|\d[ap]\.?m\.?', re.IGNORECASE)
# Phone numbers: US-style 7/10-digit forms plus an international
# "(+NN) NN NNN NNNN" alternative; (?<![\d-]) / (?![\d-]) guard against
# matching inside longer digit runs.
phone = re.compile(
    u'''((?:(?<![\d-])(?:\+?\d{1,3}[-.\s*]?)?(?:\(?\d{3}\)?[-.\s*]?)?\d{3}[-.\s*]?\d{4}(?![\d-]))|(?:(?<![\d-])(?:(?:\(\+?\d{2}\))|(?:\+?\d{2}))\s*\d{2}\s*\d{3}\s*\d{4}(?![\d-])))''')
# NANP phone numbers with an optional extension suffix (#, x, ext, extension).
phones_with_exts = re.compile(
    u'((?:(?:\+?1\s*(?:[.-]\s*)?)?(?:\(\s*(?:[2-9]1[02-9]|[2-9][02-8]1|[2-9][02-8][02-9])\s*\)|(?:[2-9]1[02-9]|[2-9][02-8]1|[2-9][02-8][02-9]))\s*(?:[.-]\s*)?)?(?:[2-9]1[02-9]|[2-9][02-9]1|[2-9][02-9]{2})\s*(?:[.-]\s*)?(?:[0-9]{4})(?:\s*(?:#|x\.?|ext\.?|extension)\s*(?:\d+)?))',
    re.IGNORECASE)
# URLs: optional scheme/www prefix, then a hard-coded alternation of TLDs
# (longest first so e.g. "info" wins over "in"), then an optional path.
# NOTE(review): the TLD list is a fixed snapshot and will miss newer TLDs.
link = re.compile(
    u'(?i)((?:https?://|www\d{0,3}[.])?[a-z0-9.\-]+[.](?:(?:international)|(?:construction)|(?:contractors)|(?:enterprises)|(?:photography)|(?:immobilien)|(?:management)|(?:technology)|(?:directory)|(?:education)|(?:equipment)|(?:institute)|(?:marketing)|(?:solutions)|(?:builders)|(?:clothing)|(?:computer)|(?:democrat)|(?:diamonds)|(?:graphics)|(?:holdings)|(?:lighting)|(?:plumbing)|(?:training)|(?:ventures)|(?:academy)|(?:careers)|(?:company)|(?:domains)|(?:florist)|(?:gallery)|(?:guitars)|(?:holiday)|(?:kitchen)|(?:recipes)|(?:shiksha)|(?:singles)|(?:support)|(?:systems)|(?:agency)|(?:berlin)|(?:camera)|(?:center)|(?:coffee)|(?:estate)|(?:kaufen)|(?:luxury)|(?:monash)|(?:museum)|(?:photos)|(?:repair)|(?:social)|(?:tattoo)|(?:travel)|(?:viajes)|(?:voyage)|(?:build)|(?:cheap)|(?:codes)|(?:dance)|(?:email)|(?:glass)|(?:house)|(?:ninja)|(?:photo)|(?:shoes)|(?:solar)|(?:today)|(?:aero)|(?:arpa)|(?:asia)|(?:bike)|(?:buzz)|(?:camp)|(?:club)|(?:coop)|(?:farm)|(?:gift)|(?:guru)|(?:info)|(?:jobs)|(?:kiwi)|(?:land)|(?:limo)|(?:link)|(?:menu)|(?:mobi)|(?:moda)|(?:name)|(?:pics)|(?:pink)|(?:post)|(?:rich)|(?:ruhr)|(?:sexy)|(?:tips)|(?:wang)|(?:wien)|(?:zone)|(?:biz)|(?:cab)|(?:cat)|(?:ceo)|(?:com)|(?:edu)|(?:gov)|(?:int)|(?:mil)|(?:net)|(?:onl)|(?:org)|(?:pro)|(?:red)|(?:tel)|(?:uno)|(?:xxx)|(?:ac)|(?:ad)|(?:ae)|(?:af)|(?:ag)|(?:ai)|(?:al)|(?:am)|(?:an)|(?:ao)|(?:aq)|(?:ar)|(?:as)|(?:at)|(?:au)|(?:aw)|(?:ax)|(?:az)|(?:ba)|(?:bb)|(?:bd)|(?:be)|(?:bf)|(?:bg)|(?:bh)|(?:bi)|(?:bj)|(?:bm)|(?:bn)|(?:bo)|(?:br)|(?:bs)|(?:bt)|(?:bv)|(?:bw)|(?:by)|(?:bz)|(?:ca)|(?:cc)|(?:cd)|(?:cf)|(?:cg)|(?:ch)|(?:ci)|(?:ck)|(?:cl)|(?:cm)|(?:cn)|(?:co)|(?:cr)|(?:cu)|(?:cv)|(?:cw)|(?:cx)|(?:cy)|(?:cz)|(?:de)|(?:dj)|(?:dk)|(?:dm)|(?:do)|(?:dz)|(?:ec)|(?:ee)|(?:eg)|(?:er)|(?:es)|(?:et)|(?:eu)|(?:fi)|(?:fj)|(?:fk)|(?:fm)|(?:fo)|(?:fr)|(?:ga)|(?:gb)|(?:gd)|(?:ge)|(?:gf)|(?:gg)|(?:gh)|(?:gi)|(?:gl)|(?:gm)|(?:gn)|(?:gp)|(?:gq)|(?:gr)|(?:gs)|(?:gt)|(?:gu)|(?:gw)|(?:gy)|(?:hk)|(?:hm)|(?:hn)|(?:hr)|(?:ht)|(?:hu)|(?:id)|(?:ie)|(?:il)|(?:im)|(?:in)|(?:io)|(?:iq)|(?:ir)|(?:is)|(?:it)|(?:je)|(?:jm)|(?:jo)|(?:jp)|(?:ke)|(?:kg)|(?:kh)|(?:ki)|(?:km)|(?:kn)|(?:kp)|(?:kr)|(?:kw)|(?:ky)|(?:kz)|(?:la)|(?:lb)|(?:lc)|(?:li)|(?:lk)|(?:lr)|(?:ls)|(?:lt)|(?:lu)|(?:lv)|(?:ly)|(?:ma)|(?:mc)|(?:md)|(?:me)|(?:mg)|(?:mh)|(?:mk)|(?:ml)|(?:mm)|(?:mn)|(?:mo)|(?:mp)|(?:mq)|(?:mr)|(?:ms)|(?:mt)|(?:mu)|(?:mv)|(?:mw)|(?:mx)|(?:my)|(?:mz)|(?:na)|(?:nc)|(?:ne)|(?:nf)|(?:ng)|(?:ni)|(?:nl)|(?:no)|(?:np)|(?:nr)|(?:nu)|(?:nz)|(?:om)|(?:pa)|(?:pe)|(?:pf)|(?:pg)|(?:ph)|(?:pk)|(?:pl)|(?:pm)|(?:pn)|(?:pr)|(?:ps)|(?:pt)|(?:pw)|(?:py)|(?:qa)|(?:re)|(?:ro)|(?:rs)|(?:ru)|(?:rw)|(?:sa)|(?:sb)|(?:sc)|(?:sd)|(?:se)|(?:sg)|(?:sh)|(?:si)|(?:sj)|(?:sk)|(?:sl)|(?:sm)|(?:sn)|(?:so)|(?:sr)|(?:st)|(?:su)|(?:sv)|(?:sx)|(?:sy)|(?:sz)|(?:tc)|(?:td)|(?:tf)|(?:tg)|(?:th)|(?:tj)|(?:tk)|(?:tl)|(?:tm)|(?:tn)|(?:to)|(?:tp)|(?:tr)|(?:tt)|(?:tv)|(?:tw)|(?:tz)|(?:ua)|(?:ug)|(?:uk)|(?:us)|(?:uy)|(?:uz)|(?:va)|(?:vc)|(?:ve)|(?:vg)|(?:vi)|(?:vn)|(?:vu)|(?:wf)|(?:ws)|(?:ye)|(?:yt)|(?:za)|(?:zm)|(?:zw))(?:/[^\s()<>]+[^\s`!()\[\]{};:\'".,<>?\xab\xbb\u201c\u201d\u2018\u2019])?)',
    re.IGNORECASE)
# Email addresses (pragmatic subset of RFC 5322 local parts).
email = re.compile(
    u"([a-z0-9!#$%&'*+\/=?^_`{|.}~-]+@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?)",
    re.IGNORECASE)
# Dotted-quad IPv4 addresses with per-octet range checks (0-255).
ip = re.compile(
    u'(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)',
    re.IGNORECASE)
# IPv6, including "::" zero-compression (at most once, enforced by the
# leading negative lookahead) and an embedded-IPv4 tail form.
ipv6 = re.compile(
    u'\s*(?!.*::.*::)(?:(?!:)|:(?=:))(?:[0-9a-f]{0,4}(?:(?<=::)|(?<!::):)){6}(?:[0-9a-f]{0,4}(?:(?<=::)|(?<!::):)[0-9a-f]{0,4}(?:(?<=::)|(?<!:)|(?<=:)(?<!::):)|(?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)){3})\s*',
    re.VERBOSE | re.IGNORECASE | re.DOTALL)
# Dollar amounts with optional thousands separators and cents.
price = re.compile(u'[$]\s?[+-]?[0-9]{1,3}(?:(?:,?[0-9]{3}))*(?:\.[0-9]{1,2})?')
# CSS hex colors: #RGB, #RRGGBB, or #RRGGBBAA.
hex_color = re.compile(u'(#(?:[0-9a-fA-F]{8})|#(?:[0-9a-fA-F]{3}){1,2})\\b')
# Credit card numbers: four groups of four (space/dash separated) or a bare
# 15-16 digit run.
credit_card = re.compile(u'((?:(?:\\d{4}[- ]?){3}\\d{4}|\\d{15,16}))(?![\\d])')
# Legacy (P2PKH/P2SH) Bitcoin addresses in Base58Check.
btc_address = re.compile(u'(?<![a-km-zA-HJ-NP-Z0-9])[13][a-km-zA-HJ-NP-Z0-9]{26,33}(?![a-km-zA-HJ-NP-Z0-9])')
# US-style street addresses: house number + name + street-type keyword.
street_address = re.compile(
    u'\d{1,4} [\w\s]{1,20}(?:street|st|avenue|ave|road|rd|highway|hwy|square|sq|trail|trl|drive|dr|court|ct|park|parkway|pkwy|circle|cir|boulevard|blvd)\W?(?=\s|$)',
    re.IGNORECASE)
# US ZIP and ZIP+4 codes.
zip_code = re.compile(r'\b\d{5}(?:[-\s]\d{4})?\b')
# Post-office boxes: "PO Box 123", "P.O. Box 123", etc.
po_box = re.compile(r'P\.? ?O\.? Box \d+', re.IGNORECASE)

# Attribute name -> compiled pattern; drives CommonRegex attribute creation.
regexes = {
    "dates": date,
    "times": time,
    "phones": phone,
    "phones_with_exts": phones_with_exts,
    "links": link,
    "emails": email,
    "ips": ip,
    "ipv6s": ipv6,
    "prices": price,
    "hex_colors": hex_color,
    "credit_cards": credit_card,
    "btc_addresses": btc_address,
    "street_addresses": street_address,
    "zip_codes": zip_code,
    "po_boxes": po_box,
}


class regex:
    # Helper that binds one compiled pattern to an owning CommonRegex
    # instance and produces an extractor function for it.
    def __init__(self, obj, regex):
        # obj: the owning CommonRegex instance (used for its fallback text)
        # regex: a compiled pattern object
        self.obj = obj
        self.regex = regex

    def __call__(self, *args):
        # *args is accepted but ignored; CommonRegex passes `self` here.
        def regex_method(text=None):
            # Falls back to the owner's text when `text` is falsy — note an
            # explicit empty string therefore also triggers the fallback.
            return [x.strip() for x in self.regex.findall(text or self.obj.text)]

        return regex_method


class CommonRegex(object):
    """Exposes one extractor per entry in `regexes` (dates, links, emails, ...).

    With no constructor text, each attribute is a callable taking `text`.
    When text is supplied, each attribute is eagerly replaced by its result
    list for that text.
    """

    def __init__(self, text=""):
        self.text = text

        # Attach one extractor method per pattern.
        for k, v in regexes.items():
            setattr(self, k, regex(self, v)(self))

        if text:
            # Replace each extractor with its computed result list, so e.g.
            # CommonRegex("...").dates is a list, not a callable.
            for key in regexes.keys():
                method = getattr(self, key)
                setattr(self, key, method())
PypiClean
/Cubane-1.0.11.tar.gz/Cubane-1.0.11/cubane/backend/static/cubane/backend/tinymce/js/tinymce/plugins/help/plugin.min.js
!function(){"use strict";var e,t,n,r,o,a,i=tinymce.util.Tools.resolve("tinymce.PluginManager"),u=function(e){return function(){return e}},c={noop:function(){},noarg:function(e){return function(){return e()}},compose:function(e,t){return function(){return e(t.apply(null,arguments))}},constant:u,identity:function(e){return e},tripleEquals:function(e,t){return e===t},curry:function(e){for(var t=new Array(arguments.length-1),n=1;n<arguments.length;n++)t[n-1]=arguments[n];return function(){for(var n=new Array(arguments.length),r=0;r<n.length;r++)n[r]=arguments[r];var o=t.concat(n);return e.apply(null,o)}},not:function(e){return function(){return!e.apply(null,arguments)}},die:function(e){return function(){throw new Error(e)}},apply:function(e){return e()},call:function(e){e()},never:u(!1),always:u(!0)},l=c.never,s=c.always,f=function(){return m},m=(r={fold:function(e,t){return e()},is:l,isSome:l,isNone:s,getOr:n=function(e){return e},getOrThunk:t=function(e){return e()},getOrDie:function(e){throw new Error(e||"error: getOrDie called on none.")},or:n,orThunk:t,map:f,ap:f,each:function(){},bind:f,flatten:f,exists:l,forall:s,filter:f,equals:e=function(e){return e.isNone()},equals_:e,toArray:function(){return[]},toString:c.constant("none()")},Object.freeze&&Object.freeze(r),r),p=function(e){var t=function(){return e},n=function(){return o},r=function(t){return t(e)},o={fold:function(t,n){return n(e)},is:function(t){return e===t},isSome:s,isNone:l,getOr:t,getOrThunk:t,getOrDie:t,or:n,orThunk:n,map:function(t){return p(t(e))},ap:function(t){return t.fold(f,function(t){return p(t(e))})},each:function(t){t(e)},bind:r,flatten:t,exists:r,forall:r,filter:function(t){return t(e)?o:m},equals:function(t){return t.is(e)},equals_:function(t,n){return t.fold(l,function(t){return n(e,t)})},toArray:function(){return[e]},toString:function(){return"some("+e+")"}};return o},d={some:p,none:f,from:function(e){return 
null===e||e===undefined?m:p(e)}},h=(o=Array.prototype.indexOf)===undefined?function(e,t){return v(e,t)}:function(e,t){return o.call(e,t)},y=function(e,t){return h(e,t)>-1},g=function(e,t){for(var n=e.length,r=new Array(n),o=0;o<n;o++){var a=e[o];r[o]=t(a,o,e)}return r},k=function(e,t){for(var n=[],r=0,o=e.length;r<o;r++){var a=e[r];t(a,r,e)&&n.push(a)}return n},v=function(e,t){for(var n=0,r=e.length;n<r;++n)if(e[n]===t)return n;return-1},b=(Array.prototype.push,Array.prototype.slice,g),x=k,w=function(e,t){for(var n=0,r=e.length;n<r;n++){var o=e[n];if(t(o,n,e))return d.some(o)}return d.none()},C=y,A=tinymce.util.Tools.resolve("tinymce.util.I18n"),S=tinymce.util.Tools.resolve("tinymce.Env"),T=S.mac?"\u2318":"Ctrl",P=S.mac?"Ctrl + Alt":"Shift + Alt",_={shortcuts:[{shortcut:T+" + B",action:"Bold"},{shortcut:T+" + I",action:"Italic"},{shortcut:T+" + U",action:"Underline"},{shortcut:T+" + A",action:"Select all"},{shortcut:T+" + Y or "+T+" + Shift + Z",action:"Redo"},{shortcut:T+" + Z",action:"Undo"},{shortcut:P+" + 1",action:"Header 1"},{shortcut:P+" + 2",action:"Header 2"},{shortcut:P+" + 3",action:"Header 3"},{shortcut:P+" + 4",action:"Header 4"},{shortcut:P+" + 5",action:"Header 5"},{shortcut:P+" + 6",action:"Header 6"},{shortcut:P+" + 7",action:"Paragraph"},{shortcut:P+" + 8",action:"Div"},{shortcut:P+" + 9",action:"Address"},{shortcut:"Alt + F9",action:"Focus to menubar"},{shortcut:"Alt + F10",action:"Focus to toolbar"},{shortcut:"Alt + F11",action:"Focus to element path"},{shortcut:"Ctrl + Shift + P > Ctrl + Shift + P",action:"Focus to contextual toolbar"},{shortcut:T+" + K",action:"Insert link (if link plugin activated)"},{shortcut:T+" + S",action:"Save (if save plugin activated)"},{shortcut:T+" + F",action:"Find (if searchreplace plugin activated)"}]},H=function(){var e=b(_.shortcuts,function(e){return'<tr data-mce-tabstop="1" tabindex="-1" aria-label="Action: '+(t=e).action+", Shortcut: 
"+t.shortcut.replace(/Ctrl/g,"Control")+'"><td>'+A.translate(e.action)+"</td><td>"+e.shortcut+"</td></tr>";var t}).join("");return{title:"Handy Shortcuts",type:"container",style:"overflow-y: auto; overflow-x: hidden; max-height: 250px",items:[{type:"container",html:'<div><table class="mce-table-striped"><thead><th>'+A.translate("Action")+"</th><th>"+A.translate("Shortcut")+"</th></thead>"+e+"</table></div>"}]}},O=(a=Object.keys)===undefined?function(e){var t=[];for(var n in e)e.hasOwnProperty(n)&&t.push(n);return t}:a,M=function(e,t){for(var n=O(e),r=0,o=n.length;r<o;r++){var a=n[r];t(e[a],a,e)}},E=function(e,t){var n={};return M(e,function(r,o){var a=t(r,o,e);n[a.k]=a.v}),n},F=function(e,t){var n=[];return M(e,function(e,r){n.push(t(e,r))}),n},I=function(e){return F(e,function(e){return e})},L={bifilter:function(e,t){var n={},r={};return M(e,function(e,o){(t(e,o)?n:r)[o]=e}),{t:n,f:r}},each:M,map:function(e,t){return E(e,function(e,n,r){return{k:n,v:t(e,n,r)}})},mapToArray:F,tupleMap:E,find:function(e,t){for(var n=O(e),r=0,o=n.length;r<o;r++){var a=n[r],i=e[a];if(t(i,a,e))return d.some(i)}return d.none()},keys:O,values:I,size:function(e){return I(e).length}},B=[{key:"advlist",name:"Advanced List"},{key:"anchor",name:"Anchor"},{key:"autolink",name:"Autolink"},{key:"autoresize",name:"Autoresize"},{key:"autosave",name:"Autosave"},{key:"bbcode",name:"BBCode"},{key:"charmap",name:"Character Map"},{key:"code",name:"Code"},{key:"codesample",name:"Code Sample"},{key:"colorpicker",name:"Color Picker"},{key:"compat3x",name:"3.x Compatibility"},{key:"contextmenu",name:"Context Menu"},{key:"directionality",name:"Directionality"},{key:"emoticons",name:"Emoticons"},{key:"fullpage",name:"Full Page"},{key:"fullscreen",name:"Full Screen"},{key:"help",name:"Help"},{key:"hr",name:"Horizontal Rule"},{key:"image",name:"Image"},{key:"imagetools",name:"Image Tools"},{key:"importcss",name:"Import CSS"},{key:"insertdatetime",name:"Insert Date/Time"},{key:"legacyoutput",name:"Legacy 
Output"},{key:"link",name:"Link"},{key:"lists",name:"Lists"},{key:"media",name:"Media"},{key:"nonbreaking",name:"Nonbreaking"},{key:"noneditable",name:"Noneditable"},{key:"pagebreak",name:"Page Break"},{key:"paste",name:"Paste"},{key:"preview",name:"Preview"},{key:"print",name:"Print"},{key:"save",name:"Save"},{key:"searchreplace",name:"Search and Replace"},{key:"spellchecker",name:"Spell Checker"},{key:"tabfocus",name:"Tab Focus"},{key:"table",name:"Table"},{key:"template",name:"Template"},{key:"textcolor",name:"Text Color"},{key:"textpattern",name:"Text Pattern"},{key:"toc",name:"Table of Contents"},{key:"visualblocks",name:"Visual Blocks"},{key:"visualchars",name:"Visual Characters"},{key:"wordcount",name:"Word Count"}],j=c.curry(function(e,t){return e.replace(/\${([^{}]*)}/g,function(e,n){var r,o=t[n];return"string"==(r=typeof o)||"number"===r?o:e})},'<a href="${url}" target="_blank" rel="noopener">${name}</a>'),z=function(e){var t,n,r=(t=e,n=L.keys(t.plugins),t.settings.forced_plugins===undefined?n:x(n,c.not(c.curry(C,t.settings.forced_plugins)))),o=b(r,function(t){return"<li>"+(n=e,r=t,w(B,function(e){return e.key===r}).fold(function(){var e=n.plugins[r].getMetadata;return"function"==typeof e?j(e()):r},function(e){return j({name:e.name,url:"https://www.tinymce.com/docs/plugins/"+e.key})}))+"</li>";var n,r}),a=o.length,i=o.join("");return"<p><b>"+A.translate(["Plugins installed ({0}):",a])+"</b></p><ul>"+i+"</ul>"},D=function(e){return{title:"Plugins",type:"container",style:"overflow-y: auto; overflow-x: hidden;",layout:"flex",padding:10,spacing:10,items:[(t=e,{type:"container",html:'<div style="overflow-y: auto; overflow-x: hidden; max-height: 230px; height: 230px;" data-mce-tabstop="1" tabindex="-1">'+z(t)+"</div>",flex:1}),{type:"container",html:'<div style="padding: 10px; background: #e3e7f4; height: 100%;" data-mce-tabstop="1" tabindex="-1"><p><b>'+A.translate("Premium plugins:")+'</b></p><ul><li>PowerPaste</li><li>Spell Checker Pro</li><li>Accessibility 
Checker</li><li>Advanced Code Editor</li><li>Enhanced Media Embed</li><li>Link Checker</li></ul><br /><p style="float: right;"><a href="https://www.tinymce.com/pricing/?utm_campaign=editor_referral&utm_medium=help_dialog&utm_source=tinymce" target="_blank">'+A.translate("Learn more...")+"</a></p></div>",flex:1}]};var t},q=tinymce.util.Tools.resolve("tinymce.EditorManager"),N=function(){var e,t,n='<a href="https://www.tinymce.com/docs/changelog/?utm_campaign=editor_referral&utm_medium=help_dialog&utm_source=tinymce" target="_blank">TinyMCE '+(e=q.majorVersion,t=q.minorVersion,0===e.indexOf("@")?"X.X.X":e+"."+t)+"</a>";return[{type:"label",html:A.translate(["You are using {0}",n])},{type:"spacer",flex:1},{text:"Close",onclick:function(){this.parent().parent().close()}}]},R=function(e,t){return function(){e.windowManager.open({title:"Help",bodyType:"tabpanel",layout:"flex",body:[H(),D(e)],buttons:N(),onPostRender:function(){this.getEl("title").innerHTML='<img src="'+t+'/img/logo.png" alt="TinyMCE Logo" style="display: inline-block; width: 200px; height: 50px">'}})}},V=function(e,t){e.addCommand("mceHelp",R(e,t))},U=function(e,t){e.addButton("help",{icon:"help",onclick:R(e,t)}),e.addMenuItem("Help",{text:"Help",icon:"help",context:"help",onclick:R(e,t)})};i.add("help",function(e,t){U(e,t),V(e,t),e.shortcuts.add("Alt+0","Open help dialog","mceHelp")})}();
PypiClean
/Bottleneck-1.3.7rc1-cp36-cp36m-macosx_10_9_x86_64.whl/bottleneck/slow/move.py
"Alternative methods of calculating moving window statistics."

import warnings

import numpy as np

__all__ = [
    "move_sum",
    "move_mean",
    "move_std",
    "move_var",
    "move_min",
    "move_max",
    "move_argmin",
    "move_argmax",
    "move_median",
    "move_rank",
]


def move_sum(a, window, min_count=None, axis=-1):
    "Slow move_sum for unaccelerated dtype"
    return move_func(np.nansum, a, window, min_count, axis=axis)


def move_mean(a, window, min_count=None, axis=-1):
    "Slow move_mean for unaccelerated dtype"
    return move_func(np.nanmean, a, window, min_count, axis=axis)


def move_std(a, window, min_count=None, axis=-1, ddof=0):
    "Slow move_std for unaccelerated dtype"
    return move_func(np.nanstd, a, window, min_count, axis=axis, ddof=ddof)


def move_var(a, window, min_count=None, axis=-1, ddof=0):
    "Slow move_var for unaccelerated dtype"
    return move_func(np.nanvar, a, window, min_count, axis=axis, ddof=ddof)


def move_min(a, window, min_count=None, axis=-1):
    "Slow move_min for unaccelerated dtype"
    return move_func(np.nanmin, a, window, min_count, axis=axis)


def move_max(a, window, min_count=None, axis=-1):
    "Slow move_max for unaccelerated dtype"
    return move_func(np.nanmax, a, window, min_count, axis=axis)


def move_argmin(a, window, min_count=None, axis=-1):
    "Slow move_argmin for unaccelerated dtype"

    def argmin(a, axis):
        # The returned index is counted from the newest (rightmost) element
        # of the window, so flip along `axis` before locating the minimum.
        # np.asarray is the NumPy 2.0-safe spelling of the old
        # np.array(a, copy=False) (which now raises when a copy is needed).
        a = np.asarray(a)
        flip = [slice(None)] * a.ndim
        flip[axis] = slice(None, None, -1)
        a = a[tuple(flip)]  # if tie, pick index of rightmost tie
        try:
            idx = np.nanargmin(a, axis=axis)
        except ValueError:
            # nanargmin raises on an all-NaN slice.  Fill NaNs with +inf so
            # argmin is well defined, then restore NaN for all-NaN slices.
            a = a.copy()
            mask = np.isnan(a)
            np.copyto(a, np.inf, where=mask)
            idx = np.argmin(a, axis=axis).astype(np.float64)
            if idx.ndim == 0:
                idx = np.nan
            else:
                mask = np.all(mask, axis=axis)
                idx[mask] = np.nan
        return idx

    return move_func(argmin, a, window, min_count, axis=axis)


def move_argmax(a, window, min_count=None, axis=-1):
    "Slow move_argmax for unaccelerated dtype"

    def argmax(a, axis):
        # Mirror image of move_argmin's helper: flip so the index counts
        # from the rightmost element, and fill all-NaN slices with -inf.
        a = np.asarray(a)
        flip = [slice(None)] * a.ndim
        flip[axis] = slice(None, None, -1)
        a = a[tuple(flip)]  # if tie, pick index of rightmost tie
        try:
            idx = np.nanargmax(a, axis=axis)
        except ValueError:
            # an all-NaN slice encountered
            a = a.copy()
            mask = np.isnan(a)
            np.copyto(a, -np.inf, where=mask)
            idx = np.argmax(a, axis=axis).astype(np.float64)
            if idx.ndim == 0:
                idx = np.nan
            else:
                mask = np.all(mask, axis=axis)
                idx[mask] = np.nan
        return idx

    return move_func(argmax, a, window, min_count, axis=axis)


def move_median(a, window, min_count=None, axis=-1):
    "Slow move_median for unaccelerated dtype"
    return move_func(np.nanmedian, a, window, min_count, axis=axis)


def move_rank(a, window, min_count=None, axis=-1):
    "Slow move_rank for unaccelerated dtype"
    return move_func(lastrank, a, window, min_count, axis=axis)


# magic utility functions ---------------------------------------------------


def move_func(func, a, window, min_count=None, axis=-1, **kwargs):
    """Generic moving window function implemented with a python loop.

    Applies `func` to each trailing window of length `window` along `axis`.
    Output positions whose window holds fewer than `min_count` non-NaN
    values (default: `window`, i.e. a completely filled window) are NaN.

    Raises
    ------
    ValueError
        If `min_count` is invalid, `a` is 0-d, `axis` is None, or
        `window` is out of range for `a.shape[axis]`.
    """
    a = np.asarray(a)
    if min_count is None:
        mc = window
    else:
        mc = min_count
        if mc > window:
            msg = "min_count (%d) cannot be greater than window (%d)"
            raise ValueError(msg % (mc, window))
        elif mc <= 0:
            raise ValueError("`min_count` must be greater than zero.")
    if a.ndim == 0:
        raise ValueError("moving window functions require ndim > 0")
    if axis is None:
        raise ValueError("An `axis` value of None is not supported.")
    if window < 1:
        raise ValueError("`window` must be at least 1.")
    if window > a.shape[axis]:
        raise ValueError("`window` is too long.")
    if issubclass(a.dtype.type, np.inexact):
        y = np.empty_like(a)
    else:
        # integer/bool input: result must be able to hold NaN
        y = np.empty(a.shape)
    idx1 = [slice(None)] * a.ndim
    idx2 = list(idx1)
    # Reductions over partially/fully NaN windows warn; those positions are
    # masked to NaN below anyway, so suppress the noise.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        for i in range(a.shape[axis]):
            # Leading edge: windows shorter than `window` until i+1 >= window.
            win = min(window, i + 1)
            idx1[axis] = slice(i + 1 - win, i + 1)
            idx2[axis] = i
            y[tuple(idx2)] = func(a[tuple(idx1)], axis=axis, **kwargs)
    idx = _mask(a, window, mc, axis)
    y[idx] = np.nan
    return y


def _mask(a, window, min_count, axis):
    """Boolean mask of positions whose window has < `min_count` non-NaN values.

    Uses the fact that NaN != NaN, so (a == a) is the "is not NaN" indicator;
    its cumulative sum gives a running count of valid values, and the
    difference of that count across a window-sized lag gives per-window
    valid counts for the steady-state region.
    """
    n = (a == a).cumsum(axis)
    idx1 = [slice(None)] * a.ndim
    idx2 = [slice(None)] * a.ndim
    idx3 = [slice(None)] * a.ndim
    idx1[axis] = slice(window, None)       # steady state: full windows
    idx2[axis] = slice(None, -window)      # lagged counts for the difference
    idx3[axis] = slice(None, window)       # leading edge: growing windows
    idx1 = tuple(idx1)
    idx2 = tuple(idx2)
    idx3 = tuple(idx3)
    nidx1 = n[idx1]
    nidx1 = nidx1 - n[idx2]
    idx = np.empty(a.shape, dtype=np.bool_)
    idx[idx1] = nidx1 < min_count
    idx[idx3] = n[idx3] < min_count
    return idx


# ---------------------------------------------------------------------------


def lastrank(a, axis=-1):
    """
    The ranking of the last element along the axis, ignoring NaNs.

    The ranking is normalized to be between -1 and 1 instead of the more
    common 1 and N. The results are adjusted for ties.

    Parameters
    ----------
    a : ndarray
        Input array. If `a` is not an array, a conversion is attempted.
    axis : int, optional
        The axis over which to rank. By default (axis=-1) the ranking
        (and reducing) is performed over the last axis.

    Returns
    -------
    d : array
        In the case of, for example, a 2d array of shape (n, m) and
        axis=1, the output will contain the rank (normalized to be
        between -1 and 1 and adjusted for ties) of the last element of
        each row. The output in this example will have shape (n,).

    Examples
    --------
    The last element (3) is the largest, so its rank is 1.0:

    >>> float(lastrank(np.array([1, 2, 3])))
    1.0

    The last element (1) is the smallest, so its rank is -1.0:

    >>> float(lastrank(np.array([3, 2, 1])))
    -1.0

    The last element (2) is neither the minimum nor the maximum:

    >>> float(lastrank(np.array([1, 3, 4, 5, 2])))
    -0.5

    """
    a = np.asarray(a)
    ndim = a.ndim
    if a.size == 0:
        # At least one dimension has length 0; return an all-NaN result of
        # the reduced shape (scalar NaN when fully reduced).
        shape = list(a.shape)
        shape.pop(axis)
        r = np.empty(shape, dtype=float)
        r.fill(np.nan)
        r = r.astype(a.dtype)
        if (r.ndim == 0) and (r.size == 1):
            r = np.nan
        return r
    indlast = [slice(None)] * ndim
    indlast[axis] = slice(-1, None)   # keeps the reduced axis (for broadcasting)
    indlast = tuple(indlast)
    indlast2 = [slice(None)] * ndim
    indlast2[axis] = -1               # drops the reduced axis
    indlast2 = tuple(indlast2)
    n = (~np.isnan(a)).sum(axis)
    a_indlast = a[indlast]
    # Midrank of the last element: count of strictly-smaller values plus
    # half credit for ties (g = "greater than", e = "equal to" counts).
    g = (a_indlast > a).sum(axis)
    e = (a_indlast == a).sum(axis)
    r = (g + g + e - 1.0) / 2.0
    r = r / (n - 1.0)
    r = 2.0 * (r - 0.5)               # rescale [0, 1] -> [-1, 1]
    if ndim == 1:
        if n == 1:
            r = 0
        # Intentionally a separate `if` (not elif): a NaN last element wins
        # over the single-observation case and forces a NaN result.
        if np.isnan(a[indlast2]):
            r = np.nan
    else:
        np.putmask(r, n == 1, 0)
        np.putmask(r, np.isnan(a[indlast2]), np.nan)
    return r
PypiClean
/Attention_and_Transformers-0.0.15-py3-none-any.whl/Attention_and_Transformers/MobileViT_v1/mobile_vit_v1.py
"""MobileViT-v1 classification models (Keras functional API)."""

from dataclasses import dataclass

from tensorflow.keras import Model, Input
from tensorflow.keras.layers import GlobalAveragePooling2D, Dropout, Dense

from .BaseLayers import ConvLayer, InvertedResidualBlock
from .mobile_vit_v1_block import MobileViT_v1_Block


def MobileViT_v1(
    out_channels: list,
    expansion_factor: int,
    tf_repeats: list,
    tf_embedding_dims: list,
    linear_drop: float = 0.0,
    attention_drop: float = 0.2,
    num_classes: int = 1000,
    input_shape: tuple = (256, 256, 3),
    model_type: str = "S",
):
    """
    Build the MobileViT-v1 backbone + classification head as a Keras Model.

    Arguments
    --------
    out_channels: (list) Output channels of each layer (12 entries, indexed in order of use below)

    expansion_factor: (int) Inverted residual block -> bottleneck expansion size

    tf_repeats: (list) Number of times to repeat each transformer block (one entry per MobileViT block)

    tf_embedding_dims: (list) Embedding dimension used in each transformer block

    num_classes: (int) Number of output classes

    input_shape: (tuple) Input shape -> H, W, C

    model_type: (str) Model variant label used only in the model name

    linear_drop: (float) Dropout rate for Dense layers

    attention_drop: (float) Dropout rate for the attention matrix
    """
    input_layer = Input(shape=input_shape)

    # Block 1: stride-2 stem convolution followed by one inverted-residual unit.
    out_b1_1 = ConvLayer(num_filters=out_channels[0], kernel_size=3, strides=2)(input_layer)

    out_b1_2 = InvertedResidualBlock(
        in_channels=out_channels[0],
        out_channels=out_channels[1],
        depthwise_stride=1,
        expansion_factor=expansion_factor,
        name="block-1-IR1",
    )(out_b1_1)

    # Residual connection only when the channel counts match.
    if out_b1_1.shape[-1] == out_b1_2.shape[-1]:
        out = out_b1_1 + out_b1_2
    else:
        out = out_b1_2

    # Block 2: downsample (stride 2) then two residual inverted-residual units.
    out_b2_1 = InvertedResidualBlock(
        in_channels=out_channels[1],
        out_channels=out_channels[2],
        depthwise_stride=2,
        expansion_factor=expansion_factor,
        name="block-2-IR1",
    )(out)

    out_b2_2 = InvertedResidualBlock(
        in_channels=out_channels[2],
        out_channels=out_channels[3],
        depthwise_stride=1,
        expansion_factor=expansion_factor,
        name="block-2-IR2",
    )(out_b2_1)

    out = out_b2_1 + out_b2_2

    out_b2_3 = InvertedResidualBlock(
        in_channels=out_channels[3],
        out_channels=out_channels[4],
        depthwise_stride=1,
        expansion_factor=expansion_factor,
        name="block-2-IR3",
    )(out)

    out = out + out_b2_3

    # Block 3: downsample, then the first MobileViT (conv + transformer) block.
    out = InvertedResidualBlock(
        in_channels=out_channels[4],
        out_channels=out_channels[5],
        depthwise_stride=2,
        expansion_factor=expansion_factor,
        name="block-3-IR1",
    )(out)

    out = MobileViT_v1_Block(
        out_filters=out_channels[6],
        embedding_dim=tf_embedding_dims[0],
        transformer_repeats=tf_repeats[0],
        name="MobileViTBlock-1",
        attention_drop=attention_drop,
        linear_drop=linear_drop,
    )(out)

    # Block 4: downsample + second MobileViT block.
    out = InvertedResidualBlock(
        in_channels=out_channels[6],
        out_channels=out_channels[7],
        depthwise_stride=2,
        expansion_factor=expansion_factor,
        name="block-4-IR1",
    )(out)

    out = MobileViT_v1_Block(
        out_filters=out_channels[8],
        embedding_dim=tf_embedding_dims[1],
        transformer_repeats=tf_repeats[1],
        name="MobileViTBlock-2",
        attention_drop=attention_drop,
        linear_drop=linear_drop,
    )(out)

    # Block 5: downsample + third MobileViT block, then a 1x1 projection conv.
    out = InvertedResidualBlock(
        in_channels=out_channels[8],
        out_channels=out_channels[9],
        depthwise_stride=2,
        expansion_factor=expansion_factor,
        name="block-5-IR1",
    )(out)

    out = MobileViT_v1_Block(
        out_filters=out_channels[10],
        embedding_dim=tf_embedding_dims[2],
        transformer_repeats=tf_repeats[2],
        name="MobileViTBlock-3",
        attention_drop=attention_drop,
        linear_drop=linear_drop,
    )(out)

    out = ConvLayer(num_filters=out_channels[11], kernel_size=1, strides=1)(out)

    # Output layer: global pool -> optional dropout -> logits (no softmax here).
    out = GlobalAveragePooling2D()(out)

    if linear_drop > 0.0:
        out = Dropout(rate=linear_drop)(out)

    out = Dense(units=num_classes)(out)

    model = Model(inputs=input_layer, outputs=out, name=f"MobileViT_v1-{model_type}")

    return model


# NOTE(review): the three config classes below use un-annotated class
# attributes, so @dataclass registers no fields and frozen=True has no
# practical effect — they behave as plain class-level constants.  This works
# for read-only configuration, but confirm it is intentional.
@dataclass(frozen=True)
class config_MobileViT_v1_S:
    # Variant "S" hyperparameters (channels / expansion / transformer depth & width).
    out_channels = [16, 32, 64, 64, 64, 96, 96, 128, 128, 160, 160, 640]
    depthwise_expansion_factor = 4
    tf_repeats = [2, 4, 3]
    tf_embedding_dims = [144, 192, 240]


@dataclass(frozen=True)
class config_MobileViT_v1_XS:
    # Variant "XS" hyperparameters.
    out_channels = [16, 32, 48, 48, 48, 64, 64, 80, 80, 96, 96, 384]
    depthwise_expansion_factor = 4
    tf_repeats = [2, 4, 3]
    tf_embedding_dims = [96, 120, 144]


@dataclass(frozen=True)
class config_MobileViT_v1_XXS:
    # Variant "XXS" hyperparameters (smallest variant, expansion factor 2).
    out_channels = [16, 16, 24, 24, 24, 48, 48, 64, 64, 80, 80, 320]
    depthwise_expansion_factor = 2
    tf_repeats = [2, 4, 3]
    tf_embedding_dims = [64, 80, 96]


def build_MobileViT_v1(model_type: str = "S", num_classes: int = 1000, input_shape: tuple = (None, None, 3), **kwargs):
    """
    Create MobileViT-v1 Classification models

    Arguments
    --------
    model_type: (str) MobileViT version to create. Options: S, XS, XXS

    num_classes: (int) Number of output classes

    input_shape: (tuple) Input shape -> H, W, C

    Additional arguments:
    ---------------------
    linear_drop: (float) Dropout rate for Dense layers

    attention_drop: (float) Dropout rate for the attention matrix

    Raises
    ------
    ValueError
        If `model_type` is not one of "S", "XS", "XXS".
    """
    if model_type == "S":
        config = config_MobileViT_v1_S()
    elif model_type == "XS":
        config = config_MobileViT_v1_XS()
    elif model_type == "XXS":
        config = config_MobileViT_v1_XXS()
    else:
        raise ValueError("Bad Input. 'model_type' should one of ['S', 'XS', 'XXS']")

    model = MobileViT_v1(
        out_channels=config.out_channels,
        expansion_factor=config.depthwise_expansion_factor,  # Inverted residual block -> bottleneck expansion size
        tf_repeats=config.tf_repeats,
        tf_embedding_dims=config.tf_embedding_dims,
        num_classes=num_classes,
        input_shape=input_shape,
        model_type=model_type,
        **kwargs,
    )

    return model


if __name__ == "__main__":
    # Smoke test: build the "S" variant and print the layer summary.
    model = build_MobileViT_v1(
        model_type=r"S",  # "XS", "XXS"
        input_shape=(256, 256, 3),  # (None, None, 3)
        num_classes=1000,
        linear_drop=0.0,
        attention_drop=0.0,
    )

    model.summary(positions=[0.33, 0.64, 0.75, 1.0])
PypiClean
/NESTML-5.3.0-py3-none-any.whl/NESTML-5.3.0.data/data/doc/models_library/static.rst
static ###### Static synapse Description +++++++++++ A synapse where the synaptic strength (weight) does not evolve with simulated time, but is defined as a (constant) parameter. Parameters ++++++++++ .. csv-table:: :header: "Name", "Physical unit", "Default value", "Description" :widths: auto "w", "real", "1", "Synaptic weight" "d", "ms", "1ms", "Synaptic transmission delay" Source code +++++++++++ The model source code can be found in the NESTML models repository here: `static <https://github.com/nest/nestml/tree/master/models/synapses/static_synapse.nestml>`_. Characterisation ++++++++++++++++ .. include:: static_characterisation.rst .. footer:: Generated at 2023-03-23 09:41:54.866055
PypiClean
/ENCODEQueryTools-0.1.1.tar.gz/ENCODEQueryTools-0.1.1/docs/html/README.html
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <title>About ENCODEQueryTools &mdash; ENCODEQueryTools 0.1.0a documentation</title> <link rel="stylesheet" href="_static/default.css" type="text/css" /> <link rel="stylesheet" href="_static/pygments.css" type="text/css" /> <script type="text/javascript"> var DOCUMENTATION_OPTIONS = { URL_ROOT: './', VERSION: '0.1.0a', COLLAPSE_INDEX: false, FILE_SUFFIX: '.html', HAS_SOURCE: true }; </script> <script type="text/javascript" src="_static/jquery.js"></script> <script type="text/javascript" src="_static/underscore.js"></script> <script type="text/javascript" src="_static/doctools.js"></script> <link rel="top" title="ENCODEQueryTools 0.1.0a documentation" href="index.html" /> <link rel="next" title="ENCODEQT Python Module API" href="code.html" /> <link rel="prev" title="Welcome to ENCODEQueryTools’ documentation!" href="index.html" /> </head> <body> <div class="related"> <h3>Navigation</h3> <ul> <li class="right" style="margin-right: 10px"> <a href="genindex.html" title="General Index" accesskey="I">index</a></li> <li class="right" > <a href="py-modindex.html" title="Python Module Index" >modules</a> |</li> <li class="right" > <a href="code.html" title="ENCODEQT Python Module API" accesskey="N">next</a> |</li> <li class="right" > <a href="index.html" title="Welcome to ENCODEQueryTools’ documentation!" 
accesskey="P">previous</a> |</li> <li><a href="index.html">ENCODEQueryTools 0.1.0a documentation</a> &raquo;</li> </ul> </div> <div class="document"> <div class="documentwrapper"> <div class="bodywrapper"> <div class="body"> <div class="section" id="about-encodequerytools"> <h1>About ENCODEQueryTools<a class="headerlink" href="#about-encodequerytools" title="Permalink to this headline">¶</a></h1> <div class="line-block"> <div class="line"><br /></div> </div> <div class="section" id="introduction"> <h2>Introduction<a class="headerlink" href="#introduction" title="Permalink to this headline">¶</a></h2> <p>The ENCODEQT module of the ENCODEQueryTools package is a Python wrapper around the ENCODE ChIP-Seq Significance Tool&#8217;s API (ENCODEQT API). The ENCODE ChIP-Seq Significance Tool is a database-backed web tool for identifying enriched transcription factors from ENCODE ChIP-Seq experiments given a list of gene or transcript identifiers. This workflow is particularly useful when combined with lists of enriched genes or transcripts from RNA-Seq or microarray experiments.</p> </div> <div class="section" id="required-libraries"> <h2>Required Libraries<a class="headerlink" href="#required-libraries" title="Permalink to this headline">¶</a></h2> <p>The ENCODEQT module imports the urllib, urllib2, and json libraries, all of which should be included with a standard Python distribution. In addition, pandas is also required as a dependency.
The module was written and tested using Python 2.7.6.</p> </div> <div class="section" id="installation"> <h2>Installation<a class="headerlink" href="#installation" title="Permalink to this headline">¶</a></h2> <p>ENCODEQueryTools can be installed using the pip package manager:</p> <div class="highlight-python"><div class="highlight"><pre>pip install ENCODEQueryTools </pre></div> </div> </div> <div class="section" id="license"> <h2>License<a class="headerlink" href="#license" title="Permalink to this headline">¶</a></h2> <p>ENCODEQueryTools is distributed under the New BSD License/Modified BSD License (3-clause). Please see the LICENSE.txt file included with the package for full details.</p> </div> <div class="section" id="other-documentation"> <h2>Other Documentation<a class="headerlink" href="#other-documentation" title="Permalink to this headline">¶</a></h2> <p>Additional documentation including a complete methods summary, example queries using the ENCODEQT module, and a full description of the Python package and the corresponding JSON API for the ENCODE ChIP-Seq Significance Tool can be found in the docs folder. We highly encourage the community to use the JSON AI description to develop tools in other programming languages that leverage the ENCODE ChIP-Seq Significance Tool.</p> </div> <div class="section" id="additional-information"> <h2>Additional Information<a class="headerlink" href="#additional-information" title="Permalink to this headline">¶</a></h2> <p>The ENCODE ChIP-Seq Significance Tool is maintained by the Butte Laboratory at Stanford University in Stanford, California, USA. It is not directly affiliated with the ENCODE consortium (<a class="reference external" href="http://www.genome.gov/encode">http://www.genome.gov/encode</a>), but rather leverages data that the ENCODE consortium has released publicly without restriction. 
Please acknowledge the ENCODE Consortium when using data derived from this tool in manuscripts.</p> </div> <div class="section" id="citation"> <h2>Citation<a class="headerlink" href="#citation" title="Permalink to this headline">¶</a></h2> <p>For work using the ENCODEQT module, we ask that authors please cite the tool in their manuscripts as this module leverages the backend infrastructure of the web tool directly. The citation is:</p> <div class="highlight-python"><div class="highlight"><pre>Auerbach RK, Chen B, Butte AJ. Relating genes to function: identifying enriched transcription factors using the ENCODE ChIP-Seq significance tool. Bioinformatics. 2013 Aug 1;29(15):1922-4. doi: 10.1093/bioinformatics/btt316. Epub 2013 Jun 3. PubMed PMID: 23732275; PubMed Central PMCID: PMC3712221. </pre></div> </div> </div> <div class="section" id="contributions"> <h2>Contributions<a class="headerlink" href="#contributions" title="Permalink to this headline">¶</a></h2> <p>We welcome any contributions from the developer community, be they additional libraries useful for leveraging ENCODE consortium data, tools that leverage these data, or general support. ENCODE data is currently very hard to access and we hope that this package will help unlock this vast resource for everyone.</p> <p>Raymond Auerbach, the designer of the ENCODE ChIP-Seq Significance Tool and currently a postdoc in the Butte Laboratory, wrote this package as a first draft. As is usually the case in academia, postdoc positions are not forever and Raymond will be moving on. 
At that point, a new person will be handling questions and requests from the Butte Laboratory.</p> </div> </div> </div> </div> </div> <div class="sphinxsidebar"> <div class="sphinxsidebarwrapper"> <h3><a href="index.html">Table Of Contents</a></h3> <ul> <li><a class="reference internal" href="#">About ENCODEQueryTools</a><ul> <li><a class="reference internal" href="#introduction">Introduction</a></li> <li><a class="reference internal" href="#required-libraries">Required Libraries</a></li> <li><a class="reference internal" href="#installation">Installation</a></li> <li><a class="reference internal" href="#license">License</a></li> <li><a class="reference internal" href="#other-documentation">Other Documentation</a></li> <li><a class="reference internal" href="#additional-information">Additional Information</a></li> <li><a class="reference internal" href="#citation">Citation</a></li> <li><a class="reference internal" href="#contributions">Contributions</a></li> </ul> </li> </ul> <h4>Previous topic</h4> <p class="topless"><a href="index.html" title="previous chapter">Welcome to ENCODEQueryTools&#8217; documentation!</a></p> <h4>Next topic</h4> <p class="topless"><a href="code.html" title="next chapter">ENCODEQT Python Module API</a></p> <h3>This Page</h3> <ul class="this-page-menu"> <li><a href="_sources/README.txt" rel="nofollow">Show Source</a></li> </ul> <div id="searchbox" style="display: none"> <h3>Quick search</h3> <form class="search" action="search.html" method="get"> <input type="text" name="q" /> <input type="submit" value="Go" /> <input type="hidden" name="check_keywords" value="yes" /> <input type="hidden" name="area" value="default" /> </form> <p class="searchtip" style="font-size: 90%"> Enter search terms or a module, class or function name. 
</p> </div> <script type="text/javascript">$('#searchbox').show(0);</script> </div> </div> <div class="clearer"></div> </div> <div class="related"> <h3>Navigation</h3> <ul> <li class="right" style="margin-right: 10px"> <a href="genindex.html" title="General Index" >index</a></li> <li class="right" > <a href="py-modindex.html" title="Python Module Index" >modules</a> |</li> <li class="right" > <a href="code.html" title="ENCODEQT Python Module API" >next</a> |</li> <li class="right" > <a href="index.html" title="Welcome to ENCODEQueryTools’ documentation!" >previous</a> |</li> <li><a href="index.html">ENCODEQueryTools 0.1.0a documentation</a> &raquo;</li> </ul> </div> <div class="footer"> &copy; Copyright 2014, Stanford University. Created using <a href="http://sphinx-doc.org/">Sphinx</a> 1.2.2. </div> </body> </html>
PypiClean
/IdracRedfishSupportTest-0.0.7.tar.gz/IdracRedfishSupportTest-0.0.7/VirtualDiskExpansionREDFISH.py
import argparse import getpass import json import logging import re import requests import sys import time import warnings from datetime import datetime from pprint import pprint warnings.filterwarnings("ignore") parser=argparse.ArgumentParser(description="Python script using Redfish API with OEM extension to expand storage virtual disk, either add a disk or expand current size.") parser.add_argument('-ip',help='iDRAC IP address', required=False) parser.add_argument('-u', help='iDRAC username', required=False) parser.add_argument('-p', help='iDRAC password. If you do not pass in argument -p, script will prompt to enter user password which will not be echoed to the screen.', required=False) parser.add_argument('-x', help='Pass in X-Auth session token for executing Redfish calls. All Redfish calls will use X-Auth token instead of username/password', required=False) parser.add_argument('--ssl', help='SSL cert verification for all Redfish calls, pass in value \"true\" or \"false\". By default, this argument is not required and script ignores validating SSL cert for all Redfish calls.', required=False) parser.add_argument('--script-examples', help='Get executing script examples', action="store_true", dest="script_examples", required=False) parser.add_argument('--get-controllers', help='Get server storage controller FQDDs', action="store_true", dest="get_controllers", required=False) parser.add_argument('--get-disks', help='Get server storage controller disk FQDDs and their raid status, pass in storage controller FQDD, Example "\RAID.Integrated.1-1\"', dest="get_disks", required=False) parser.add_argument('--get-virtualdisks', help='Get current server storage controller virtual disk(s) and virtual disk type, pass in storage controller FQDD, Example "\RAID.Integrated.1-1\"', dest="get_virtualdisks", required=False) parser.add_argument('--get-virtualdisk-details', help='Get complete details for all virtual disks behind storage controller, pass in storage controller FQDD, 
Example "\RAID.Integrated.1-1\"', dest="get_virtualdisk_details", required=False) parser.add_argument('--expand', help='Pass in the new VD FQDD you want to expand.', required=False) parser.add_argument('--pdisks', help='Pass in disk(s) you want to add to the virtual disk. If you pass in multiple disk FQDDs use a comma separator between FQDDs.', required=False) parser.add_argument('--size', help='Pass in new VD size you want to expand to in MB', required=False) args = vars(parser.parse_args()) logging.basicConfig(format='%(message)s', stream=sys.stdout, level=logging.INFO) def script_examples(): print("""\n- VirtualDiskExpansionREDFISH.py -ip 192.168.0.120 -u root -p calvin --get-controllers, this example will return all storage controller FQDDs. \n- VirtualDiskExpansionREDFISH.py -ip 192.168.0.120 -u root -p calvin --get-virtualdisk-details RAID.Integrated.1-1, this example will get detailed information for all virtual disks behind this storage controller. \n- VirtualDiskExpansionREDFISH.py -ip 192.168.0.120 -u root -p calvin --expand Disk.Virtual.0:RAID.SL.3-1 --pdisks Disk.Bay.2:Enclosure.Internal.0-1:RAID.SL.3-1, this example shows expanding virtual disk 0 adding disk 2. \n- VirtualDiskExpansionREDFISH.py -ip 192.168.0.120 -u root -p calvin --expand Disk.Virtual.0:RAID.SL.3-1 --size 400000, this example shows expanding the virtual disk size to 400GB.""") sys.exit(0) def check_supported_idrac_version(): if args["x"]: response = requests.get('https://%s/redfish/v1/Dell/Systems/System.Embedded.1/DellRaidService' % idrac_ip, verify=verify_cert, headers={'X-Auth-Token': args["x"]}) else: response = requests.get('https://%s/redfish/v1/Dell/Systems/System.Embedded.1/DellRaidService' % idrac_ip, verify=verify_cert,auth=(idrac_username, idrac_password)) data = response.json() if response.status_code == 401: logging.warning("\n- WARNING, status code %s returned. Incorrect iDRAC username/password or invalid privilege detected." 
def get_virtual_disks():
    """Print each virtual-disk FQDD behind the given controller along with its volume type."""
    controller = args["get_virtualdisks"]
    test_valid_controller_FQDD_string(controller)
    # One GET for the volume collection of the requested controller.
    volumes_uri = 'https://%s/redfish/v1/Systems/System.Embedded.1/Storage/%s/Volumes' % (idrac_ip, controller)
    if args["x"]:
        response = requests.get(volumes_uri, verify=verify_cert, headers={'X-Auth-Token': args["x"]})
    else:
        response = requests.get(volumes_uri, verify=verify_cert, auth=(idrac_username, idrac_password))
    data = response.json()
    if data['Members'] == []:
        logging.warning("\n- WARNING, no volume(s) detected for %s" % controller)
        sys.exit(0)
    # Collect the volume FQDDs (last URI segment of each collection member).
    vd_list = [member['@odata.id'].split("/")[-1] for member in data['Members']]
    logging.info("\n- Volume(s) detected for %s controller -\n" % controller)
    for vd in vd_list:
        # Fetch each volume individually and report its VolumeType, if present.
        vd_uri = 'https://%s/redfish/v1/Systems/System.Embedded.1/Storage/Volumes/%s' % (idrac_ip, vd)
        if args["x"]:
            response = requests.get(vd_uri, verify=verify_cert, headers={'X-Auth-Token': args["x"]})
        else:
            response = requests.get(vd_uri, verify=verify_cert, auth=(idrac_username, idrac_password))
        data = response.json()
        if "VolumeType" in data:
            print("%s, Volume type: %s" % (vd, data["VolumeType"]))
failed, return code %s" % response.status_code) logging.error("Extended Info Message: {0}".format(response.json())) sys.exit(0) drive_list = [] if data['Drives'] == []: logging.warning("\n- WARNING, no drives detected for %s" % args["get_disks"]) sys.exit(0) else: for i in data['Drives']: drive_list.append(i['@odata.id'].split("/")[-1]) logging.info("\n- Drives detected for controller \"%s\" and RaidStatus\n" % args["get_disks"]) for i in drive_list: if args["x"]: response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/Storage/Drives/%s' % (idrac_ip, i), verify=verify_cert, headers={'X-Auth-Token': args["x"]}) else: response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/Storage/Drives/%s' % (idrac_ip, i), verify=verify_cert,auth=(idrac_username, idrac_password)) data = response.json() logging.info(" - Disk: %s, Raidstatus: %s" % (i, data['Oem']['Dell']['DellPhysicalDisk']['RaidStatus'])) def expand_vd(): global job_id global job_type url = 'https://%s/redfish/v1/Dell/Systems/System.Embedded.1/DellRaidService/Actions/DellRaidService.OnlineCapacityExpansion' % (idrac_ip) if args["pdisks"]: if "," in args["pdisks"]: disk_list = args["pdisks"].split(",") payload = {"TargetFQDD": args["expand"], "PDArray": disk_list} else: payload = {"TargetFQDD": args["expand"], "PDArray": [args["pdisks"]]} elif args["size"]: payload = {"TargetFQDD": args["expand"], "Size": int(args["size"])} if args["x"]: headers = {'content-type': 'application/json', 'X-Auth-Token': args["x"]} response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert) else: headers = {'content-type': 'application/json'} response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert,auth=(idrac_username,idrac_password)) data = response.json() if response.status_code == 202 or response.status_code == 200: logging.info("\n- PASS: POST command passed to expand VD, status code %s returned" % response.status_code) else: 
def loop_job_status():
    """Poll the iDRAC job queue for ``job_id`` until the expansion job completes,
    fails, or a two-hour timeout elapses.  Exits the process on failure/timeout."""
    start_time = datetime.now()
    while True:
        if args["x"]:
            response = requests.get('https://%s/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/%s' % (idrac_ip, job_id), verify=verify_cert, headers={'X-Auth-Token': args["x"]})
        else:
            response = requests.get('https://%s/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/%s' % (idrac_ip, job_id), verify=verify_cert, auth=(idrac_username, idrac_password))
        if response.status_code != 200:
            # BUG FIX: the original referenced the undefined names 'statusCode'
            # and 'req' here, raising NameError instead of reporting the failure.
            logging.error("\n- FAIL, GET command failed to check job status, return code is %s" % response.status_code)
            logging.error("Extended Info Message: {0}".format(response.json()))
            sys.exit(0)
        data = response.json()
        # BUG FIX: the original compared str(timedelta)[0:7] >= "2:00:00"
        # lexicographically; compare elapsed seconds directly instead.
        if (datetime.now() - start_time).total_seconds() >= 7200:
            logging.error("\n- FAIL: Timeout of 2 hours has been hit, script stopped\n")
            sys.exit(0)
        elif "Fail" in data['Message'] or "fail" in data['Message'] or data['JobState'] == "Failed":
            logging.error("- FAIL: job ID %s failed, failed message is: %s" % (job_id, data['Message']))
            sys.exit(0)
        elif data['JobState'] == "Completed":
            logging.info("\n--- PASS, Final Detailed Job Status Results ---\n")
            for key, value in data.items():
                # BUG FIX: the original chained these tests with 'or', which is
                # always true, so the odata/MessageArgs/TargetSettingsURI entries
                # it meant to skip were printed anyway.
                if "odata" not in key and "MessageArgs" not in key and "TargetSettingsURI" not in key:
                    print("%s: %s" % (key, value))
            break
        else:
            logging.info("- INFO, job status not completed, current status: \"%s\"" % data['Message'].rstrip("."))
            time.sleep(3)
PypiClean
/EasyModels-1.6.2-py3-none-any.whl/easymodels/utils/gui.py
class GUI:
    """PySimpleGUIQt front-end for browsing EasyModels categories and projects."""

    @staticmethod
    def category_to_layout(category_info):
        """Build two button-column layouts from a category's model list.

        Each model dict may carry 'title', 'link' and 'framework'; any missing
        value is rendered as the literal string 'null'.  Returns (col1, col2),
        with buttons alternating between the two columns.
        """
        # FIX: originally defined without 'self' or @staticmethod yet called as
        # GUI.category_to_layout(...); @staticmethod makes that contract explicit
        # and keeps it working on instances too.
        titles = []
        links = []
        frameworks = []
        col1 = []
        col2 = []
        for model in category_info['models']:
            if model.get('title'):
                titles.append(model['title'][0:60])
            else:
                titles.append('null')
            if model.get('link'):
                links.append(model['link'].replace('https://github.com/', ''))
            else:
                links.append('null')
            if model.get('framework'):
                frameworks.append(model['framework'])
            else:
                frameworks.append('null')
        # Alternate buttons between the two columns (odd index -> col1,
        # even -> col2); enumerate() replaces the original manual counter.
        for idx, title in enumerate(titles):
            button = [g.Button(title + ' | ' + frameworks[idx], key='https://github.com/' + links[idx])]
            if idx % 2 != 0:
                col1.append(button)
            else:
                col2.append(button)
        return col1, col2

    @staticmethod
    def gui(dark=False):
        """Run the main EasyModels window loop.

        dark: use the dark color scheme when True.
        """
        # NOTE(review): named 'titlebar' but passed as no_titlebar, so True
        # *hides* the titlebar; also 'win' in sys.platform matches 'darwin'
        # as well as Windows -- confirm both behaviors are intended.
        titlebar = 'win' in sys.platform
        if dark:
            background_color = "#262626"
            input_elements_background_color = "#262626"
            button_color = ('white', '#171717')
            text_color = "white"
            text_element_background_color = "#262626"
            input_text_color = "white"
        else:
            background_color = 'white'
            input_elements_background_color = '#474747'
            button_color = ('black', 'white')
            text_color = 'black'
            text_element_background_color = 'white'
            input_text_color = 'black'
        g.SetOptions(background_color=background_color,
                     input_elements_background_color=input_elements_background_color,
                     button_color=button_color,
                     text_color=text_color,
                     text_element_background_color=text_element_background_color,
                     border_width=0,
                     input_text_color=input_text_color,
                     auto_size_buttons=True,
                     auto_size_text=True)
        layout = [
            [g.T('EasyModels', font=('Arial', 15))],
            [g.T('Please pick a category from below:')]
        ]
        cat_json = Categories.get_all_categories()
        titles = []
        # Maps the display title of each category to its API identifier.
        cat_to_id = {
            'Computer Vision': 'computer-vision',
            'Natural Language Processing': 'natural-language-processing',
            'Generative Models': 'generative-models',
            'Reinforcement Learning': 'reinforcement-learning',
            'Unsupervised Learning': 'unsupervised-learning',
            'Audio and Speech': 'audio-speech'
        }
        for k, v in cat_json.items():
            titles.append([v['title']])
        for title in titles:
            layout.append([g.Button(str(title[0]), key=cat_to_id[title[0]])])
        layout.append([g.Cancel('Close')])
        window = g.Window('EasyModels', layout=layout, keep_on_top=True, grab_anywhere=True, no_titlebar=titlebar)
        while True:
            event, values = window.Read()
            print(event)
            if event == 'Close':
                # FIX: was the interactive-only builtin exit(); sys.exit() is
                # the reliable way to terminate a script.
                sys.exit()
            # Any other event is a category button; open the project browser.
            category_info = Categories.get_category_info(event)
            new_layout = [
                [g.T('Available Projects', font=('Arial', 15))]
            ]
            col1, col2 = GUI.category_to_layout(category_info)
            new_layout.append([g.Column(col2), g.Column(col1)])
            new_layout.append([g.Cancel('Close')])
            new_window = g.Window('Projects', layout=new_layout, keep_on_top=True, grab_anywhere=True, no_titlebar=titlebar)
            while True:
                event1, values1 = new_window.Read()
                if event1 == 'Close':
                    new_window.Close()
                    break
                # Each project button's key is its GitHub URL.
                open_new_tab(event1)
PypiClean
/AmFast-0.5.3-r541.tar.gz/AmFast-0.5.3-r541/ez_setup.py
import sys DEFAULT_VERSION = "0.6c9" DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3] md5_data = { 'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca', 'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb', 'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b', 'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a', 'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618', 'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac', 'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5', 'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4', 'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c', 'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b', 'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27', 'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277', 'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa', 'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e', 'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e', 'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f', 'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2', 'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc', 'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167', 'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64', 'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d', 'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20', 'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab', 'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53', 'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2', 'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e', 'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372', 'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902', 
'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de', 'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b', 'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03', 'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a', 'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6', 'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a', } import sys, os try: from hashlib import md5 except ImportError: from md5 import md5 def _validate_md5(egg_name, data): if egg_name in md5_data: digest = md5(data).hexdigest() if digest != md5_data[egg_name]: print >>sys.stderr, ( "md5 validation of %s failed! (Possible download problem?)" % egg_name ) sys.exit(2) return data def use_setuptools( version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir, download_delay=15 ): """Automatically find/download setuptools and make it available on sys.path `version` should be a valid setuptools version number that is available as an egg for download under the `download_base` URL (which should end with a '/'). `to_dir` is the directory where setuptools will be downloaded, if it is not already available. If `download_delay` is specified, it should be the number of seconds that will be paused before initiating a download, should one be required. If an older version of setuptools is installed, this routine will print a message to ``sys.stderr`` and raise SystemExit in an attempt to abort the calling script. 
""" was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules def do_download(): egg = download_setuptools(version, download_base, to_dir, download_delay) sys.path.insert(0, egg) import setuptools; setuptools.bootstrap_install_from = egg try: import pkg_resources except ImportError: return do_download() try: pkg_resources.require("setuptools>="+version); return except pkg_resources.VersionConflict, e: if was_imported: print >>sys.stderr, ( "The required version of setuptools (>=%s) is not available, and\n" "can't be installed while this script is running. Please install\n" " a more recent version first, using 'easy_install -U setuptools'." "\n\n(Currently using %r)" ) % (version, e.args[0]) sys.exit(2) else: del pkg_resources, sys.modules['pkg_resources'] # reload ok return do_download() except pkg_resources.DistributionNotFound: return do_download() def download_setuptools( version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir, delay = 15 ): """Download setuptools from a specified location and return its filename `version` should be a valid setuptools version number that is available as an egg for download under the `download_base` URL (which should end with a '/'). `to_dir` is the directory where the egg will be downloaded. `delay` is the number of seconds to pause before an actual download attempt. """ import urllib2, shutil egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3]) url = download_base + egg_name saveto = os.path.join(to_dir, egg_name) src = dst = None if not os.path.exists(saveto): # Avoid repeated downloads try: from distutils import log if delay: log.warn(""" --------------------------------------------------------------------------- This script requires setuptools version %s to run (even to display help). I will attempt to download it for you (from %s), but you may need to enable firewall access for this script first. I will start the download in %d seconds. 
(Note: if this machine does not have network access, please obtain the file %s and place it in this directory before rerunning this script.) ---------------------------------------------------------------------------""", version, download_base, delay, url ); from time import sleep; sleep(delay) log.warn("Downloading %s", url) src = urllib2.urlopen(url) # Read/write all in one block, so we don't create a corrupt file # if the download is interrupted. data = _validate_md5(egg_name, src.read()) dst = open(saveto,"wb"); dst.write(data) finally: if src: src.close() if dst: dst.close() return os.path.realpath(saveto) def main(argv, version=DEFAULT_VERSION): """Install or upgrade setuptools and EasyInstall""" try: import setuptools except ImportError: egg = None try: egg = download_setuptools(version, delay=0) sys.path.insert(0,egg) from setuptools.command.easy_install import main return main(list(argv)+[egg]) # we're done here finally: if egg and os.path.exists(egg): os.unlink(egg) else: if setuptools.__version__ == '0.0.1': print >>sys.stderr, ( "You have an obsolete version of setuptools installed. Please\n" "remove it from your system entirely before rerunning this script." ) sys.exit(2) req = "setuptools>="+version import pkg_resources try: pkg_resources.require(req) except pkg_resources.VersionConflict: try: from setuptools.command.easy_install import main except ImportError: from easy_install import main main(list(argv)+[download_setuptools(delay=0)]) sys.exit(0) # try to force an exit else: if argv: from setuptools.command.easy_install import main main(argv) else: print "Setuptools version",version,"or greater has been installed." 
def update_md5(filenames):
    """Update our built-in md5 registry"""
    # Maintenance helper (Python 2): recomputes the digest of each given egg
    # file and rewrites the md5_data = {...} literal in this script's own
    # source, i.e. the script modifies itself.
    import re

    for name in filenames:
        base = os.path.basename(name)
        f = open(name,'rb')
        md5_data[base] = md5(f.read()).hexdigest()
        f.close()

    # Render the registry as sorted "key: value," source lines.
    data = ["    %r: %r,\n" % it for it in md5_data.items()]
    data.sort()
    repl = "".join(data)

    import inspect
    srcfile = inspect.getsourcefile(sys.modules[__name__])
    f = open(srcfile, 'rb'); src = f.read(); f.close()

    # Locate the body of the md5_data dict literal in our own source text.
    match = re.search("\nmd5_data = {\n([^}]+)}", src)
    if not match:
        print >>sys.stderr, "Internal error!"
        sys.exit(2)

    # Splice the regenerated entries in place of the old dict body.
    src = src[:match.start(1)] + repl + src[match.end(1):]
    f = open(srcfile,'w')
    f.write(src)
    f.close()
PypiClean
/ACME%20oneM2M%20CSE-0.3.0.tar.gz/ACME oneM2M CSE-0.3.0/acme/HttpServer.py
from flask import Flask, request, make_response
import flask
from Configuration import Configuration
from Constants import Constants as C
import CSE, Utils
from Logging import Logging, RedirectHandler
from resources.Resource import Resource
import json, requests, logging, os
from werkzeug.serving import WSGIRequestHandler


class HttpServer(object):
	"""Flask-based HTTP binding for the CSE.

	Registers the oneM2M CRUD endpoints under the configured root path,
	optionally serves the web UI, and forwards incoming requests to
	CSE.dispatcher. Also provides the client side for outgoing oneM2M
	HTTP requests (sendRequest and friends).
	"""

	def __init__(self):
		# Initialize the http server
		# Meaning defaults are automatically provided.
		self.flaskApp = Flask(Configuration.get('cse.csi'))
		self.rootPath = Configuration.get('http.root')
		Logging.log('Registering http server root at: %s' % self.rootPath)
		# Normalize the root path: strip every trailing '/'.
		while self.rootPath.endswith('/'):
			self.rootPath = self.rootPath[:-1]

		# Add endpoints
		# Only the '/<path:path>' variants are registered; the bare-root
		# variants are intentionally left disabled.
		# self.addEndpoint(self.rootPath + '/', handler=self.handleGET, methods=['GET'])
		self.addEndpoint(self.rootPath + '/<path:path>', handler=self.handleGET, methods=['GET'])
		# self.addEndpoint(self.rootPath + '/', handler=self.handlePOST, methods=['POST'])
		self.addEndpoint(self.rootPath + '/<path:path>', handler=self.handlePOST, methods=['POST'])
		# self.addEndpoint(self.rootPath + '/', handler=self.handlePUT, methods=['PUT'])
		self.addEndpoint(self.rootPath + '/<path:path>', handler=self.handlePUT, methods=['PUT'])
		# self.addEndpoint(self.rootPath + '/', handler=self.handleDELETE, methods=['DELETE'])
		self.addEndpoint(self.rootPath + '/<path:path>', handler=self.handleDELETE, methods=['DELETE'])

		# Register the endpoint for the web UI
		if Configuration.get('cse.webui.enable'):
			self.webuiRoot = Configuration.get('cse.webui.root')
			self.webuiDirectory = '%s/webui' % CSE.rootDirectory
			Logging.log('Registering web ui at: %s, serving from %s' % (self.webuiRoot, self.webuiDirectory))
			self.addEndpoint(self.webuiRoot, handler=self.handleWebUIGET, methods=['GET'])
			self.addEndpoint(self.webuiRoot + '/<path:path>', handler=self.handleWebUIGET, methods=['GET'])
			self.addEndpoint('/', handler=self.redirectRoot, methods=['GET'])

		# Add mapping / macro endpoints
		# NOTE(review): this key uses the 'server.http.' prefix while every
		# other lookup in this class uses 'http.' — verify against the
		# configuration schema that this is intentional.
		self.mappings = {}
		if (mappings := Configuration.get('server.http.mappings')) is not None:
			# mappings is a list of tuples
			for (k, v) in mappings:
				Logging.log('Registering mapping: %s%s -> %s%s' % (self.rootPath, k, self.rootPath, v))
				self.addEndpoint(self.rootPath + k, handler=self.requestRedirect, methods=['GET', 'POST', 'PUT', 'DELETE'])
			self.mappings = dict(mappings)

	def run(self):
		"""Start the Flask server. Blocks forever on success."""
		# Redirect the http server (Flask) log output to the CSE logs
		werkzeugLog = logging.getLogger('werkzeug')
		werkzeugLog.addHandler(RedirectHandler("httpServer"))
		WSGIRequestHandler.protocol_version = "HTTP/1.1"
		# Run the http server. This runs forever.
		# The server can run single-threadedly since some of the underlying
		# components (e.g. TinyDB) may run into problems otherwise.
		if self.flaskApp is not None:
			try:
				self.flaskApp.run(host=Configuration.get('http.listenIF'), port=Configuration.get('http.port'), threaded=Configuration.get('http.multiThread'))
			except Exception as e:
				Logging.logErr(e)

	def addEndpoint(self, endpoint=None, endpoint_name=None, handler=None, methods=None):
		"""Register a URL rule with the Flask application."""
		self.flaskApp.add_url_rule(endpoint, endpoint_name, handler, methods=methods)

	def handleGET(self, path=None):
		"""Handle an incoming oneM2M RETRIEVE request."""
		Logging.logDebug('==> Retrieve: %s' % request.path)
		Logging.logDebug('Headers: \n' + str(request.headers))
		CSE.event.httpRetrieve()
		(resource, rc) = CSE.dispatcher.retrieveRequest(request)
		return self._prepareResponse(request, resource, rc)

	def handlePOST(self, path=None):
		"""Handle an incoming oneM2M CREATE request."""
		Logging.logDebug('==> Create: %s' % request.path)
		Logging.logDebug('Headers: \n' + str(request.headers))
		Logging.logDebug('Body: \n' + str(request.data))
		CSE.event.httpCreate()
		(resource, rc) = CSE.dispatcher.createRequest(request)
		return self._prepareResponse(request, resource, rc)

	def handlePUT(self, path=None):
		"""Handle an incoming oneM2M UPDATE request."""
		Logging.logDebug('==> Update: %s' % request.path)
		Logging.logDebug('Headers: \n' + str(request.headers))
		Logging.logDebug('Body: \n' + str(request.data))
		CSE.event.httpUpdate()
		(resource, rc) = CSE.dispatcher.updateRequest(request)
		return self._prepareResponse(request, resource, rc)

	def handleDELETE(self, path=None):
		"""Handle an incoming oneM2M DELETE request."""
		Logging.logDebug('==> Delete: %s' % request.path)
		Logging.logDebug('Headers: \n' + str(request.headers))
		CSE.event.httpDelete()
		(resource, rc) = CSE.dispatcher.deleteRequest(request)
		return self._prepareResponse(request, resource, rc)

	#########################################################################

	# Handle requests to mapped paths
	def requestRedirect(self):
		"""Issue a 307 redirect for a configured mapping / macro path."""
		path = request.path[len(self.rootPath):] if request.path.startswith(self.rootPath) else request.path
		if path in self.mappings:
			Logging.logDebug('==> Redirecting to: %s' % path)
			CSE.event.httpRedirect()
			return flask.redirect(self.mappings[path], code=307)
		return '', 404

	#########################################################################

	# Redirect request to / to webui
	def redirectRoot(self):
		"""Redirect '/' to the configured web UI root (302)."""
		return flask.redirect(Configuration.get('cse.webui.root'), code=302)

	def handleWebUIGET(self, path=None):
		"""Serve web UI files, redirecting bare requests to index.html."""
		# security check whether the path lies under the web root
		# NOTE(review): string concatenation always starts with its own
		# prefix, so this check never fails as written; without path
		# normalization it does not stop '..' traversal — verify.
		if not (CSE.rootDirectory + request.path).startswith(CSE.rootDirectory):
			return None, 404
		# Redirect to index file. Also include base / cse RI
		if path == None or len(path) == 0 or (path.endswith('index.html') and len(request.args) != 1):
			return flask.redirect('%s/index.html?ri=/%s' % (self.webuiRoot, Configuration.get('cse.ri')), code=302)
		else:
			filename = '%s/%s' % (self.webuiDirectory, path)	# return any file in the web directory
			try:
				return flask.send_file(filename)
			except Exception as e:
				flask.abort(404)

	#########################################################################
	#
	#	Send various types of HTTP requests
	#

	def sendRetrieveRequest(self, url, originator):
		"""Send a oneM2M RETRIEVE request (HTTP GET) to url."""
		return self.sendRequest(requests.get, url, originator)

	def sendCreateRequest(self, url, originator, ty=None, data=None):
		"""Send a oneM2M CREATE request (HTTP POST) to url."""
		return self.sendRequest(requests.post, url, originator, ty, data)

	def sendUpdateRequest(self, url, originator, data):
		"""Send a oneM2M UPDATE request (HTTP PUT) to url."""
		return self.sendRequest(requests.put, url, originator, data=data)

	def sendDeleteRequest(self, url, originator):
		"""Send a oneM2M DELETE request (HTTP DELETE) to url."""
		return self.sendRequest(requests.delete, url, originator)

	def sendRequest(self, method, url, originator, ty=None, data=None, ct='application/json'):
		"""Perform an outgoing oneM2M HTTP request.

		Returns a (parsed JSON body or None, oneM2M response status code)
		tuple. Network failures map to C.rcTargetNotReachable.
		"""
		headers = {	'Content-Type'	: '%s%s' % (ct, ';ty=%d' % ty if ty is not None else ''),
					'X-M2M-Origin'	: originator,
					'X-M2M-RI'		: Utils.uniqueRI()
				  }
		try:
			r = method(url, data=data, headers=headers)
		except Exception as e:
			Logging.logWarn('Failed to send request: %s' % str(e))
			return (None, C.rcTargetNotReachable)
		# Missing X-M2M-RSC on the wire is reported as an internal error.
		rc = int(r.headers['X-M2M-RSC']) if 'X-M2M-RSC' in r.headers else C.rcInternalServerError
		return (r.json() if len(r.content) > 0 else None, rc)

	#########################################################################

	# NOTE(review): the 'request' parameter shadows the module-level flask
	# 'request' import inside this method; intentional here since the
	# caller always passes the current request.
	def _prepareResponse(self, request, resource, returnCode):
		"""Build the Flask response for a handled oneM2M request."""
		if resource is None or returnCode == C.rcDeleted:
			r = ''
		elif isinstance(resource, dict):
			r = json.dumps(resource)
		else:
			# Serialize Resource instances; anything else is passed through.
			if (r := resource.asJSON() if isinstance(resource, Resource) else resource) is None:
				r = ''
				returnCode = C.rcNotFound
		Logging.logDebug('Response: \n' + str(r))
		resp = make_response(r)

		# headers
		resp.headers['X-M2M-RSC'] = str(returnCode)
		# Echo request identifiers back to the originator, when present.
		if 'X-M2M-RI' in request.headers:
			resp.headers['X-M2M-RI'] = request.headers['X-M2M-RI']
		if 'X-M2M-RVI' in request.headers:
			resp.headers['X-M2M-RVI'] = request.headers['X-M2M-RVI']

		resp.status_code = self._statusCode(returnCode)
		resp.content_type = C.hfvContentType
		return resp

	#
	#	Mapping of oneM2M return codes to http status codes
	#

	_codes = {
		C.rcOK : 200,								# OK
		C.rcDeleted : 200,							# DELETED
		C.rcUpdated : 200,							# UPDATED
		C.rcCreated : 201,							# CREATED
		C.rcBadRequest : 400,						# BAD REQUEST
		C.rcContentsUnacceptable : 400,				# NOT ACCEPTABLE
		C.rcInsufficientArguments : 400,			# INSUFFICIENT ARGUMENTS
		C.rcInvalidArguments : 400,					# INVALID ARGUMENTS
		C.rcMaxNumberOfMemberExceeded : 400, 		# MAX NUMBER OF MEMBER EXCEEDED
		C.rcGroupMemberTypeInconsistent : 400,		# GROUP MEMBER TYPE INCONSISTENT
		C.rcOriginatorHasNoPrivilege : 403,			# ORIGINATOR HAS NO PRIVILEGE
		C.rcInvalidChildResourceType : 403,			# INVALID CHILD RESOURCE TYPE
		C.rcTargetNotReachable : 403,				# TARGET NOT REACHABLE
		C.rcAlreadyExists : 403,					# ALREADY EXISTS
		C.rcTargetNotSubscribable : 403,			# TARGET NOT SUBSCRIBABLE
		C.rcReceiverHasNoPrivileges : 403,			# RECEIVER HAS NO PRIVILEGE
		C.rcNotFound : 404,							# NOT FOUND
		C.rcOperationNotAllowed : 405,				# OPERATION NOT ALLOWED
		C.rcInternalServerError : 500,				# INTERNAL SERVER ERROR
		C.rcNotImplemented : 501,					# NOT IMPLEMENTED
	}

	def _statusCode(self, sc):
		"""Map a oneM2M response status code to an HTTP status code."""
		return self._codes[sc]
PypiClean
/Flask-BasicAuth-0.2.0.tar.gz/Flask-BasicAuth-0.2.0/flask_basicauth.py
import base64
import hmac
from functools import wraps

from flask import current_app, request, Response


__version__ = '0.2.0'


class BasicAuth(object):
    """
    A Flask extension for adding HTTP basic access authentication to the
    application.

    :param app: a :class:`~flask.Flask` instance. Defaults to `None`. If no
        application is provided on creation, then it can be provided later on
        via :meth:`init_app`.
    """

    def __init__(self, app=None):
        if app is not None:
            self.app = app
            self.init_app(app)
        else:
            self.app = None

    def init_app(self, app):
        """
        Initialize this BasicAuth extension for the given application.

        :param app: a :class:`~flask.Flask` instance
        """
        app.config.setdefault('BASIC_AUTH_FORCE', False)
        app.config.setdefault('BASIC_AUTH_REALM', '')

        @app.before_request
        def require_basic_auth():
            # Only challenge every request when BASIC_AUTH_FORCE is set;
            # otherwise protection is opt-in via the `required` decorator.
            if not current_app.config['BASIC_AUTH_FORCE']:
                return
            if not self.authenticate():
                return self.challenge()

    def check_credentials(self, username, password):
        """
        Check if the given username and password are correct.

        By default compares the given username and password to
        ``BASIC_AUTH_USERNAME`` and ``BASIC_AUTH_PASSWORD``
        configuration variables.

        :param username: a username provided by the client
        :param password: a password provided by the client
        :returns: `True` if the username and password combination was correct,
            and `False` otherwise.
        """
        correct_username = current_app.config['BASIC_AUTH_USERNAME']
        correct_password = current_app.config['BASIC_AUTH_PASSWORD']
        # Use constant-time comparisons so an attacker cannot recover the
        # credentials byte-by-byte through a timing side channel.
        return (
            self._safe_str_cmp(username, correct_username) and
            self._safe_str_cmp(password, correct_password)
        )

    @staticmethod
    def _safe_str_cmp(provided, expected):
        """
        Compare two values for equality in constant time when both are
        strings; fall back to plain ``==`` otherwise (preserving the
        behavior of the original implementation for non-string config
        values).
        """
        if isinstance(provided, str) and isinstance(expected, str):
            return hmac.compare_digest(
                provided.encode('utf-8'), expected.encode('utf-8')
            )
        return provided == expected

    def authenticate(self):
        """
        Check the request for HTTP basic access authentication header and try
        to authenticate the user.

        :returns: `True` if the user is authorized, or `False` otherwise.
        """
        auth = request.authorization
        return (
            auth and auth.type == 'basic' and
            self.check_credentials(auth.username, auth.password)
        )

    def challenge(self):
        """
        Challenge the client for username and password.

        This method is called when the client did not provide username and
        password in the request, or the username and password combination was
        wrong.

        :returns: a :class:`~flask.Response` with 401 response code, including
            the required authentication scheme and authentication realm.
        """
        realm = current_app.config['BASIC_AUTH_REALM']
        return Response(
            status=401,
            headers={'WWW-Authenticate': 'Basic realm="%s"' % realm}
        )

    def required(self, view_func):
        """
        A decorator that can be used to protect specific views with HTTP
        basic access authentication.
        """
        @wraps(view_func)
        def wrapper(*args, **kwargs):
            if self.authenticate():
                return view_func(*args, **kwargs)
            else:
                return self.challenge()
        return wrapper
PypiClean
/Djblets-3.3.tar.gz/Djblets-3.3/docs/releasenotes/0.10-beta-1.rst
.. default-intersphinx:: django1.6 djblets1.0 ================================= Djblets 0.10 Beta 1 Release Notes ================================= **Release date**: March 29, 2017 This release contains all bug fixes and features found in Djblets version :doc:`0.9.6 <0.9.6>`. Installation ============ To install this release, run the following:: $ sudo pip install \ --trusted-host downloads.reviewboard.org \ -f http://downloads.reviewboard.org/releases/Djblets/0.10/ \ --pre -U Djblets Or:: $ sudo easy_install \ -f http://downloads.reviewboard.org/releases/Djblets/0.10/ \ -U Djblets We **do not** recommend upgrading a production server with this version of Djblets. This version is still in-development, and is not guaranteed to have API stability. Compatibility Changes ===================== * Added initial support for Django 1.7 and higher. We're working toward supporting all versions of Django 1.6 and up. We expect to ship full support for these versions by the final Djblets 0.10 release. For now, we still recommend Django 1.6 for greatest compatibility. * Updated to django-pipeline 1.6.x. This release uses django-pipeline 1.6.x, which requires various changes in your application. Specifically, you'll need to change your :file:`settings.py` file to use the new ``PIPELINE`` attribute, and update your templates to use ``stylesheet`` and ``javascript`` instead of ``compressed_css`` and ``compressed_js``. See django-pipeline's `upgrade guide <https://django-pipeline.readthedocs.org/en/1.6.9/installation.html#upgrading-from-1-3>`_ for more details. Style Sheets ============ * Modernized the look of literal text and code blocks in Markdown rendered text. The text is shown as red with a red border and light grey background, resembling the look used on Slack and other services. Code blocks have improved margins and padding to help align the rendered text with the source text. 
djblets.auth ============ * The account registration view can now take extra context for the template. :py:func:`~djblets.auth.views.register` now accepts an ``extra_context`` argument for passing custom data down to the template for rendering. djblets.avatars (new) ===================== * Added support for configurable avatars. This introduces new support for avatar display using Gravatars, uploaded files, URLs, or custom backends. Avatars can be customized on a global or per-user basis. See :ref:`avatar-guides` for information on avatars. djblets.cache ============= * Added a class for synchronizing generation IDs across processes and servers. :py:class:`~djblets.cache.synchronizer.GenerationSynchronizer` can be used to synchronize a form of identification across multiple processes or servers, helping to coordinate when state needs to be reloaded from disk, database, another server, etc. When state changes, the caller just needs to mark the synchronizer as updated on their end, and other processes will see the state as expired on their end. .. _0.10-beta-1-conditions: djblets.conditions (new) ======================== * Added support for user-customizable condition rules. Conditions are a way to allow applications to give users a degree of flexibility for choosing when certain actions should take place. Users can define one or more conditions, consisting of a choice (a property on an object to match upon in some form), an operator ("is", "starts with", etc.), and a value (depending on the type of choice and operator), along with whether all or any conditions must be matched. These can be used for extension or integration development, or for anything else needed by the application. There are form fields to drop conditions onto a page, and lots of support for crafting types of condition choices and operators. See :py:mod:`djblets.conditions` for more information. djblets.configforms =================== * Added support for dynamically-augmented configuration pages. 
Configuration pages inheriting from :py:class:`~djblets.configforms.mixins.DynamicConfigPageMixin` can be augmented by other callers (such as extensions). This makes use of the new `registries <0.10-beta-1-registries>`_ support. * Custom configuration pages can now pass extra context to the template. Subclasses can override :py:meth:`ConfigPageForm.get_extra_context <djblets.configforms.forms.ConfigPageForm.get_extra_context>` to return extra context that the templates for the page or a form within the page can use, allowing for data to be computed before rendering the template. djblets.db ========== * Added custom object serialization for :py:class:`~djblets.db.fields.JSONField`. Objects being stored can now handle their own serialization by implementing a :py:meth:`to_json` method. There is no support for custom deserialization into objects. * Added a method for prefixing query expressions. :py:func:`~djblets.db.query.prefix_q` is used to provide a prefix to all :py:class:`~django.db.models.Q` objects for a query. This can be used to create a common query expression and to allow a caller to tailor it for a relation on another object. djblets.extensions ================== * Simplified writing extension hooks. :py:class:`~djblets.extensions.hooks.ExtensionHook` subclasses can now override :py:meth:`~djblets.extensions.hooks.ExtensionHook.initialize` instead of :py:meth:`~djblets.extensions.hooks.ExtensionHook.__init__` to perform setup work for a hook. These don't need to call the parent method, and are simpler to use. * Add proper support for dynamically enabling/disabling extension hooks. Extension hooks can now be safely disabled by calling :py:meth:`~djblets.extensions.hooks.ExtensionHook.disable_hook` and re-enabled by calling :py:meth:`~djblets.extensions.hooks.ExtensionHook.enable_hook`. 
The current state can be checked by looking at :py:attr:`~djblets.extensions.hooks.ExtensionHook.hook_state` or :py:attr:`~djblets.extensions.hooks.ExtensionHook.initialized`. Extension hook instances can also be created without being enabled by default by passing ``start_enabled=False`` when instantiating. * Added a convenience method for getting the URL for an extension's static media. The new :py:meth:`~djblets.extensions.extension.Extension.get_static_url` returns the URL for a given static media file shipped by the extension. * Added a base extension hook for hooks that work with `registries <0.10-beta-1-registries>`_. :py:class:`~djblets.extensions.hooks.BaseRegistryHook` can be subclassed by applications to easily provide hooks that interface with registries, handling registration when enabled or unregistration when disabled. * Improved database synchronization and static media installation for extensions in multi-deployment setups. We previously kept a version identifier stored in the extension settings to help determine when static media needed to be installed, but this didn't work so well for multi-deployment setups. We also used this to determine when to perform a database synchronization. Now both of these requirements are stored separately, and media installation will happen automatically as needed. This will also help when moving a Review Board installation to a new server. * Failing to load an uninstalled extension now shows an appropriate error message. djblets.features (new) ====================== * Added support for light-weight feature checks. Feature checks (also known as feature switches/toggles) are a way to allow new features to be built and tested in a codebase without exposing them to every user. The feature check support in Djblets is built to make feature checks easy to use and flexible to consume. Applications can implement feature checker classes that determine how a feature is checked. 
These can check a hard-coded list of features in :file:`settings.py`, a list in the site configuration, a list against a user or an organization account, or anything else the application needs. See :ref:`feature-checks-guides` for more information. djblets.forms ============= * Added a new form base class for storing key/value data in a dictionary or dictionary-like object. :py:class:`~djblets.forms.forms.key_value_form.KeyValueForm` makes it easy to load data from a dictionary and save it back to the dictionary. It supports advanced features like disabling certain fields from being edited, setting text describing why the fields are disabled, and blacklisting certain fields from being loaded from or written to the dictionary. Subclasses can override this and provide smarter load/save support or adapt the form to work with other types of objects that don't act exactly like a dictionary. * Added form fields for working with `conditions <0.10-beta-1-conditions>`_. * Added a new base template for customizable administration change forms. The ``djblets_forms/admin/change_form_page.html`` template makes it easier to have an administration page for a change form, without using the Django admin model functionality. This forms the basis for extension configuration and siteconfig settings pages and supports all standard features (fieldsets, help text, custom widgets, and more). Along with this, there's a ``djblets_forms/admin/form_field.html`` template for form fields that live in the change form, and ``djblets_forms/admin/form_fieldsets.html`` for fieldsets. djblets.integrations (new) ========================== * Added new support for creating and consuming third-party service integrations. Integrations are similar to extensions in that they can augment a product with new functionality. 
Unlike extensions, they have built-in support for creating and using any number of distinct configurations, allowing, for instance, a Slack integration to post to different channels depending on different conditions. Integrations can make use of extension hooks, just like an extension. Integrations and their hooks are not enabled until there's at least one enabled configuration for the integration. See :ref:`integration-guides` for information on writing and consuming integrations. djblets.recaptcha (new) ======================= * Added a module for working with reCAPTCHA_. This provides easy support for using reCAPTCHA. Forms can make use of the :py:class:`~djblets.recaptcha.mixins.RecaptchaFormMixin` to display and process a reCAPTCHA. There are also widgets, template tags, and siteconfig support, which can be used as well. See :ref:`recaptcha-guides` for more information. .. _reCAPTCHA: https://www.google.com/recaptcha/intro/ .. _0.10-beta-1-registries: djblets.registries (new) ======================== * Added registries, which are used to register and look up objects. Registries are classes that provide registration, lookup, iteration, validation, and error reporting for a type of value. These can be used to provide extensibility for parts of an application. Consumers can subclass the base registry class (:py:class:`~djblets.registries.registry.Registry`) to provide registry functionality, and then create an instance in a module for callers to use. The :py:class:`~djblets.registries.registry.OrderedRegistry` subclass can be used when items in a registry need to maintain their order when listed. The :py:class:`~djblets.registries.registry.EntryPointRegistry` subclass can be used for registries that are backed by Python Entrypoints, helping bring extensibility to applications already allowing hooks from other Python packages. See :ref:`registry-guides` to learn more. 
djblets.pipeline ================ * Added a django-pipeline compiler for compiling :file:`*.es6.js` files as ES6 JavaScript. The :py:class:`~djblets.pipeline.compilers.es6.ES6Compiler` can be used to match :file:`*.es6.js` files and compile them as ES6 JavaScript. This can be used by adding ``djblets.pipeline.compilers.es6.ES6Compiler`` to ``settings.PIPELINE['COMPILERS']``. * Added a more efficient LessCSS compiler that only recompiles when necessary. The :py:class:`~djblets.pipeline.compilers.less.LessCompiler` is an improvement over the default compiler that better inspects dependencies and recompiles files when there are actual changes, rather than recompiling on every page load. This can be used by adding ``djblets.pipeline.compilers.less.LessCompiler`` to ``settings.PIPELINE['COMPILERS']``. djblets.util.decorators ======================= * Deprecated :py:func:`~djblets.util.decorators.basictag`. Django's :py:meth:`~django.template.Library.simple_tag` now provides all the same functionality that ``basictag`` provided. djblets.util.templatetags ========================= * Added a template tag for iterating over fieldsets in a form. The :py:func:`~djblets.util.templatetags.djblets_forms.get_fieldsets` template tag can be used to iterate over all fieldsets on a form, helping to craft custom templates for building more advanced forms. djblets.webapi ============== * Resources can now specify the title of serialized links. By default, link titles are always based on the string representation of the object. Now, resources can override :py:meth:`~djblets.webapi.resources.base.WebAPIResource.get_object_title` to provide a custom title. * Uploading files to an API no longer returns a :mimetype:`text/plain` mimetype. This used to be sent in order to meet a requirement in older versions of Review Board, but this is no longer the case. The proper mimetype for the resource is now returned. 
Contributors ============ * Barret Rennie * Christian Hammond * David Trowbridge * John Larmie
PypiClean